[ Edit, Fix ] fixes for the new chat completion changes

This commit is contained in:
Anas Fikhi
2024-02-21 22:54:33 +01:00
parent c3f5fe2a49
commit e4b3eb531c
7 changed files with 21 additions and 16 deletions

View File

@ -22,9 +22,9 @@ void main() async {
),
//! image url contents are allowed only for models with image support
OpenAIChatCompletionChoiceMessageContentItemModel.imageUrl(
"https://placehold.co/600x400",
),
// OpenAIChatCompletionChoiceMessageContentItemModel.imageUrl(
// "https://placehold.co/600x400",
// ),
],
role: OpenAIChatMessageRole.user,
);
@ -41,8 +41,11 @@ void main() async {
messages: requestMessages,
temperature: 0.2,
maxTokens: 500,
toolChoice: "auto",
tools: [/* tools if you have any */],
// uncomment and set your own properties if you want to use tool choices feature..
// toolChoice: "auto",
// tools: [],
);
print(chatCompletion.choices.first.message); //

View File

@ -7,8 +7,6 @@ import 'env/env.dart';
void main() async {
OpenAI.apiKey = Env.apiKey;
final ourMockMethodName = "fastestCarInTheWorldInTheYear";
final chatStream = OpenAI.instance.chat.createStream(
model: "gpt-3.5-turbo",
messages: [
@ -25,11 +23,11 @@ void main() async {
OpenAIToolModel(
type: "function",
function: OpenAIFunctionModel.withParameters(
name: ourMockMethodName,
name: "fastestCarInTheWorldInTheYear",
parameters: [
OpenAIFunctionProperty.integer(
name: "year",
description: "The year to get the fastest car in",
description: "The year to get the fastest car in the world for.",
),
],
),
@ -52,7 +50,7 @@ void main() async {
stringBuf.write(args);
}
}, onDone: () {
if (functionNameMapper.containsKey(ourMockMethodName)) {
if (functionNameMapper.containsKey("fastestCarInTheWorldInTheYear")) {
final fullResponse = stringBuf.toString();
print(fullResponse);

View File

@ -9,9 +9,9 @@ void main() async {
// The speech request.
File speechFile = await OpenAI.instance.audio.createSpeech(
model: "tts-1",
input: "Say my name is ",
input: "it is what it is.",
voice: "nova",
responseFormat: OpenAIAudioSpeechResponseFormat.mp3,
responseFormat: OpenAIAudioSpeechResponseFormat.opus,
outputDirectory: await Directory("speechOutput").create(),
outputFileName: DateTime.now().microsecondsSinceEpoch.toString(),
);

View File

@ -17,7 +17,7 @@ Future<void> main() async {
'https://www.cbvoiceovers.com/wp-content/uploads/2017/05/Commercial-showreel.mp3',
),
model: "whisper-1",
responseFormat: OpenAIAudioResponseFormat.srt,
responseFormat: OpenAIAudioResponseFormat.text,
);
// print the transcription.

View File

@ -46,7 +46,7 @@ class OpenAIChatCompletionChoiceMessageContentItemModel {
String imageUrl,
) {
return OpenAIChatCompletionChoiceMessageContentItemModel._(
type: 'image',
type: 'image_url',
imageUrl: imageUrl,
);
}

View File

@ -255,8 +255,10 @@ abstract class OpenAINetworkingClient {
OpenAILogger.requestFinishedSuccessfully();
final fileTypeHeader = "content-type";
final fileExtensionFromBodyResponseFormat =
response.headers["response_format"] ?? "mp3";
response.headers[fileTypeHeader]?.split("/")?.last ?? "mp3";
final fileName =
outputFileName + "." + fileExtensionFromBodyResponseFormat;
@ -277,6 +279,7 @@ abstract class OpenAINetworkingClient {
response.bodyBytes,
mode: FileMode.write,
);
OpenAILogger.fileContentWrittenSuccessfully(fileName);
return onFileResponse(file);

View File

@ -91,7 +91,8 @@ interface class OpenAIChat implements OpenAIChatBase {
"messages": messages.map((message) => message.toMap()).toList(),
if (tools != null)
"tools": tools.map((tool) => tool.toMap()).toList(growable: false),
if (toolChoice != null) "tool_choice": toolChoice.value,
if (toolChoice != null)
"tool_choice": toolChoice is String ? toolChoice : toolChoice.value,
if (temperature != null) "temperature": temperature,
if (topP != null) "top_p": topP,
if (n != null) "n": n,