From 950e83017a8797a0cff9b672553af74e906f0df7 Mon Sep 17 00:00:00 2001 From: Ryan Voots Date: Thu, 23 Nov 2023 14:51:54 -0500 Subject: [PATCH] Finish off completions direct modules --- lib/OpenAIAsync/Types/Results/Completion.pod | 6 +++ .../Types/Results/CompletionChoices.pod | 53 +++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 lib/OpenAIAsync/Types/Results/CompletionChoices.pod diff --git a/lib/OpenAIAsync/Types/Results/Completion.pod b/lib/OpenAIAsync/Types/Results/Completion.pod index fc36519..8cab96c 100644 --- a/lib/OpenAIAsync/Types/Results/Completion.pod +++ b/lib/OpenAIAsync/Types/Results/Completion.pod @@ -30,6 +30,12 @@ A result from a completion request, L id of the completion response, used for tracking duplicate responses or reporting issues to the service +=head2 choices + +An array of L<OpenAIAsync::Types::Results::CompletionChoices> objects. If you asked for more than 1 response with the request parameter C<n> then they will be present here. + +You likely just want to get ->text from the first result, as demonstrated in the synopsis, but see the ::CompletionChoices docs for more detailed information. + =head2 model The model that was used to generate the response.
Usually will be what you requested, diff --git a/lib/OpenAIAsync/Types/Results/CompletionChoices.pod b/lib/OpenAIAsync/Types/Results/CompletionChoices.pod new file mode 100644 index 0000000..1afe016 --- /dev/null +++ b/lib/OpenAIAsync/Types/Results/CompletionChoices.pod @@ -0,0 +1,53 @@ +=pod + +=head1 NAME + +OpenAIAsync::Types::Results::CompletionChoices + +=head1 DESCRIPTION + +A choice from a completion request, returned by L<OpenAIAsync::Client> as part of L<OpenAIAsync::Types::Results::Completion> + +=head1 SYNOPSIS + + use OpenAIAsync::Client; + use IO::Async::Loop; + + my $loop = IO::Async::Loop->new(); + my $client = OpenAIAsync::Client->new(); + + $loop->add($client); + + my $output_future = $client->completion({max_tokens => 1024, prompt => "Tell a story about a princess named Judy and her princess sister Emmy"}); + + my $result = $output_future->get(); + + print $result->choices->[0]->text; + +=head1 Fields + +=head2 text + +The contents of the response, very likely all you want or need + +=head2 index + +Index of the choice; this should always be the same as its position in the array. + +=head2 logprobs + +Logit probabilities, see L<OpenAIAsync::Types::Results::LogProbs> for details + +=head2 finish_reason + +What made the model stop generating. Could be from hitting a stop token, or running into max tokens. + +=head1 SEE ALSO + +L<OpenAIAsync::Types::Results::Completion>, L<OpenAIAsync::Types::Request::Completion>, L<OpenAIAsync::Client> + +=head1 AUTHOR + +Ryan Voots ... + +=cut