diff --git a/Client.pod b/Client.pod index 5e724b3..50328d4 100644 --- a/Client.pod +++ b/Client.pod @@ -9,7 +9,7 @@ use IO::Async; use OpenAIAsync::Types::Results; use OpenAIAsync::Types::Requests; -our $VERSION="v0.1.0"; +our $VERSION = '0.02'; # ABSTRACT: Async client for OpenAI style REST API for various AI systems (LLMs, Images, Video, etc.) @@ -48,7 +48,7 @@ OpenAIAsync::Client - IO::Async based client for OpenAI compatible APIs max_tokens => 1024, })->get(); - # $output is now an OpenAIAsync::Type::Response::ChatCompletion + # $output is now an OpenAIAsync::Type::Results::ChatCompletion =head1 THEORY OF OPERATION @@ -174,6 +174,10 @@ Unimplemented. The opposite of the above. Unimplemented, I've not investigated this one much yet but I believe it's to get a description of an image and it's contents. +=head2 Missing APIs + +At least some APIs for getting the list of models and some other metadata; those will be added next, after I get some more documentation written. + =head1 See Also L, L, L diff --git a/lib/OpenAIAsync/Client.pm b/lib/OpenAIAsync/Client.pm index b11388f..50328d4 100644 --- a/lib/OpenAIAsync/Client.pm +++ b/lib/OpenAIAsync/Client.pm @@ -48,7 +48,7 @@ OpenAIAsync::Client - IO::Async based client for OpenAI compatible APIs max_tokens => 1024, })->get(); - # $output is now an OpenAIAsync::Type::Response::ChatCompletion + # $output is now an OpenAIAsync::Type::Results::ChatCompletion =head1 THEORY OF OPERATION diff --git a/lib/OpenAIAsync/Types/Requests/ChatCompletion.pod b/lib/OpenAIAsync/Types/Requests/ChatCompletion.pod index ff1b5f5..f63e8c2 100644 --- a/lib/OpenAIAsync/Types/Requests/ChatCompletion.pod +++ b/lib/OpenAIAsync/Types/Requests/ChatCompletion.pod @@ -6,7 +6,7 @@ OpenAIAsync::Types::Request::ChatCompletion =head1 DESCRIPTION -A chat completion request, once put through the client you'll get a L with the result of the model. +A chat completion request, once put through the client you'll get a L with the result of the model. 
=head1 SYNOPSIS @@ -199,7 +199,7 @@ That will generate a new response based on the results of the function calls wit =head1 SEE ALSO -L, L +L, L =head1 AUTHOR diff --git a/lib/OpenAIAsync/Types/Requests/Completion.pod b/lib/OpenAIAsync/Types/Requests/Completion.pod index 0dc3b05..d37ffbd 100644 --- a/lib/OpenAIAsync/Types/Requests/Completion.pod +++ b/lib/OpenAIAsync/Types/Requests/Completion.pod @@ -6,7 +6,7 @@ OpenAIAsync::Types::Request::Completion =head1 DESCRIPTION -A completion request, once put through the client you'll get a L with the result of the model. +A completion request, once put through the client you'll get a L with the result of the model. This type of request is officially deprecated by OpenAI and got it's final update in June 2023. That said it's a very simple API and will likely exist for some time, but it can be more difficult to control and get continuous responses since you have to do all the prompt formatting @@ -155,7 +155,7 @@ lead to less variation in the responses at the same time. =head1 SEE ALSO -L, L +L, L =head1 AUTHOR diff --git a/lib/OpenAIAsync/Types/Requests/Embedding.pod b/lib/OpenAIAsync/Types/Requests/Embedding.pod index c25cbd4..4bb1314 100644 --- a/lib/OpenAIAsync/Types/Requests/Embedding.pod +++ b/lib/OpenAIAsync/Types/Requests/Embedding.pod @@ -6,7 +6,7 @@ OpenAIAsync::Types::Request::Embedding =head1 DESCRIPTION -An embedding request, once put through the client you'll get a L with the result of the model. +An embedding request, once put through the client you'll get a L with the result of the model. =head1 SYNOPSIS @@ -47,7 +47,7 @@ Parameter used for tracking users when you make the api request. 
Give it whatev =head1 SEE ALSO -L, L +L, L =head1 AUTHOR diff --git a/lib/OpenAIAsync/Types/Results/ChatCompletion.pod b/lib/OpenAIAsync/Types/Results/ChatCompletion.pod new file mode 100644 index 0000000..3852fed --- /dev/null +++ b/lib/OpenAIAsync/Types/Results/ChatCompletion.pod @@ -0,0 +1,75 @@ +=pod + +=head1 NAME + +OpenAIAsync::Types::Results::ChatCompletion + +=head1 DESCRIPTION + +An object representing a Chat Completion response, see L + +=head1 SYNOPSIS + + use OpenAIAsync::Client; + use IO::Async::Loop; + + my $loop = IO::Async::Loop->new(); + + my $client = OpenAIAsync::Client->new(); + $loop->add($client); + + my $output_future = $client->chat({ + model => "gpt-3.5-turbo", + messages => [ + { + role => "system", + content => "You are a helpful assistant that tells fanciful stories" + }, + { + role => "user", + content => "Tell me a story of two princesses, Judy and Emmy. Judy is 8 and Emmy is 2." + } + ], + + max_tokens => 1024, + }); + +=head1 Fields + +=head2 id + +id of the response, used for debugging and tracking + +=head2 choices + +The chat responses, L for details. The text of the responses will be here + +=head2 created + +Date and time of when the response was generated + +=head2 model + +Name of the model that actually generated the response, may not be the same as the requested model depending on the service + +=head2 system_fingerprint + +Given by the service to identify which server actually generated the response, used to detect changes and issues with servers + +=head2 usage + +Token counts for the generated responses, in a L object. Has C, C, and C fields. + +=head2 object + +Static field that will likely only ever contain C + +=head1 SEE ALSO + +L, L, L + +=head1 AUTHOR + +Ryan Voots ... 
+ +=cut \ No newline at end of file diff --git a/lib/OpenAIAsync/Types/Results/CompletionChoices.pod b/lib/OpenAIAsync/Types/Results/CompletionChoices.pod index 4e5c190..0ab77bf 100644 --- a/lib/OpenAIAsync/Types/Results/CompletionChoices.pod +++ b/lib/OpenAIAsync/Types/Results/CompletionChoices.pod @@ -6,7 +6,7 @@ OpenAIAsync::Types::Results::CompletionChoices =head1 DESCRIPTION -A choice from a completion request, L as part of L +A choice from a completion request, L as part of L =head1 SYNOPSIS @@ -44,7 +44,7 @@ What made the model stop generating. Could be from hitting a stop token, or run =head1 SEE ALSO -L, L, L +L, L, L =head1 AUTHOR diff --git a/lib/OpenAIAsync/Types/Results/LogProbs.pod b/lib/OpenAIAsync/Types/Results/LogProbs.pod index 7633c31..4ddc8c9 100644 --- a/lib/OpenAIAsync/Types/Results/LogProbs.pod +++ b/lib/OpenAIAsync/Types/Results/LogProbs.pod @@ -24,6 +24,8 @@ Which position in the resulting text this log probability represents =head2 top_logprobss +Not available on my local AI server; this will be updated in the next set of changes to match how OpenAI implements them. + =head1 SEE ALSO L, L, L @@ -32,4 +34,4 @@ L, L