openai-async/t/03-create-server.t

# Test for the OpenAIAsync perl module: start a test server and check response marshalling
use strict;
use warnings;
# Enable subroutine signatures (used by mk_req below)
use feature 'signatures';
no warnings 'experimental::signatures';
# Use the Test2::V0 module for testing
use Test2::V0;
# Import the OpenAIAsync::Server module
use OpenAIAsync::Server;
# Use Object::Pad for object-oriented programming
use Object::Pad;
# Import IO::Async::Loop to provide the event loop
use IO::Async::Loop;
# Use Future::AsyncAwait for easier handling of asynchronous operations
use Future::AsyncAwait;
# Import JSON::MaybeXS for encoding and decoding JSON data
use JSON::MaybeXS;
# Import Net::Async::HTTP for asynchronous HTTP requests
use Net::Async::HTTP;
# Use the relative path './lib' for module lookup
use lib::relative './lib';
# Create an instance of IO::Async::Loop
my $loop = IO::Async::Loop->new();
# Make sure OPENAI_API_KEY is set to the dummy test value before anything reads it
BEGIN {
  no warnings 'uninitialized';
  $ENV{OPENAI_API_KEY} = "12345" unless $ENV{OPENAI_API_KEY} eq "12345";
}
# Define a TestServer class that inherits from OpenAIAsync::Server
class TestServer {
  # Inherit methods and fields from OpenAIAsync::Server
  inherit OpenAIAsync::Server;
  # Apply the OpenAIAsync::Server::API::Test roles that implement each endpoint for testing
  apply OpenAIAsync::Server::API::Test::ChatCompletion;
  apply OpenAIAsync::Server::API::Test::Audio;
  apply OpenAIAsync::Server::API::Test::Completions;
  apply OpenAIAsync::Server::API::Test::Embeddings;
  apply OpenAIAsync::Server::API::Test::File;
  apply OpenAIAsync::Server::API::Test::Image;
  apply OpenAIAsync::Server::API::Test::ModelList;
  apply OpenAIAsync::Server::API::Test::Moderations;
}
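# NOTE: the Test roles applied above are assumed to live under the test's local ./lib
# directory, which the lib::relative line in the imports adds to the module search path.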
# Pick a random high port (2048-22527), TODO better scheme for this
my $port = int(2048 + rand(20480));
# Create an instance of the TestServer class that listens on localhost with the chosen port
my $server = TestServer->new(listen => '127.0.0.1', port => $port);
# Create an instance of Net::Async::HTTP for making HTTP requests
my $http_client = Net::Async::HTTP->new();
# Add the $http_client and $server instances to the event loop
$loop->add($http_client);
$loop->add($server);
# Define a hash ref for the chat completion request data
my $chat_completion_input = {
"model" => "gpt-3.5-turbo",
"messages" => [
{"role" => "user", "content" => "Say this is a test!"}
],
"temperature" => 0.7
};
# Subroutine to make an HTTP POST request to the test server
sub mk_req($uri, $content) {
  # Encode the request body as JSON and POST it to the given API path under /v1
  my $content_json = encode_json($content);
  return $http_client->POST("http://127.0.0.1:$port/v1".$uri, $content_json, content_type => 'application/json');
}
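# mk_req() returns the Future from Net::Async::HTTP->POST, which resolves to an HTTP::Response once the request completes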
# Make an HTTP POST request to the chat completions endpoint with the chat completion input data
my $res_fut = mk_req("/chat/completions", $chat_completion_input);
# Delay the loop for 5 seconds to allow the request to complete
$loop->delay_future(after => 5)->get();
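# (calling ->get on the response future below will also run the event loop until the response is ready)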
# Get the response object from the future
my $res = $res_fut->get();
# Extract the response content
my $content = $res->content;
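# Compare the raw JSON body against the canned response the test ChatCompletion role is expected to return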
is($content, '{"choices":[],"created":"0","id":"24601","model":"GumbyBrain-llm","object":"text_completion","system_fingerprint":"SHODAN node 12 of 16 tertiary adjunct of unimatrix 42","usage":{"completion_tokens":9,"prompt_tokens":6,"total_tokens":42}}', "check marshalling of data directly");
done_testing();