From e46c43afa68c33c0d9498a8f04a4eeefdc3f10ef Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 23 Feb 2023 13:42:23 -0300
Subject: [PATCH] Move some stuff from server.py to modules

---
 modules/models.py |  3 +++
 modules/shared.py | 22 +++++++++++++++++++++-
 server.py         | 24 ------------------------
 3 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/modules/models.py b/modules/models.py
index ad825c79..85e1362c 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -7,9 +7,12 @@ from pathlib import Path
 import modules.shared as shared
 import numpy as np
 import torch
+import transformers
 from transformers import AutoModelForCausalLM
 from transformers import AutoTokenizer
 
+transformers.logging.set_verbosity_error()
+
 local_rank = None
 
 if shared.args.flexgen:
diff --git a/modules/shared.py b/modules/shared.py
index 1823683a..7744771f 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -6,7 +6,27 @@ model_name = ""
 soft_prompt_tensor = None
 soft_prompt = False
 stop_everything = False
-settings = {}
+
+settings = {
+    'max_new_tokens': 200,
+    'max_new_tokens_min': 1,
+    'max_new_tokens_max': 2000,
+    'preset': 'NovelAI-Sphinx Moth',
+    'name1': 'Person 1',
+    'name2': 'Person 2',
+    'context': 'This is a conversation between two people.',
+    'prompt': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
+    'prompt_gpt4chan': '-----\n--- 865467536\nInput text\n--- 865467537\n',
+    'stop_at_newline': True,
+    'chat_prompt_size': 2048,
+    'chat_prompt_size_min': 0,
+    'chat_prompt_size_max': 2048,
+    'preset_pygmalion': 'Pygmalion',
+    'name1_pygmalion': 'You',
+    'name2_pygmalion': 'Kawaii',
+    'context_pygmalion': "Kawaii's persona: Kawaii is a cheerful person who loves to make others smile. She is an optimist who loves to spread happiness and positivity wherever she goes.\n",
+    'stop_at_newline_pygmalion': False,
+}
 
 parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=54))
 parser.add_argument('--model', type=str, help='Name of the model to load by default.')
diff --git a/server.py b/server.py
index 5a6f250b..791d98a3 100644
--- a/server.py
+++ b/server.py
@@ -9,7 +9,6 @@ from pathlib import Path
 
 import gradio as gr
 import torch
-import transformers
 
 import modules.chat as chat
 import modules.extensions as extensions_module
@@ -23,32 +22,9 @@ from modules.models import load_model
 from modules.models import load_soft_prompt
 from modules.text_generation import generate_reply
 
-transformers.logging.set_verbosity_error()
-
 if (shared.args.chat or shared.args.cai_chat) and not shared.args.no_stream:
     print("Warning: chat mode currently becomes somewhat slower with text streaming on.\nConsider starting the web UI with the --no-stream option.\n")
 
-shared.settings = {
-    'max_new_tokens': 200,
-    'max_new_tokens_min': 1,
-    'max_new_tokens_max': 2000,
-    'preset': 'NovelAI-Sphinx Moth',
-    'name1': 'Person 1',
-    'name2': 'Person 2',
-    'context': 'This is a conversation between two people.',
-    'prompt': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
-    'prompt_gpt4chan': '-----\n--- 865467536\nInput text\n--- 865467537\n',
-    'stop_at_newline': True,
-    'chat_prompt_size': 2048,
-    'chat_prompt_size_min': 0,
-    'chat_prompt_size_max': 2048,
-    'preset_pygmalion': 'Pygmalion',
-    'name1_pygmalion': 'You',
-    'name2_pygmalion': 'Kawaii',
-    'context_pygmalion': "Kawaii's persona: Kawaii is a cheerful person who loves to make others smile. She is an optimist who loves to spread happiness and positivity wherever she goes.\n",
-    'stop_at_newline_pygmalion': False,
-}
-
 if shared.args.settings is not None and Path(shared.args.settings).exists():
     new_settings = json.loads(open(Path(shared.args.settings), 'r').read())
    for item in new_settings: