Restructure the repository (#6904)

oobabooga committed on 2025-04-26 08:56:54 -03:00 (via GitHub)
parent d4017fbb6d
commit d9de14d1f7
116 changed files with 254 additions and 261 deletions

.gitignore
View file

@@ -1,26 +1,8 @@
-/cache
-/characters
 /css
 /extensions
-/grammars
 /installer_files
-/logs
-/loras
-/models
-/presets
-/prompts
 /repositories
-/softprompts
-/torch-dumps
-/training/datasets
-/CMD_FLAGS.txt
-/img_bot*
-/img_me*
-/models/config-user.yaml
-/notification.mp3
-/settings*.json
-/settings*.yaml
+/user_data
 .chroma
 .DS_Store
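
Every hunk below repeats the same move: user-writable content is consolidated under a single `user_data/` directory. A rough relocation map compiled from this diff (illustrative, not exhaustive):

```python
# Old location -> new location, as seen in the hunks of this commit.
OLD_TO_NEW = {
    "cache": "user_data/cache",
    "characters": "user_data/characters",
    "instruction-templates": "user_data/instruction-templates",
    "logs": "user_data/logs",
    "loras": "user_data/loras",
    "models": "user_data/models",
    "presets": "user_data/presets",
    "prompts": "user_data/prompts",
    "training/datasets": "user_data/training/datasets",
    "training/formats": "user_data/training/formats",
    "CMD_FLAGS.txt": "user_data/CMD_FLAGS.txt",
}
```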

View file

@@ -182,17 +182,17 @@ List of command-line flags
 </summary>

 ```txt
-usage: server.py [-h] [--multi-user] [--character CHARACTER] [--model MODEL] [--lora LORA [LORA ...]] [--model-dir MODEL_DIR] [--lora-dir LORA_DIR] [--settings SETTINGS]
-                 [--extensions EXTENSIONS [EXTENSIONS ...]] [--verbose] [--idle-timeout IDLE_TIMEOUT] [--loader LOADER] [--cpu] [--auto-devices] [--gpu-memory GPU_MEMORY [GPU_MEMORY ...]]
-                 [--cpu-memory CPU_MEMORY] [--disk] [--disk-cache-dir DISK_CACHE_DIR] [--load-in-8bit] [--bf16] [--no-cache] [--trust-remote-code] [--force-safetensors] [--no_use_fast]
-                 [--use_flash_attention_2] [--use_eager_attention] [--torch-compile] [--load-in-4bit] [--use_double_quant] [--compute_dtype COMPUTE_DTYPE] [--quant_type QUANT_TYPE] [--flash-attn]
-                 [--n_ctx N_CTX] [--threads THREADS] [--threads-batch THREADS_BATCH] [--batch-size BATCH_SIZE] [--no-mmap] [--mlock] [--n-gpu-layers N_GPU_LAYERS] [--tensor-split TENSOR_SPLIT]
-                 [--numa] [--no-kv-offload] [--row-split] [--gpu-split GPU_SPLIT] [--autosplit] [--max_seq_len MAX_SEQ_LEN] [--cfg-cache] [--no_flash_attn] [--no_xformers] [--no_sdpa]
-                 [--num_experts_per_token NUM_EXPERTS_PER_TOKEN] [--enable_tp] [--hqq-backend HQQ_BACKEND] [--cpp-runner] [--cache_type CACHE_TYPE] [--deepspeed] [--nvme-offload-dir NVME_OFFLOAD_DIR]
-                 [--local_rank LOCAL_RANK] [--alpha_value ALPHA_VALUE] [--rope_freq_base ROPE_FREQ_BASE] [--compress_pos_emb COMPRESS_POS_EMB] [--listen] [--listen-port LISTEN_PORT]
-                 [--listen-host LISTEN_HOST] [--share] [--auto-launch] [--gradio-auth GRADIO_AUTH] [--gradio-auth-path GRADIO_AUTH_PATH] [--ssl-keyfile SSL_KEYFILE] [--ssl-certfile SSL_CERTFILE]
-                 [--subpath SUBPATH] [--old-colors] [--api] [--public-api] [--public-api-id PUBLIC_API_ID] [--api-port API_PORT] [--api-key API_KEY] [--admin-key ADMIN_KEY] [--api-enable-ipv6]
-                 [--api-disable-ipv4] [--nowebui]
+usage: server.py [-h] [--multi-user] [--character CHARACTER] [--model MODEL] [--lora LORA [LORA ...]] [--model-dir MODEL_DIR] [--lora-dir LORA_DIR] [--model-menu] [--settings SETTINGS]
+                 [--extensions EXTENSIONS [EXTENSIONS ...]] [--verbose] [--idle-timeout IDLE_TIMEOUT] [--loader LOADER] [--cpu] [--cpu-memory CPU_MEMORY] [--disk] [--disk-cache-dir DISK_CACHE_DIR]
+                 [--load-in-8bit] [--bf16] [--no-cache] [--trust-remote-code] [--force-safetensors] [--no_use_fast] [--use_flash_attention_2] [--use_eager_attention] [--torch-compile] [--load-in-4bit]
+                 [--use_double_quant] [--compute_dtype COMPUTE_DTYPE] [--quant_type QUANT_TYPE] [--flash-attn] [--threads THREADS] [--threads-batch THREADS_BATCH] [--batch-size BATCH_SIZE] [--no-mmap]
+                 [--mlock] [--n-gpu-layers N_GPU_LAYERS] [--tensor-split TENSOR_SPLIT] [--numa] [--no-kv-offload] [--row-split] [--extra-flags EXTRA_FLAGS] [--streaming-llm] [--ctx-size CTX_SIZE]
+                 [--model-draft MODEL_DRAFT] [--draft-max DRAFT_MAX] [--gpu-layers-draft GPU_LAYERS_DRAFT] [--device-draft DEVICE_DRAFT] [--ctx-size-draft CTX_SIZE_DRAFT] [--gpu-split GPU_SPLIT]
+                 [--autosplit] [--cfg-cache] [--no_flash_attn] [--no_xformers] [--no_sdpa] [--num_experts_per_token NUM_EXPERTS_PER_TOKEN] [--enable_tp] [--hqq-backend HQQ_BACKEND] [--cpp-runner]
+                 [--cache_type CACHE_TYPE] [--deepspeed] [--nvme-offload-dir NVME_OFFLOAD_DIR] [--local_rank LOCAL_RANK] [--alpha_value ALPHA_VALUE] [--rope_freq_base ROPE_FREQ_BASE]
+                 [--compress_pos_emb COMPRESS_POS_EMB] [--listen] [--listen-port LISTEN_PORT] [--listen-host LISTEN_HOST] [--share] [--auto-launch] [--gradio-auth GRADIO_AUTH]
+                 [--gradio-auth-path GRADIO_AUTH_PATH] [--ssl-keyfile SSL_KEYFILE] [--ssl-certfile SSL_CERTFILE] [--subpath SUBPATH] [--old-colors] [--api] [--public-api]
+                 [--public-api-id PUBLIC_API_ID] [--api-port API_PORT] [--api-key API_KEY] [--admin-key ADMIN_KEY] [--api-enable-ipv6] [--api-disable-ipv4] [--nowebui]

 Text generation web UI
@@ -206,24 +206,22 @@ Basic settings:
 --lora LORA [LORA ...] The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.
 --model-dir MODEL_DIR Path to directory with all the models.
 --lora-dir LORA_DIR Path to directory with all the loras.
---settings SETTINGS Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml, this
-file will be loaded by default without the need to use the --settings flag.
+--model-menu Show a model menu in the terminal when the web UI is first launched.
+--settings SETTINGS Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml,
+this file will be loaded by default without the need to use the --settings flag.
 --extensions EXTENSIONS [EXTENSIONS ...] The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.
 --verbose Print the prompts to the terminal.
 --idle-timeout IDLE_TIMEOUT Unload model after this many minutes of inactivity. It will be automatically reloaded when you try to use it again.

 Model loader:
---loader LOADER Choose the model loader manually, otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, ExLlamav3_HF, ExLlamav2_HF, ExLlamav2,
-HQQ, TensorRT-LLM.
+--loader LOADER Choose the model loader manually, otherwise, it will get autodetected. Valid options: Transformers, llama.cpp, ExLlamav3_HF, ExLlamav2_HF,
+ExLlamav2, HQQ, TensorRT-LLM.

 Transformers/Accelerate:
 --cpu Use the CPU to generate text. Warning: Training on CPU is extremely slow.
---auto-devices Automatically split the model across the available GPU(s) and CPU.
---gpu-memory GPU_MEMORY [GPU_MEMORY ...] Maximum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs. You can also set values
-in MiB like --gpu-memory 3500MiB.
---cpu-memory CPU_MEMORY Maximum CPU memory in GiB to allocate for offloaded weights. Same as above.
+--cpu-memory CPU_MEMORY Maximum CPU memory in GiB. Use this for CPU offloading.
 --disk If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.
---disk-cache-dir DISK_CACHE_DIR Directory to save the disk cache to. Defaults to "cache".
+--disk-cache-dir DISK_CACHE_DIR Directory to save the disk cache to. Defaults to "user_data/cache".
 --load-in-8bit Load the model with 8-bit precision (using bitsandbytes).
 --bf16 Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.
 --no-cache Set use_cache to False while generating text. This reduces VRAM usage slightly, but it comes at a performance cost.
@@ -242,7 +240,6 @@ bitsandbytes 4-bit:

 llama.cpp:
 --flash-attn Use flash-attention.
---n_ctx N_CTX Size of the prompt context.
 --threads THREADS Number of threads to use.
 --threads-batch THREADS_BATCH Number of threads to use for batches/prompt processing.
 --batch-size BATCH_SIZE Maximum number of prompt tokens to batch together when calling llama_eval.
@@ -253,11 +250,23 @@ llama.cpp:
 --numa Activate NUMA task allocation for llama.cpp.
 --no-kv-offload Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.
 --row-split Split the model by rows across GPUs. This may improve multi-gpu performance.
+--extra-flags EXTRA_FLAGS Extra flags to pass to llama-server. Format: "flag1=value1;flag2;flag3=value3". Example: "override-tensor=exps=CPU"
+--streaming-llm Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.
+
+Context and cache management:
+--ctx-size CTX_SIZE, --n_ctx CTX_SIZE, --max_seq_len CTX_SIZE
+                 Context size in tokens.
+
+Speculative decoding:
+--model-draft MODEL_DRAFT Path to the draft model for speculative decoding.
+--draft-max DRAFT_MAX Number of tokens to draft for speculative decoding.
+--gpu-layers-draft GPU_LAYERS_DRAFT Number of layers to offload to the GPU for the draft model.
+--device-draft DEVICE_DRAFT Comma-separated list of devices to use for offloading the draft model. Example: CUDA0,CUDA1
+--ctx-size-draft CTX_SIZE_DRAFT Size of the prompt context for the draft model. If 0, uses the same as the main model.

 ExLlamaV2:
 --gpu-split GPU_SPLIT Comma-separated list of VRAM (in GB) to use per GPU device for model layers. Example: 20,7,7.
 --autosplit Autosplit the model tensors across the available GPUs. This causes --gpu-split to be ignored.
---max_seq_len MAX_SEQ_LEN Maximum sequence length.
 --cfg-cache ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.
 --no_flash_attn Force flash-attention to not be used.
 --no_xformers Force xformers to not be used.
@@ -317,12 +326,13 @@ https://github.com/oobabooga/text-generation-webui/wiki
 ## Downloading models

-Models should be placed in the folder `text-generation-webui/models`. They are usually downloaded from [Hugging Face](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads).
+Models should be placed in the folder `text-generation-webui/user_data/models`. They are usually downloaded from [Hugging Face](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads).

-* GGUF models are a single file and should be placed directly into `models`. Example:
+* GGUF models are a single file and should be placed directly into `user_data/models`. Example:

 ```
 text-generation-webui
-└── models
-    └── llama-2-13b-chat.Q4_K_M.gguf
+└── user_data
+    └── models
+        └── llama-2-13b-chat.Q4_K_M.gguf
 ```
@@ -331,21 +341,22 @@ text-generation-webui
 ```
 text-generation-webui
-├── models
-│   ├── lmsys_vicuna-33b-v1.3
-│   │   ├── config.json
-│   │   ├── generation_config.json
-│   │   ├── pytorch_model-00001-of-00007.bin
-│   │   ├── pytorch_model-00002-of-00007.bin
-│   │   ├── pytorch_model-00003-of-00007.bin
-│   │   ├── pytorch_model-00004-of-00007.bin
-│   │   ├── pytorch_model-00005-of-00007.bin
-│   │   ├── pytorch_model-00006-of-00007.bin
-│   │   ├── pytorch_model-00007-of-00007.bin
-│   │   ├── pytorch_model.bin.index.json
-│   │   ├── special_tokens_map.json
-│   │   ├── tokenizer_config.json
-│   │   └── tokenizer.model
+└── user_data
+    └── models
+        └── lmsys_vicuna-33b-v1.3
+            ├── config.json
+            ├── generation_config.json
+            ├── pytorch_model-00001-of-00007.bin
+            ├── pytorch_model-00002-of-00007.bin
+            ├── pytorch_model-00003-of-00007.bin
+            ├── pytorch_model-00004-of-00007.bin
+            ├── pytorch_model-00005-of-00007.bin
+            ├── pytorch_model-00006-of-00007.bin
+            ├── pytorch_model-00007-of-00007.bin
+            ├── pytorch_model.bin.index.json
+            ├── special_tokens_map.json
+            ├── tokenizer_config.json
+            └── tokenizer.model
 ```

 In both cases, you can use the "Model" tab of the UI to download the model from Hugging Face automatically. It is also possible to download it via the command-line with:
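
The command-line route referenced here is the downloader script whose diff follows; its docstring gives the canonical example:

```
python download-model.py facebook/opt-1.3b
```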

View file

@@ -1,5 +1,5 @@
 '''
-Downloads models from Hugging Face to models/username_modelname.
+Downloads models from Hugging Face to user_data/models/username_modelname.

 Example:
 python download-model.py facebook/opt-1.3b
@@ -175,7 +175,7 @@ class ModelDownloader:
         if model_dir:
             base_folder = model_dir
         else:
-            base_folder = 'models' if not is_lora else 'loras'
+            base_folder = 'user_data/models' if not is_lora else 'user_data/loras'

         # If the model is of type GGUF, save directly in the base_folder
         if is_llamacpp:
@@ -356,7 +356,7 @@ if __name__ == '__main__':
     parser.add_argument('--specific-file', type=str, default=None, help='Name of the specific file to download (if not provided, downloads all).')
     parser.add_argument('--exclude-pattern', type=str, default=None, help='Regex pattern to exclude files from download.')
     parser.add_argument('--output', type=str, default=None, help='Save the model files to this folder.')
-    parser.add_argument('--model-dir', type=str, default=None, help='Save the model files to a subfolder of this folder instead of the default one (text-generation-webui/models).')
+    parser.add_argument('--model-dir', type=str, default=None, help='Save the model files to a subfolder of this folder instead of the default one (text-generation-webui/user_data/models).')
     parser.add_argument('--clean', action='store_true', help='Does not resume the previous download.')
     parser.add_argument('--check', action='store_true', help='Validates the checksums of model files.')
     parser.add_argument('--max-retries', type=int, default=7, help='Max retries count when get error in download time.')
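
A minimal standalone restatement of the output-folder logic changed above (the real code lives inside ModelDownloader; the /mnt path is just an example value):

```python
def resolve_base_folder(model_dir=None, is_lora=False):
    # Mirrors the base_folder branch above: an explicit --model-dir wins;
    # otherwise downloads now land under user_data/.
    if model_dir:
        return model_dir
    return 'user_data/loras' if is_lora else 'user_data/models'


assert resolve_base_folder() == 'user_data/models'
assert resolve_base_folder(is_lora=True) == 'user_data/loras'
assert resolve_base_folder(model_dir='/mnt/llm-models') == '/mnt/llm-models'
```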

View file

@@ -175,23 +175,23 @@ def ui():
     with gr.Row():
         with gr.Column():
             with gr.Row():
-                dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'])
-                create_refresh_button(dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
+                dataset = gr.Dropdown(choices=get_datasets('user_data/training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'])
+                create_refresh_button(dataset, lambda: None, lambda: {'choices': get_datasets('user_data/training/datasets', 'json')}, 'refresh-button')

             with gr.Row():
-                eval_dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'])
-                create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
+                eval_dataset = gr.Dropdown(choices=get_datasets('user_data/training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'])
+                create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': get_datasets('user_data/training/datasets', 'json')}, 'refresh-button')

         with gr.Column():
             with gr.Row():
-                format = gr.Dropdown(choices=get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'])
-                create_refresh_button(format, lambda: None, lambda: {'choices': get_datasets('training/formats', 'json')}, 'refresh-button')
+                format = gr.Dropdown(choices=get_datasets('user_data/training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'])
+                create_refresh_button(format, lambda: None, lambda: {'choices': get_datasets('user_data/training/formats', 'json')}, 'refresh-button')

             with gr.Row():
                 eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')

     with gr.Tab(label="Text file"):
         with gr.Row():
-            raw_text_file = gr.Dropdown(choices=get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The text file to use for training.', elem_classes=['slim-dropdown'])
-            create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'txt')}, 'refresh-button')
+            raw_text_file = gr.Dropdown(choices=get_datasets('user_data/training/datasets', 'txt'), value='None', label='Text file', info='The text file to use for training.', elem_classes=['slim-dropdown'])
+            create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': get_datasets('user_data/training/datasets', 'txt')}, 'refresh-button')

     with gr.Row():
         with gr.Column():
@@ -208,7 +208,7 @@ def ui():
     download_file_url = gr.Textbox(label='Download JSON or txt file to datasets (or formats) folder', value='',info='The URL of a file to download. If on github, make sure you get url of the raw file (https://raw.githubusercontent.com/...). If huggin face, make sure the url has /resolve/ in it not /blob/')
     with gr.Row():
         download_check_overwrite = gr.Checkbox(label='Overwrite', value=False, info='Overwrite if file exist')
-        download_folder = gr.Radio(label="Destination", value='training/datasets', choices=['training/datasets', 'training/formats'], interactive=True)
+        download_folder = gr.Radio(label="Destination", value='user_data/training/datasets', choices=['user_data/training/datasets', 'user_data/training/formats'], interactive=True)
         download_button = gr.Button('Download')
         download_status = gr.Textbox(label='Download Status', value='', interactive=False)
     with gr.Row():
@@ -235,7 +235,7 @@ def ui():
     with gr.Row():
         with gr.Column():
             models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True)
-            evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.')
+            evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + get_datasets('user_data/training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under user_data/training/datasets.')

     with gr.Row():
         with gr.Column():
             stride_length = gr.Slider(label='Stride', minimum=1, maximum=2048, value=512, step=1, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
@@ -310,7 +310,7 @@ def ui():
     if raw_text_file not in ['None', '']:
         logger.info("Loading Text file...")
-        fullpath = clean_path('training/datasets', f'{raw_text_file}')
+        fullpath = clean_path('user_data/training/datasets', f'{raw_text_file}')
         fullpath = Path(fullpath)
         if fullpath.is_dir():
             logger.info('Training path directory {}'.format(raw_text_file))
@@ -324,10 +324,10 @@ def ui():
                 logger.info(f"Loaded training file: {file_path.name}")
         else:
             try:
-                with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
+                with open(clean_path('user_data/training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
                     raw_text = file.read().replace('\r', '')
             except:
-                yield f"{raw_text_file}.txt doesn't seem to exsist anymore... check your training/datasets folder"
+                yield f"{raw_text_file}.txt doesn't seem to exsist anymore... check your user_data/training/datasets folder"
                 return
@@ -353,7 +353,7 @@ def ui():
             yield "Select format choice for dataset."
             return

-        with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
+        with open(clean_path('user_data/training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
             format_data: dict[str, str] = json.load(formatFile)

         def generate_prompt(data_point: dict[str, str]):
@@ -381,7 +381,7 @@ def ui():
             return tokenize_dummy(prompt)

         logger.info("Loading JSON datasets...")
-        data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
+        data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{dataset}.json'))

         data_keys = []
@@ -456,7 +456,7 @@ def ui():
     #debug_slicer.change(lambda x: non_serialized_params.update({"debug_slicer": x}), debug_slicer, None)

     def update_dataset():
-        return gr.update(choices=get_datasets('training/datasets', 'json')), gr.update(choices=get_datasets('training/datasets', 'txt'))
+        return gr.update(choices=get_datasets('user_data/training/datasets', 'json')), gr.update(choices=get_datasets('user_data/training/datasets', 'txt'))

     download_button.click(download_file_from_url, [download_file_url,download_check_overwrite,download_folder] , download_status).then(update_dataset,None,[dataset , raw_text_file])
@@ -670,7 +670,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
     if raw_text_file not in ['None', '']:
         train_template["template_type"] = "raw_text"
         logger.info("Loading text file...")
-        fullpath = clean_path('training/datasets', f'{raw_text_file}')
+        fullpath = clean_path('user_data/training/datasets', f'{raw_text_file}')
         fullpath = Path(fullpath)
         if fullpath.is_dir():
             logger.info('Training path directory {}'.format(raw_text_file))
@@ -683,7 +683,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
                 logger.info(f"Loaded training file: {file_path.name}")
         else:
-            with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
+            with open(clean_path('user_data/training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
                 raw_text = file.read().replace('\r', '')

         # FPHAM PRECISE SLICING
@@ -720,7 +720,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
         train_template["template_type"] = "dataset"

-        with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
+        with open(clean_path('user_data/training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
             format_data: dict[str, str] = json.load(formatFile)

         # == store training prompt ==
@@ -742,7 +742,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
             return tokenize(prompt, add_eos_token, add_bos_token)

         logger.info("Loading JSON datasets...")
-        data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
+        data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{dataset}.json'))
         train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))

         print(f"BOS: {add_bos_token} EOS: {add_eos_token}")
@@ -751,7 +751,7 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
         if eval_dataset == 'None':
             eval_data = None
         else:
-            eval_data = load_dataset("json", data_files=clean_path('training/datasets', f'{eval_dataset}.json'))
+            eval_data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{eval_dataset}.json'))
             eval_data = eval_data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))

     # == We MUST reload model if it went through any previous training, even failed one ==
@@ -1157,11 +1157,11 @@ def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch
                 decoded_entries.append({"value": decoded_text})

             # Write the log file
-            Path('logs').mkdir(exist_ok=True)
-            with open(Path('logs/train_dataset_sample.json'), 'w') as json_file:
+            Path('user_data/logs').mkdir(exist_ok=True)
+            with open(Path('user_data/logs/train_dataset_sample.json'), 'w') as json_file:
                 json.dump(decoded_entries, json_file, indent=4)

-            logger.info("Log file 'train_dataset_sample.json' created in the 'logs' directory.")
+            logger.info("Log file 'train_dataset_sample.json' created in the 'user_data/logs' directory.")
         except Exception as e:
             logger.error(f"Failed to create log file due to error: {e}")

View file

@@ -194,13 +194,13 @@ def precise_cut(text: str, overlap: bool, min_chars_cut: int, eos_to_hc: bool, c
     if debug_slicer:
         # Write the log file
-        Path('logs').mkdir(exist_ok=True)
+        Path('user_data/logs').mkdir(exist_ok=True)
         sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}
-        output_file = "logs/sentencelist.json"
+        output_file = "user_data/logs/sentencelist.json"
         with open(output_file, 'w') as f:
             json.dump(sentencelist_dict, f,indent=2)

-        print("Saved sentencelist.json in logs folder")
+        print("Saved sentencelist.json in user_data/logs folder")

     return sentencelist
@@ -281,13 +281,13 @@ def sliding_block_cut(text: str, min_chars_cut: int, eos_to_hc: bool, cutoff_len
     if debug_slicer:
         # Write the log file
-        Path('logs').mkdir(exist_ok=True)
+        Path('user_data/logs').mkdir(exist_ok=True)
         sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}
-        output_file = "logs/sentencelist.json"
+        output_file = "user_data/logs/sentencelist.json"
         with open(output_file, 'w') as f:
             json.dump(sentencelist_dict, f,indent=2)

-        print("Saved sentencelist.json in logs folder")
+        print("Saved sentencelist.json in user_data/logs folder")

     return sentencelist

View file

@@ -72,13 +72,13 @@ def generate_html():
     global cards
     cards = []
     # Iterate through files in image folder
-    for file in sorted(Path("characters").glob("*")):
+    for file in sorted(Path("user_data/characters").glob("*")):
         if file.suffix in [".json", ".yml", ".yaml"]:
             character = file.stem
             container_html = '<div class="character-container">'
             image_html = "<div class='placeholder'></div>"

-            for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
+            for path in [Path(f"user_data/characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
                 if path.exists():
                     image_html = f'<img src="file/{get_image_cache(path)}">'
                     break

View file

@@ -6,7 +6,7 @@ from pydantic import BaseModel, Field

 class GenerationOptions(BaseModel):
-    preset: str | None = Field(default=None, description="The name of a file under text-generation-webui/presets (without the .yaml extension). The sampling parameters that get overwritten by this option are the keys in the default_preset() function in modules/presets.py.")
+    preset: str | None = Field(default=None, description="The name of a file under text-generation-webui/user_data/presets (without the .yaml extension). The sampling parameters that get overwritten by this option are the keys in the default_preset() function in modules/presets.py.")
     dynatemp_low: float = 1
     dynatemp_high: float = 1
     dynatemp_exponent: float = 1
@@ -103,10 +103,10 @@ class ChatCompletionRequestParams(BaseModel):
     mode: str = Field(default='instruct', description="Valid options: instruct, chat, chat-instruct.")

-    instruction_template: str | None = Field(default=None, description="An instruction template defined under text-generation-webui/instruction-templates. If not set, the correct template will be automatically obtained from the model metadata.")
+    instruction_template: str | None = Field(default=None, description="An instruction template defined under text-generation-webui/user_data/instruction-templates. If not set, the correct template will be automatically obtained from the model metadata.")
     instruction_template_str: str | None = Field(default=None, description="A Jinja2 instruction template. If set, will take precedence over everything else.")

-    character: str | None = Field(default=None, description="A character defined under text-generation-webui/characters. If not set, the default \"Assistant\" character will be used.")
+    character: str | None = Field(default=None, description="A character defined under text-generation-webui/user_data/characters. If not set, the default \"Assistant\" character will be used.")
     bot_name: str | None = Field(default=None, description="Overwrites the value set by character field.", alias="name2")
     context: str | None = Field(default=None, description="Overwrites the value set by character field.")
     greeting: str | None = Field(default=None, description="Overwrites the value set by character field.")
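
API clients are unaffected by the relocation: requests still pass bare names, which the server now resolves under `user_data/`. A hedged example (the endpoint, port, and preset name are assumptions, not part of this diff):

```python
import requests

payload = {
    "mode": "chat",
    "character": "Assistant",  # resolved under user_data/characters
    "preset": "my_preset",     # hypothetical name, resolved under user_data/presets
    "messages": [{"role": "user", "content": "Hello!"}],
}
r = requests.post("http://127.0.0.1:5000/v1/chat/completions", json=payload)
print(r.json()["choices"][0]["message"]["content"])
```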

View file

@@ -395,7 +395,7 @@ let bigPictureVisible = false;
 function addBigPicture() {
   var imgElement = document.createElement("img");
   var timestamp = new Date().getTime();
-  imgElement.src = "/file/cache/pfp_character.png?time=" + timestamp;
+  imgElement.src = "/file/user_data/cache/pfp_character.png?time=" + timestamp;
   imgElement.classList.add("bigProfilePicture");
   imgElement.addEventListener("load", function () {
     this.style.visibility = "visible";

View file

@@ -2,6 +2,6 @@ function updateBigPicture() {
   var existingElement = document.querySelector(".bigProfilePicture");
   if (existingElement) {
     var timestamp = new Date().getTime();
-    existingElement.src = "/file/cache/pfp_character.png?time=" + timestamp;
+    existingElement.src = "/file/user_data/cache/pfp_character.png?time=" + timestamp;
   }
 }

View file

@@ -525,9 +525,9 @@ def start_new_chat(state):

 def get_history_file_path(unique_id, character, mode):
     if mode == 'instruct':
-        p = Path(f'logs/instruct/{unique_id}.json')
+        p = Path(f'user_data/logs/instruct/{unique_id}.json')
     else:
-        p = Path(f'logs/chat/{character}/{unique_id}.json')
+        p = Path(f'user_data/logs/chat/{character}/{unique_id}.json')

     return p
@@ -563,13 +563,13 @@ def rename_history(old_id, new_id, character, mode):

 def get_paths(state):
     if state['mode'] == 'instruct':
-        return Path('logs/instruct').glob('*.json')
+        return Path('user_data/logs/instruct').glob('*.json')
     else:
         character = state['character_menu']

         # Handle obsolete filenames and paths
-        old_p = Path(f'logs/{character}_persistent.json')
-        new_p = Path(f'logs/persistent_{character}.json')
+        old_p = Path(f'user_data/logs/{character}_persistent.json')
+        new_p = Path(f'user_data/logs/persistent_{character}.json')
         if old_p.exists():
             logger.warning(f"Renaming \"{old_p}\" to \"{new_p}\"")
             old_p.rename(new_p)
@@ -581,7 +581,7 @@ def get_paths(state):
             p.parent.mkdir(exist_ok=True)
             new_p.rename(p)

-        return Path(f'logs/chat/{character}').glob('*.json')
+        return Path(f'user_data/logs/chat/{character}').glob('*.json')


 def find_all_histories(state):
@@ -732,7 +732,7 @@ def generate_pfp_cache(character):
     if not cache_folder.exists():
         cache_folder.mkdir()

-    for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
+    for path in [Path(f"user_data/characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
         if path.exists():
             original_img = Image.open(path)
             original_img.save(Path(f'{cache_folder}/pfp_character.png'), format='PNG')
@@ -752,12 +752,12 @@ def load_character(character, name1, name2):
     filepath = None
     for extension in ["yml", "yaml", "json"]:
-        filepath = Path(f'characters/{character}.{extension}')
+        filepath = Path(f'user_data/characters/{character}.{extension}')
         if filepath.exists():
             break

     if filepath is None or not filepath.exists():
-        logger.error(f"Could not find the character \"{character}\" inside characters/. No character has been loaded.")
+        logger.error(f"Could not find the character \"{character}\" inside user_data/characters. No character has been loaded.")
         raise ValueError

     file_contents = open(filepath, 'r', encoding='utf-8').read()
@@ -796,7 +796,7 @@ def load_instruction_template(template):
     if template == 'None':
         return ''

-    for filepath in [Path(f'instruction-templates/{template}.yaml'), Path('instruction-templates/Alpaca.yaml')]:
+    for filepath in [Path(f'user_data/instruction-templates/{template}.yaml'), Path('user_data/instruction-templates/Alpaca.yaml')]:
         if filepath.exists():
             break
     else:
@@ -838,17 +838,17 @@ def upload_character(file, img, tavern=False):
     outfile_name = name
     i = 1
-    while Path(f'characters/{outfile_name}.yaml').exists():
+    while Path(f'user_data/characters/{outfile_name}.yaml').exists():
         outfile_name = f'{name}_{i:03d}'
         i += 1

-    with open(Path(f'characters/{outfile_name}.yaml'), 'w', encoding='utf-8') as f:
+    with open(Path(f'user_data/characters/{outfile_name}.yaml'), 'w', encoding='utf-8') as f:
         f.write(yaml_data)

     if img is not None:
-        img.save(Path(f'characters/{outfile_name}.png'))
+        img.save(Path(f'user_data/characters/{outfile_name}.png'))

-    logger.info(f'New character saved to "characters/{outfile_name}.yaml".')
+    logger.info(f'New character saved to "user_data/characters/{outfile_name}.yaml".')
     return gr.update(value=outfile_name, choices=get_available_characters())
@@ -923,9 +923,9 @@ def save_character(name, greeting, context, picture, filename):
         return

     data = generate_character_yaml(name, greeting, context)
-    filepath = Path(f'characters/{filename}.yaml')
+    filepath = Path(f'user_data/characters/{filename}.yaml')
     save_file(filepath, data)

-    path_to_img = Path(f'characters/{filename}.png')
+    path_to_img = Path(f'user_data/characters/{filename}.png')
     if picture is not None:
         picture.save(path_to_img)
         logger.info(f'Saved {path_to_img}.')
@@ -933,9 +933,9 @@ def save_character(name, greeting, context, picture, filename):

 def delete_character(name, instruct=False):
     for extension in ["yml", "yaml", "json"]:
-        delete_file(Path(f'characters/{name}.{extension}'))
+        delete_file(Path(f'user_data/characters/{name}.{extension}'))

-    delete_file(Path(f'characters/{name}.png'))
+    delete_file(Path(f'user_data/characters/{name}.png'))


 def jinja_template_from_old_format(params, verbose=False):
@@ -1238,7 +1238,7 @@ def handle_save_template_click(instruction_template_str):
     contents = generate_instruction_template_yaml(instruction_template_str)
     return [
         "My Template.yaml",
-        "instruction-templates/",
+        "user_data/instruction-templates/",
         contents,
         gr.update(visible=True)
     ]
@@ -1247,7 +1247,7 @@ def handle_save_template_click(instruction_template_str):
 def handle_delete_template_click(template):
     return [
         f"{template}.yaml",
-        "instruction-templates/",
+        "user_data/instruction-templates/",
         gr.update(visible=False)
     ]
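
Taken together, the history changes in this file resolve like this (a usage sketch of get_history_file_path from the first hunk above; the unique_id value is made up for illustration):

```python
>>> get_history_file_path('20250426-12-00-00', 'Assistant', 'chat')
PosixPath('user_data/logs/chat/Assistant/20250426-12-00-00.json')
>>> get_history_file_path('20250426-12-00-00', 'Assistant', 'instruct')
PosixPath('user_data/logs/instruct/20250426-12-00-00.json')
```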

View file

@@ -12,8 +12,8 @@ from modules.text_generation import encode

 def load_past_evaluations():
-    if Path('logs/evaluations.csv').exists():
-        df = pd.read_csv(Path('logs/evaluations.csv'), dtype=str)
+    if Path('user_data/logs/evaluations.csv').exists():
+        df = pd.read_csv(Path('user_data/logs/evaluations.csv'), dtype=str)
         df['Perplexity'] = pd.to_numeric(df['Perplexity'])
         return df
     else:
@@ -26,7 +26,7 @@ past_evaluations = load_past_evaluations()
 def save_past_evaluations(df):
     global past_evaluations
     past_evaluations = df
-    filepath = Path('logs/evaluations.csv')
+    filepath = Path('user_data/logs/evaluations.csv')
     filepath.parent.mkdir(parents=True, exist_ok=True)
     df.to_csv(filepath, index=False)
@@ -69,7 +69,7 @@ def calculate_perplexity(models, input_dataset, stride, _max_length):
         data = load_dataset('ptb_text_only', 'penn_treebank', split='test')
         text = " ".join(data['sentence'])
     else:
-        with open(Path(f'training/datasets/{input_dataset}.txt'), 'r', encoding='utf-8') as f:
+        with open(Path(f'user_data/training/datasets/{input_dataset}.txt'), 'r', encoding='utf-8') as f:
             text = f.read()

     for model in models:

View file

@@ -387,13 +387,13 @@ def generate_cai_chat_html(history, name1, name2, style, character, reset_cache=
     # We use ?character and ?time.time() to force the browser to reset caches
     img_bot = (
-        f'<img src="file/cache/pfp_character_thumb.png?{character}" class="pfp_character">'
-        if Path("cache/pfp_character_thumb.png").exists() else ''
+        f'<img src="file/user_data/cache/pfp_character_thumb.png?{character}" class="pfp_character">'
+        if Path("user_data/cache/pfp_character_thumb.png").exists() else ''
     )

     img_me = (
-        f'<img src="file/cache/pfp_me.png?{time.time() if reset_cache else ""}">'
-        if Path("cache/pfp_me.png").exists() else ''
+        f'<img src="file/user_data/cache/pfp_me.png?{time.time() if reset_cache else ""}">'
+        if Path("user_data/cache/pfp_me.png").exists() else ''
     )

     for i in range(len(history['visible'])):

View file

@@ -25,7 +25,7 @@ def get_fallback_settings():
 def get_model_metadata(model):
     model_settings = {}

-    # Get settings from models/config.yaml and models/config-user.yaml
+    # Get settings from user_data/models/config.yaml and user_data/models/config-user.yaml
     settings = shared.model_config
     for pat in settings:
         if re.match(pat.lower(), Path(model).name.lower()):
@@ -144,7 +144,7 @@ def get_model_metadata(model):
     if 'rope_freq_base' in model_settings and model_settings['rope_freq_base'] == 10000:
         model_settings.pop('rope_freq_base')

-    # Apply user settings from models/config-user.yaml
+    # Apply user settings from user_data/models/config-user.yaml
     settings = shared.user_config
     for pat in settings:
         if re.match(pat.lower(), Path(model).name.lower()):
@@ -223,7 +223,7 @@ def apply_model_settings_to_state(model, state):
 def save_model_settings(model, state):
     '''
-    Save the settings for this model to models/config-user.yaml
+    Save the settings for this model to user_data/models/config-user.yaml
     '''
     if model == 'None':
         yield ("Not saving the settings because no model is selected in the menu.")

View file

@@ -58,7 +58,7 @@ def presets_params():
 def load_preset(name, verbose=False):
     generate_params = default_preset()
     if name not in ['None', None, '']:
-        path = Path(f'presets/{name}.yaml')
+        path = Path(f'user_data/presets/{name}.yaml')
         if path.exists():
             with open(path, 'r') as infile:
                 preset = yaml.safe_load(infile)

View file

@@ -7,7 +7,7 @@ def load_prompt(fname):
     if fname in ['None', '']:
         return ''
     else:
-        file_path = Path(f'prompts/{fname}.txt')
+        file_path = Path(f'user_data/prompts/{fname}.txt')
         if not file_path.exists():
             return ''

View file

@@ -78,8 +78,8 @@ group.add_argument('--multi-user', action='store_true', help='Multi-user mode. C
 group.add_argument('--character', type=str, help='The name of the character to load in chat mode by default.')
 group.add_argument('--model', type=str, help='Name of the model to load by default.')
 group.add_argument('--lora', type=str, nargs='+', help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.')
-group.add_argument('--model-dir', type=str, default='models/', help='Path to directory with all the models.')
-group.add_argument('--lora-dir', type=str, default='loras/', help='Path to directory with all the loras.')
+group.add_argument('--model-dir', type=str, default='user_data/models', help='Path to directory with all the models.')
+group.add_argument('--lora-dir', type=str, default='user_data/loras', help='Path to directory with all the loras.')
 group.add_argument('--model-menu', action='store_true', help='Show a model menu in the terminal when the web UI is first launched.')
 group.add_argument('--settings', type=str, help='Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml, this file will be loaded by default without the need to use the --settings flag.')
 group.add_argument('--extensions', type=str, nargs='+', help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
@@ -95,7 +95,7 @@ group = parser.add_argument_group('Transformers/Accelerate')
 group.add_argument('--cpu', action='store_true', help='Use the CPU to generate text. Warning: Training on CPU is extremely slow.')
 group.add_argument('--cpu-memory', type=float, default=0, help='Maximum CPU memory in GiB. Use this for CPU offloading.')
 group.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
-group.add_argument('--disk-cache-dir', type=str, default='cache', help='Directory to save the disk cache to. Defaults to "cache".')
+group.add_argument('--disk-cache-dir', type=str, default='user_data/cache', help='Directory to save the disk cache to. Defaults to "user_data/cache".')
 group.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision (using bitsandbytes).')
 group.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 group.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces VRAM usage slightly, but it comes at a performance cost.')
@@ -207,7 +207,7 @@ group.add_argument('--nowebui', action='store_true', help='Do not launch the Gra
 group = parser.add_argument_group('Deprecated')

 # Handle CMD_FLAGS.txt
-cmd_flags_path = Path(__file__).parent.parent / "CMD_FLAGS.txt"
+cmd_flags_path = Path(__file__).parent.parent / "user_data" / "CMD_FLAGS.txt"
 if cmd_flags_path.exists():
     with cmd_flags_path.open('r', encoding='utf-8') as f:
         cmd_flags = ' '.join(
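
Since this module sits one directory below the repository root, the relocated flags file resolves to <repo>/user_data/CMD_FLAGS.txt. A minimal sketch of the resolution (the line filtering is truncated in the hunk above and only assumed here):

```python
from pathlib import Path

repo_root = Path(__file__).parent.parent  # one level above this module
cmd_flags_path = repo_root / "user_data" / "CMD_FLAGS.txt"
if cmd_flags_path.exists():
    with cmd_flags_path.open('r', encoding='utf-8') as f:
        # Assumed: join the non-empty lines into a single flag string.
        cmd_flags = ' '.join(line.strip() for line in f if line.strip())
```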

View file

@@ -106,23 +106,23 @@ def create_ui():
             with gr.Column():
                 with gr.Tab(label='Formatted Dataset'):
                     with gr.Row():
-                        format = gr.Dropdown(choices=utils.get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'], interactive=not mu)
-                        ui.create_refresh_button(format, lambda: None, lambda: {'choices': utils.get_datasets('training/formats', 'json')}, 'refresh-button', interactive=not mu)
+                        format = gr.Dropdown(choices=utils.get_datasets('user_data/training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'], interactive=not mu)
+                        ui.create_refresh_button(format, lambda: None, lambda: {'choices': utils.get_datasets('user_data/training/formats', 'json')}, 'refresh-button', interactive=not mu)

                     with gr.Row():
-                        dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
-                        ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button', interactive=not mu)
+                        dataset = gr.Dropdown(choices=utils.get_datasets('user_data/training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
+                        ui.create_refresh_button(dataset, lambda: None, lambda: {'choices': utils.get_datasets('user_data/training/datasets', 'json')}, 'refresh-button', interactive=not mu)

                     with gr.Row():
-                        eval_dataset = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'], interactive=not mu)
-                        ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'json')}, 'refresh-button', interactive=not mu)
+                        eval_dataset = gr.Dropdown(choices=utils.get_datasets('user_data/training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'], interactive=not mu)
+                        ui.create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': utils.get_datasets('user_data/training/datasets', 'json')}, 'refresh-button', interactive=not mu)

                     eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')

                 with gr.Tab(label="Raw text file"):
                     with gr.Row():
-                        raw_text_file = gr.Dropdown(choices=utils.get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The raw text file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
-                        ui.create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': utils.get_datasets('training/datasets', 'txt')}, 'refresh-button', interactive=not mu)
+                        raw_text_file = gr.Dropdown(choices=utils.get_datasets('user_data/training/datasets', 'txt'), value='None', label='Text file', info='The raw text file to use for training.', elem_classes=['slim-dropdown'], interactive=not mu)
+                        ui.create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': utils.get_datasets('user_data/training/datasets', 'txt')}, 'refresh-button', interactive=not mu)

                     with gr.Row():
                         with gr.Column():
@@ -143,7 +143,7 @@ def create_ui():
 with gr.Row():
     with gr.Column():
         models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True, interactive=not mu)
-        evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + utils.get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.', interactive=not mu)
+        evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + utils.get_datasets('user_data/training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under user_data/training/datasets.', interactive=not mu)

 with gr.Row():
     with gr.Column():
         stride_length = gr.Slider(label='Stride', minimum=0, maximum=32768, value=512, step=256, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
@@ -402,7 +402,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
 if raw_text_file not in ['None', '']:
     train_template["template_type"] = "raw_text"
     logger.info("Loading raw text file dataset")
-    fullpath = clean_path('training/datasets', f'{raw_text_file}')
+    fullpath = clean_path('user_data/training/datasets', f'{raw_text_file}')
     fullpath = Path(fullpath)
     if fullpath.is_dir():
         logger.info('Training path directory {}'.format(raw_text_file))
@@ -415,7 +415,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
             logger.info(f"Loaded training file: {file_path.name}")
     else:
-        with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
+        with open(clean_path('user_data/training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
             raw_text = file.read().replace('\r', '')

     cut_string = hard_cut_string.replace('\\n', '\n')
@ -460,7 +460,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
train_template["template_type"] = "dataset" train_template["template_type"] = "dataset"
with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile: with open(clean_path('user_data/training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
format_data: dict[str, str] = json.load(formatFile) format_data: dict[str, str] = json.load(formatFile)
# == store training prompt == # == store training prompt ==
@@ -482,13 +482,13 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
         return tokenize(prompt, add_eos_token)

     logger.info("Loading JSON datasets")
-    data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
+    data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{dataset}.json'))
     train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))

     if eval_dataset == 'None':
         eval_data = None
     else:
-        eval_data = load_dataset("json", data_files=clean_path('training/datasets', f'{eval_dataset}.json'))
+        eval_data = load_dataset("json", data_files=clean_path('user_data/training/datasets', f'{eval_dataset}.json'))
         eval_data = eval_data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))

 # == We MUST reload model if it went through any previous training, even failed one ==
@@ -676,11 +676,11 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
                 decoded_entries.append({"value": decoded_text})

             # Write the log file
-            Path('logs').mkdir(exist_ok=True)
-            with open(Path('logs/train_dataset_sample.json'), 'w') as json_file:
+            Path('user_data/logs').mkdir(exist_ok=True)
+            with open(Path('user_data/logs/train_dataset_sample.json'), 'w') as json_file:
                 json.dump(decoded_entries, json_file, indent=4)

-            logger.info("Log file 'train_dataset_sample.json' created in the 'logs' directory.")
+            logger.info("Log file 'train_dataset_sample.json' created in the 'user_data/logs' directory.")
         except Exception as e:
             logger.error(f"Failed to create log file due to error: {e}")
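The training hunks above route every dataset and format lookup through `clean_path` under the new `user_data/` prefix. `clean_path` itself is defined elsewhere in the repository and is not shown in this diff; purely as a reading aid, here is a minimal sketch, assuming the helper only sanitizes the user-supplied name and joins it onto the base directory:

```python
from pathlib import Path


def clean_path(base_dir: str, filename: str) -> str:
    # Hypothetical sketch, not the repository's implementation:
    # keep only the final path component of the user-supplied name,
    # then resolve it under base_dir.
    safe_name = Path(filename).name
    return str(Path(base_dir) / safe_name)


# clean_path('user_data/training/datasets', 'alpaca.json')
# -> 'user_data/training/datasets/alpaca.json'
```

If the real helper does more (for example, rejecting absolute paths outright), this sketch understates it; only the call sites are visible in this diff.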
@@ -249,7 +249,7 @@ def load_model_HF(model_name):
     )

 if shared.args.disk:
-    params['offload_folder'] = shared.args.disk_cache_dir
+    params['offload_folder'] = str(Path(shared.args.disk_cache_dir))

 if shared.args.compress_pos_emb > 1:
     params['rope_scaling'] = {'type': 'linear', 'factor': shared.args.compress_pos_emb}
@@ -94,7 +94,7 @@ if not shared.args.old_colors:
         input_radius='0.375rem',
     )

-if Path("notification.mp3").exists():
+if Path("user_data/notification.mp3").exists():
     audio_notification_js = "document.querySelector('#audio_notification audio')?.play();"
 else:
     audio_notification_js = ""
@@ -146,7 +146,7 @@ def create_chat_settings_ui():
 with gr.Column(scale=1):
     shared.gradio['character_picture'] = gr.Image(label='Character picture', type='pil', interactive=not mu)
-    shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('cache/pfp_me.png')) if Path('cache/pfp_me.png').exists() else None, interactive=not mu)
+    shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('user_data/cache/pfp_me.png')) if Path('user_data/cache/pfp_me.png').exists() else None, interactive=not mu)

 with gr.Tab('Instruction template'):
     with gr.Row():
@@ -102,7 +102,7 @@ def handle_save_prompt(text):
     return [
         text,
         utils.current_time() + ".txt",
-        "prompts/",
+        "user_data/prompts/",
         gr.update(visible=True)
     ]

@@ -110,6 +110,6 @@ def handle_save_prompt(text):
 def handle_delete_prompt(prompt):
     return [
         prompt + ".txt",
-        "prompts/",
+        "user_data/prompts/",
         gr.update(visible=True)
     ]
@@ -28,7 +28,7 @@ def create_ui():
 # Character saver/deleter
 with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['character_saver']:
-    shared.gradio['save_character_filename'] = gr.Textbox(lines=1, label='File name', info='The character will be saved to your characters/ folder with this base filename.')
+    shared.gradio['save_character_filename'] = gr.Textbox(lines=1, label='File name', info='The character will be saved to your user_data/characters folder with this base filename.')
     with gr.Row():
         shared.gradio['save_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
         shared.gradio['save_character_confirm'] = gr.Button('Save', elem_classes="small-button", variant='primary', interactive=not mu)

@@ -41,7 +41,7 @@ def create_ui():
 # Preset saver
 with gr.Group(visible=False, elem_classes='file-saver') as shared.gradio['preset_saver']:
-    shared.gradio['save_preset_filename'] = gr.Textbox(lines=1, label='File name', info='The preset will be saved to your presets/ folder with this base filename.')
+    shared.gradio['save_preset_filename'] = gr.Textbox(lines=1, label='File name', info='The preset will be saved to your user_data/presets folder with this base filename.')
     shared.gradio['save_preset_contents'] = gr.Textbox(lines=10, label='File contents')
     with gr.Row():
         shared.gradio['save_preset_cancel'] = gr.Button('Cancel', elem_classes="small-button")

@@ -72,7 +72,7 @@ def create_event_handlers():
 def handle_save_preset_confirm_click(filename, contents):
     try:
-        utils.save_file(f"presets/{filename}.yaml", contents)
+        utils.save_file(f"user_data/presets/{filename}.yaml", contents)
         available_presets = utils.get_available_presets()
         output = gr.update(choices=available_presets, value=filename)
     except Exception:

@@ -145,7 +145,7 @@ def handle_save_preset_click(state):
 def handle_delete_preset_click(preset):
     return [
         f"{preset}.yaml",
-        "presets/",
+        "user_data/presets/",
         gr.update(visible=True)
     ]

@@ -154,7 +154,7 @@ def handle_save_grammar_click(grammar_string):
     return [
         grammar_string,
         "My Fancy Grammar.gbnf",
-        "grammars/",
+        "user_data/grammars/",
         gr.update(visible=True)
     ]

@@ -162,6 +162,6 @@ def handle_save_grammar_click(grammar_string):
 def handle_delete_grammar_click(grammar_file):
     return [
         grammar_file,
-        "grammars/",
+        "user_data/grammars/",
         gr.update(visible=True)
     ]
@@ -223,9 +223,9 @@ def download_model_wrapper(repo_id, specific_file, progress=gr.Progress(), retur
         model_dir=shared.args.model_dir if shared.args.model_dir != shared.args_defaults.model_dir else None
     )

-    if output_folder == Path("models"):
+    if output_folder == Path("user_data/models"):
         output_folder = Path(shared.args.model_dir)
-    elif output_folder == Path("loras"):
+    elif output_folder == Path("user_data/loras"):
         output_folder = Path(shared.args.lora_dir)

     if check:
@@ -128,7 +128,7 @@ def get_truncation_length():
 def load_grammar(name):
-    p = Path(f'grammars/{name}')
+    p = Path(f'user_data/grammars/{name}')
     if p.exists():
         return open(p, 'r', encoding='utf-8').read()
     else:
@@ -48,7 +48,7 @@ def handle_save_settings(state, preset, extensions, show_controls, theme):
     return [
         contents,
         "settings.yaml",
-        "./",
+        "./user_data",
         gr.update(visible=True)
     ]
@@ -98,7 +98,7 @@ def get_available_models():
     dirs_with_gguf = set()
     for gguf_path in gguf_files:
         path = Path(gguf_path)
-        if path.parts:
+        if len(path.parts) > 0:
             dirs_with_gguf.add(path.parts[0])

     # Find directories with safetensors files

@@ -141,11 +141,11 @@ def get_available_ggufs():
 def get_available_presets():
-    return sorted(set((k.stem for k in Path('presets').glob('*.yaml'))), key=natural_keys)
+    return sorted(set((k.stem for k in Path('user_data/presets').glob('*.yaml'))), key=natural_keys)


 def get_available_prompts():
-    prompt_files = list(Path('prompts').glob('*.txt'))
+    prompt_files = list(Path('user_data/prompts').glob('*.txt'))
     sorted_files = sorted(prompt_files, key=lambda x: x.stat().st_mtime, reverse=True)
     prompts = [file.stem for file in sorted_files]
     prompts.append('None')

@@ -153,12 +153,12 @@ def get_available_prompts():
 def get_available_characters():
-    paths = (x for x in Path('characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
+    paths = (x for x in Path('user_data/characters').iterdir() if x.suffix in ('.json', '.yaml', '.yml'))
     return sorted(set((k.stem for k in paths)), key=natural_keys)


 def get_available_instruction_templates():
-    path = "instruction-templates"
+    path = "user_data/instruction-templates"
     paths = []
     if os.path.exists(path):
         paths = (x for x in Path(path).iterdir() if x.suffix in ('.json', '.yaml', '.yml'))

@@ -189,4 +189,4 @@ def get_available_chat_styles():
 def get_available_grammars():
-    return ['None'] + sorted([item.name for item in list(Path('grammars').glob('*.gbnf'))], key=natural_keys)
+    return ['None'] + sorted([item.name for item in list(Path('user_data/grammars').glob('*.gbnf'))], key=natural_keys)
@@ -293,10 +293,10 @@ def install_webui():
     # Write a flag to CMD_FLAGS.txt for CPU mode
     if selected_gpu == "NONE":
-        cmd_flags_path = os.path.join(script_dir, "CMD_FLAGS.txt")
+        cmd_flags_path = os.path.join(script_dir, "user_data", "CMD_FLAGS.txt")
         with open(cmd_flags_path, 'r+') as cmd_flags_file:
             if "--cpu" not in cmd_flags_file.read():
-                print_big_message("Adding the --cpu flag to CMD_FLAGS.txt.")
+                print_big_message("Adding the --cpu flag to user_data/CMD_FLAGS.txt.")
                 cmd_flags_file.write("\n--cpu\n")

     # Handle CUDA version display

@@ -532,7 +532,7 @@ if __name__ == "__main__":
         flags_list = re.split(' +(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)|=', flags)
         model_dir = [flags_list[(flags_list.index(flag) + 1)] for flag in flags_list if flag == '--model-dir'][0].strip('"\'')
     else:
-        model_dir = 'models'
+        model_dir = 'user_data/models'

     if len([item for item in glob.glob(f'{model_dir}/*') if not item.endswith(('.txt', '.yaml'))]) == 0:
         print_big_message("You haven't downloaded any model yet.\nOnce the web UI launches, head over to the \"Model\" tab and download one.")
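Every hunk in this commit follows the same pattern: a top-level user file or folder moves under user_data/. For an existing install, a one-off helper along the following lines could replay those renames. This script is not part of the commit and is only a hedged sketch; the mapping lists moves visible in the hunks above, and anything else should be checked against the full 116-file diff before running it.

```python
import shutil
from pathlib import Path

# Hypothetical migration helper, not part of this commit. Each entry
# mirrors a rename that appears in the hunks above. Back up first.
OLD_TO_NEW = {
    'models': 'user_data/models',
    'loras': 'user_data/loras',
    'presets': 'user_data/presets',
    'prompts': 'user_data/prompts',
    'characters': 'user_data/characters',
    'grammars': 'user_data/grammars',
    'instruction-templates': 'user_data/instruction-templates',
    'training/datasets': 'user_data/training/datasets',
    'training/formats': 'user_data/training/formats',
    'CMD_FLAGS.txt': 'user_data/CMD_FLAGS.txt',
    'settings.yaml': 'user_data/settings.yaml',
    'settings.json': 'user_data/settings.json',
    'notification.mp3': 'user_data/notification.mp3',
}


def migrate():
    for old, new in OLD_TO_NEW.items():
        src, dst = Path(old), Path(new)
        if src.exists() and not dst.exists():
            dst.parent.mkdir(parents=True, exist_ok=True)
            shutil.move(str(src), str(dst))  # works for files and dirs
            print(f'moved {src} -> {dst}')


if __name__ == '__main__':
    migrate()
```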
@@ -94,8 +94,8 @@ def create_interface():
         'filter_by_loader': shared.args.loader or 'All'
     })

-    if Path("cache/pfp_character.png").exists():
-        Path("cache/pfp_character.png").unlink()
+    if Path("user_data/cache/pfp_character.png").exists():
+        Path("user_data/cache/pfp_character.png").unlink()

     # css/js strings
     css = ui.css

@@ -112,8 +112,8 @@ def create_interface():
     shared.gradio['interface_state'] = gr.State({k: None for k in shared.input_elements})

     # Audio notification
-    if Path("notification.mp3").exists():
-        shared.gradio['audio_notification'] = gr.Audio(interactive=False, value="notification.mp3", elem_id="audio_notification", visible=False)
+    if Path("user_data/notification.mp3").exists():
+        shared.gradio['audio_notification'] = gr.Audio(interactive=False, value="user_data/notification.mp3", elem_id="audio_notification", visible=False)

     # Floating menus for saving/deleting files
     ui_file_saving.create_ui()

@@ -179,7 +179,7 @@ def create_interface():
         ssl_keyfile=shared.args.ssl_keyfile,
         ssl_certfile=shared.args.ssl_certfile,
         root_path=shared.args.subpath,
-        allowed_paths=["cache", "css", "extensions", "js"]
+        allowed_paths=["css", "js", "extensions", "user_data/cache"]
     )

@@ -192,10 +192,10 @@ if __name__ == "__main__":
     settings_file = None
     if shared.args.settings is not None and Path(shared.args.settings).exists():
         settings_file = Path(shared.args.settings)
-    elif Path('settings.yaml').exists():
-        settings_file = Path('settings.yaml')
-    elif Path('settings.json').exists():
-        settings_file = Path('settings.json')
+    elif Path('user_data/settings.yaml').exists():
+        settings_file = Path('user_data/settings.yaml')
+    elif Path('user_data/settings.json').exists():
+        settings_file = Path('user_data/settings.json')

     if settings_file is not None:
         logger.info(f"Loading settings from \"{settings_file}\"")
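The last hunk above changes where the server searches for a settings file. A minimal sketch of the resulting lookup order, assuming the surrounding argument parsing stays as shown in the hunk:

```python
from pathlib import Path
from typing import Optional


def resolve_settings_file(cli_settings: Optional[str]) -> Optional[Path]:
    """Sketch of the lookup order in the hunk above: an explicit
    --settings path wins, then user_data/settings.yaml, then
    user_data/settings.json; None means fall back to defaults."""
    if cli_settings is not None and Path(cli_settings).exists():
        return Path(cli_settings)
    for candidate in ('user_data/settings.yaml', 'user_data/settings.json'):
        if Path(candidate).exists():
            return Path(candidate)
    return None
```

Launching with --settings path/to/file.yaml therefore still takes precedence over both user_data locations.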
[Binary image file changed; 206 KiB before and after.]
Some files were not shown because too many files have changed in this diff.