Revert "Dynamic Chat Message UI Update Speed (#6952)" (for now)

This reverts commit 8137eb8ef4.
commit 126b3a768f
parent 9d7a36356d
Author: oobabooga
Date:   2025-05-18 12:38:36 -07:00

5 changed files with 15 additions and 8 deletions

@@ -47,6 +47,7 @@ settings = {
     'max_new_tokens_max': 4096,
     'prompt_lookup_num_tokens': 0,
     'max_tokens_second': 0,
+    'max_updates_second': 12,
     'auto_max_new_tokens': True,
     'ban_eos_token': False,
     'add_bos_token': True,
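
Note: the restored default of 12 updates per second corresponds to a minimum
interval of 1/12 of a second between streamed UI updates. A quick check of the
arithmetic, in plain Python independent of the webui:

    max_updates_second = 12
    min_update_interval = 1 / max_updates_second  # one update every ~83 ms
    print(f"{min_update_interval:.3f}")           # 0.083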

@@ -65,39 +65,41 @@ def _generate_reply(question, state, stopping_strings=None, is_chat=False, escap
             all_stop_strings += st
 
     shared.stop_everything = False
+    last_update = -1
     reply = ''
     is_stream = state['stream']
     if len(all_stop_strings) > 0 and not state['stream']:
         state = copy.deepcopy(state)
         state['stream'] = True
 
+    min_update_interval = 0
+    if state.get('max_updates_second', 0) > 0:
+        min_update_interval = 1 / state['max_updates_second']
+
     # Generate
-    last_update = -1
-    latency_threshold = 1 / 1000
     for reply in generate_func(question, original_question, state, stopping_strings, is_chat=is_chat):
-        cur_time = time.monotonic()
         reply, stop_found = apply_stopping_strings(reply, all_stop_strings)
         if escape_html:
             reply = html.escape(reply)
 
         if is_stream:
+            cur_time = time.time()
+
             # Limit number of tokens/second to make text readable in real time
             if state['max_tokens_second'] > 0:
                 diff = 1 / state['max_tokens_second'] - (cur_time - last_update)
                 if diff > 0:
                     time.sleep(diff)
 
-                last_update = time.monotonic()
+                last_update = time.time()
                 yield reply
 
             # Limit updates to avoid lag in the Gradio UI
             # API updates are not limited
             else:
-                # If 'generate_func' takes less than 0.001 seconds to yield the next token
-                # (equivalent to more than 1000 tok/s), assume that the UI is lagging behind and skip yielding
-                if (cur_time - last_update) > latency_threshold:
+                if cur_time - last_update > min_update_interval:
+                    last_update = cur_time
                     yield reply
-                    last_update = time.monotonic()
 
         if stop_found or (state['max_tokens_second'] > 0 and shared.stop_everything):
             break
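
Note: for reference, the restored throttling pattern in isolation. This is a
minimal self-contained sketch with a fake token generator, not the
repository's code; generate_func, stop strings, and state handling are
omitted. It shows how updates arriving faster than min_update_interval are
coalesced instead of being yielded one by one:

    import time

    def fake_tokens(n=200, delay=0.005):
        """Simulate a model extending its reply every 5 ms (~200 tok/s)."""
        reply = ''
        for i in range(n):
            time.sleep(delay)
            reply += f' tok{i}'
            yield reply

    def throttled_stream(token_stream, max_updates_second=12):
        """Yield at most max_updates_second updates per second (0 = no cap)."""
        min_update_interval = 0
        if max_updates_second > 0:
            min_update_interval = 1 / max_updates_second

        last_update = -1
        reply = ''
        for reply in token_stream:
            cur_time = time.time()
            if cur_time - last_update > min_update_interval:
                last_update = cur_time
                yield reply

        yield reply  # the final reply is always emitted

    print(sum(1 for _ in throttled_stream(fake_tokens())))
    # ~13-14 updates over ~1 s of generation instead of 200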

@@ -192,6 +192,7 @@ def list_interface_input_elements():
         'max_new_tokens',
         'prompt_lookup_num_tokens',
         'max_tokens_second',
+        'max_updates_second',
         'do_sample',
         'dynamic_temperature',
         'temperature_last',
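
Note: registering 'max_updates_second' in this list is what routes the
slider's value into the state dict that _generate_reply reads via
state.get('max_updates_second', 0). A hypothetical sketch of that gather
pattern (names are illustrative, not the webui's actual implementation):

    def gather_interface_values(element_names, ui_values):
        """Pair registered element names with their current UI values."""
        return dict(zip(element_names, ui_values))

    state = gather_interface_values(
        ['max_new_tokens', 'max_tokens_second', 'max_updates_second'],
        [512, 0, 12],
    )
    assert state['max_updates_second'] == 12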

@@ -71,6 +71,8 @@ def create_ui(default_preset):
                     shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], value=shared.settings['max_new_tokens'], step=1, label='max_new_tokens', info='⚠️ Setting this too high can cause prompt truncation.')
                     shared.gradio['prompt_lookup_num_tokens'] = gr.Slider(value=shared.settings['prompt_lookup_num_tokens'], minimum=0, maximum=10, step=1, label='prompt_lookup_num_tokens', info='Activates Prompt Lookup Decoding.')
                     shared.gradio['max_tokens_second'] = gr.Slider(value=shared.settings['max_tokens_second'], minimum=0, maximum=20, step=1, label='Maximum tokens/second', info='To make text readable in real time.')
+                    shared.gradio['max_updates_second'] = gr.Slider(value=shared.settings['max_updates_second'], minimum=0, maximum=24, step=1, label='Maximum UI updates/second', info='Set this if you experience lag in the UI during streaming.')
+
                 with gr.Column():
                     with gr.Row():
                         with gr.Column():
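
Note: the control itself is a plain Gradio slider. A minimal standalone
sketch of the restored widget, runnable on its own (it omits the webui's
shared.gradio registry and settings plumbing):

    import gradio as gr

    with gr.Blocks() as demo:
        # Same parameters as the restored slider; 12 is the settings default.
        gr.Slider(value=12, minimum=0, maximum=24, step=1,
                  label='Maximum UI updates/second',
                  info='Set this if you experience lag in the UI during streaming.')

    demo.launch()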

@@ -18,6 +18,7 @@ max_new_tokens_min: 1
 max_new_tokens_max: 4096
 prompt_lookup_num_tokens: 0
 max_tokens_second: 0
+max_updates_second: 12
 auto_max_new_tokens: true
 ban_eos_token: false
 add_bos_token: true
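
Note: the YAML above provides the user-facing default. A minimal sketch of
loading such a file and overlaying a user override with PyYAML; the file name
and merge order are assumptions, not the webui's actual loading code:

    import yaml

    with open('settings.yaml') as f:            # hypothetical file name
        defaults = yaml.safe_load(f)

    user_overrides = {'max_updates_second': 6}  # hypothetical override
    settings = {**defaults, **user_overrides}   # user values win
    print(settings['max_updates_second'])       # -> 6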