response | instruction
---|---
Returns a javascript string that gets appended to the javascript
for the webui. | def custom_js():
"""
Returns a javascript string that gets appended to the javascript
for the webui.
"""
return '' |
Gets executed only once, when the extension is imported. | def setup():
"""
Gets executed only once, when the extension is imported.
"""
pass |
Gets executed when the UI is drawn. Custom gradio elements and
their corresponding event handlers should be defined here.
To learn about gradio components, check out the docs:
https://gradio.app/docs/ | def ui():
"""
Gets executed when the UI is drawn. Custom gradio elements and
their corresponding event handlers should be defined here.
To learn about gradio components, check out the docs:
https://gradio.app/docs/
"""
pass |
This function is applied to your text inputs before
they are fed into the model. | def input_modifier(string):
"""
This function is applied to your text inputs before
they are fed into the model.
"""
if not params['activate']:
return string
return GoogleTranslator(source=params['language string'], target='en').translate(string) |
This function is applied to the model outputs. | def output_modifier(string):
"""
This function is applied to the model outputs.
"""
if not params['activate']:
return string
translated_str = GoogleTranslator(source='en', target=params['language string']).translate(html.unescape(string))
return html.escape(translated_str) |
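Both translation hooks above assume a module-level `params` dict and the `deep_translator` package providing `GoogleTranslator`; a minimal sketch of that assumed setup (the key names come from the snippets, the default values are illustrative):
import html
from deep_translator import GoogleTranslator

# Assumed extension defaults; 'language string' is the user's language code (illustrative value)
params = {
    'activate': True,
    'language string': 'ja',
}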
This function is only applied in chat mode. It modifies
the prefix text for the Bot and can be used to bias its
behavior. | def bot_prefix_modifier(string):
"""
This function is only applied in chat mode. It modifies
the prefix text for the Bot and can be used to bias its
behavior.
"""
return string |
Modifies the chat history.
Only used in chat mode. | def history_modifier(history):
"""
Modifies the chat history.
Only used in chat mode.
"""
return history |
Modifies the state variable, which is a dictionary containing the input
values in the UI like sliders and checkboxes. | def state_modifier(state):
"""
Modifies the state variable, which is a dictionary containing the input
values in the UI like sliders and checkboxes.
"""
return state |
Modifies the user input string in chat mode (visible_text).
You can also modify the internal representation of the user
input (text) to change how it will appear in the prompt. | def chat_input_modifier(text, visible_text, state):
"""
Modifies the user input string in chat mode (visible_text).
You can also modify the internal representation of the user
input (text) to change how it will appear in the prompt.
"""
return text, visible_text |
In default/notebook modes, modifies the whole prompt.
In chat mode, it is the same as chat_input_modifier but only applied
to "text", here called "string", and not to "visible_text". | def input_modifier(string, state):
"""
In default/notebook modes, modifies the whole prompt.
In chat mode, it is the same as chat_input_modifier but only applied
to "text", here called "string", and not to "visible_text".
"""
return string |
Modifies the prefix for the next bot reply in chat mode.
By default, the prefix will be something like "Bot Name:". | def bot_prefix_modifier(string, state):
"""
Modifies the prefix for the next bot reply in chat mode.
By default, the prefix will be something like "Bot Name:".
"""
return string |
Modifies the input ids and embeds.
Used by the multimodal extension to put image embeddings in the prompt.
Only used by loaders that use the transformers library for sampling. | def tokenizer_modifier(state, prompt, input_ids, input_embeds):
"""
Modifies the input ids and embeds.
Used by the multimodal extension to put image embeddings in the prompt.
Only used by loaders that use the transformers library for sampling.
"""
global initial_size
initial_size = input_ids.shape[-1]
return prompt, input_ids, input_embeds |
Adds logits processors to the list, allowing you to access and modify
the next token probabilities.
Only used by loaders that use the transformers library for sampling. | def logits_processor_modifier(processor_list, input_ids):
"""
Adds logits processors to the list, allowing you to access and modify
the next token probabilities.
Only used by loaders that use the transformers library for sampling.
"""
processor_list.append(MyLogits())
return processor_list |
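`MyLogits` is referenced but not defined in this snippet. Any processor appended this way is expected to follow the `transformers` `LogitsProcessor` interface; a minimal illustrative sketch (not the original class):
import torch
from transformers import LogitsProcessor

class MyLogits(LogitsProcessor):
    # Called once per generation step with the running input_ids and the raw
    # next-token scores; whatever is returned replaces the scores.
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        return scores  # no-op placeholder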
Modifies the LLM output before it gets presented.
In chat mode, the modified version goes into history['visible'],
and the original version goes into history['internal']. | def output_modifier(string, state):
"""
Modifies the LLM output before it gets presented.
In chat mode, the modified version goes into history['visible'],
and the original version goes into history['internal'].
"""
return string |
Replaces the function that generates the prompt from the chat history.
Only used in chat mode. | def custom_generate_chat_prompt(user_input, state, **kwargs):
"""
Replaces the function that generates the prompt from the chat history.
Only used in chat mode.
"""
result = chat.generate_chat_prompt(user_input, state, **kwargs)
return result |
Returns a CSS string that gets appended to the CSS for the webui. | def custom_css():
"""
Returns a CSS string that gets appended to the CSS for the webui.
"""
return '' |
Returns a javascript string that gets appended to the javascript
for the webui. | def custom_js():
"""
Returns a javascript string that gets appended to the javascript
for the webui.
"""
return '' |
Gets executed only once, when the extension is imported. | def setup():
"""
Gets executed only once, when the extension is imported.
"""
pass |
Gets executed when the UI is drawn. Custom gradio elements and
their corresponding event handlers should be defined here.
To learn about gradio components, check out the docs:
https://gradio.app/docs/ | def ui():
"""
Gets executed when the UI is drawn. Custom gradio elements and
their corresponding event handlers should be defined here.
To learn about gradio components, check out the docs:
https://gradio.app/docs/
"""
min_length = gr.Slider(0, 800, step=10, value=params['min_length'], label='Minimum reply length')
min_length.change(lambda x: params.update({'min_length': x}), min_length, None) |
Chat histories in this program are in the format [message, reply].
This function converts OpenAI histories to that format. | def convert_history(history):
'''
Chat histories in this program are in the format [message, reply].
This function converts OpenAI histories to that format.
'''
chat_dialogue = []
current_message = ""
current_reply = ""
user_input = ""
user_input_last = True
system_message = ""
# Multimodal: convert OpenAI format to multimodal extension format
if any('content' in entry and isinstance(entry['content'], list) for entry in history):
new_history = []
for entry in history:
if isinstance(entry['content'], list):
image_url = None
content = None
for item in entry['content']:
if not isinstance(item, dict):
continue
if item['type'] == 'image_url' and isinstance(item['image_url'], dict):
image_url = item['image_url']['url']
elif item['type'] == 'text' and isinstance(item['text'], str):
content = item['text']
if image_url and content:
new_history.append({"image_url": image_url, "role": "user"})
new_history.append({"content": content, "role": "user"})
else:
new_history.append(entry)
history = new_history
for entry in history:
if "image_url" in entry:
image_url = entry['image_url']
if "base64" in image_url:
image_url = re.sub('^data:image/.+;base64,', '', image_url)
img = Image.open(BytesIO(base64.b64decode(image_url)))
else:
try:
my_res = requests.get(image_url)
img = Image.open(BytesIO(my_res.content))
except Exception:
raise ValueError('Image cannot be loaded from the URL!')
buffered = BytesIO()
if img.mode in ("RGBA", "P"):
img = img.convert("RGB")
img.save(buffered, format="JPEG")
img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
content = f'<img src="data:image/jpeg;base64,{img_str}">'
else:
content = entry["content"]
role = entry["role"]
if role == "user":
user_input = content
user_input_last = True
if current_message:
chat_dialogue.append([current_message, ''])
current_message = ""
current_message = content
elif role == "assistant":
current_reply = content
user_input_last = False
if current_message:
chat_dialogue.append([current_message, current_reply])
current_message = ""
current_reply = ""
else:
chat_dialogue.append(['', current_reply])
elif role == "system":
system_message = content
if not user_input_last:
user_input = ""
return user_input, system_message, {'internal': chat_dialogue, 'visible': copy.deepcopy(chat_dialogue)} |
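A hand-traced example of what `convert_history` returns for a short text-only OpenAI-style history (the expected values follow from the logic above):
history = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "How are you?"},
]
user_input, system_message, hist = convert_history(history)
# user_input       -> "How are you?"  (the trailing user turn, kept out of the dialogue)
# system_message   -> "You are helpful."
# hist['internal'] -> [["Hi", "Hello!"]]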
Uses 'lazy loading' to avoid a circular import,
so this function will be executed only once. | def initialize_embedding_params():
'''
Uses 'lazy loading' to avoid a circular import,
so this function will be executed only once.
'''
global embeddings_params_initialized
if not embeddings_params_initialized:
from extensions.openai.script import params
global st_model, embeddings_model, embeddings_device
st_model = os.environ.get("OPENEDAI_EMBEDDING_MODEL", params.get('embedding_model', 'all-mpnet-base-v2'))
embeddings_model = None
# OPENEDAI_EMBEDDING_DEVICE: auto (best or cpu), cpu, cuda, ipu, xpu, mkldnn, opengl, opencl, ideep, hip, ve, fpga, ort, xla, lazy, vulkan, mps, meta, hpu, mtia, privateuseone
embeddings_device = os.environ.get("OPENEDAI_EMBEDDING_DEVICE", params.get('embedding_device', 'cpu'))
if embeddings_device.lower() == 'auto':
embeddings_device = None
embeddings_params_initialized = True |
Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31 | def handle_billing_usage():
'''
Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31
'''
return JSONResponse(content={"total_usage": 0}) |
Green-yellow-red color scale | def probability_color_scale(prob):
'''
Green-yellow-red color scale
'''
rv = 0
gv = 0
if prob <= 0.5:
rv = 'ff'
gv = hex(int(255 * prob * 2))[2:]
if len(gv) < 2:
gv = '0' * (2 - len(gv)) + gv
else:
rv = hex(int(255 - 255 * (prob - 0.5) * 2))[2:]
gv = 'ff'
if len(rv) < 2:
rv = '0' * (2 - len(rv)) + rv
return rv + gv + '00' |
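Hand-computed sample outputs of this scale (hex RRGGBB strings without a leading '#'):
probability_color_scale(0.0)  # -> 'ff0000' (red)
probability_color_scale(0.5)  # -> 'ffff00' (yellow)
probability_color_scale(1.0)  # -> '00ff00' (green)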
Red component only, white for 0 perplexity (sorry if you're not in dark mode) | def perplexity_color_scale(ppl):
'''
Red component only, white for 0 perplexity (sorry if you're not in dark mode)
'''
value = hex(max(int(255.0 - params['ppl_scale'] * (float(ppl) - 1.0)), 0))[2:]
if len(value) < 2:
value = '0' * (2 - len(value)) + value
return 'ff' + value + value |
Green-yellow-red for probability and blue component for perplexity | def probability_perplexity_color_scale(prob, ppl):
'''
Green-yellow-red for probability and blue component for perplexity
'''
rv = 0
gv = 0
bv = hex(min(max(int(params['ppl_scale'] * (float(ppl) - 1.0)), 0), 255))[2:]
if len(bv) < 2:
bv = '0' * (2 - len(bv)) + bv
if prob <= 0.5:
rv = 'ff'
gv = hex(int(255 * prob * 2))[2:]
if len(gv) < 2:
gv = '0' * (2 - len(gv)) + gv
else:
rv = hex(int(255 - 255 * (prob - 0.5) * 2))[2:]
gv = 'ff'
if len(rv) < 2:
rv = '0' * (2 - len(rv)) + rv
return rv + gv + bv |
This function is applied to your text inputs before
they are fed into the model. | def input_modifier(string):
"""
This function is applied to your text inputs before
they are fed into the model.
"""
global params
if not params['mode'] == 1: # if not in immersive/interactive mode, do nothing
return string
if triggers_are_in(string): # if we're in it, check for trigger words
toggle_generation(True)
string = string.lower()
if "of" in string:
subject = string.split('of', 1)[1] # subdivide the string once by the first 'of' instance and get what's coming after it
string = params['textgen_prefix'].replace("[subject]", subject)
else:
string = params['textgen_prefix'].replace("[subject]", "your appearance, your surroundings and what you are doing right now")
return string |
This function is applied to the model outputs. | def output_modifier(string, state):
"""
This function is applied to the model outputs.
"""
global picture_response, params
if not picture_response:
return string
string = remove_surrounded_chars(string)
string = string.replace('"', '')
string = string.replace('“', '')
string = string.replace('\n', ' ')
string = string.strip()
if string == '':
string = 'no viable description in reply, try regenerating'
return string
text = ""
if (params['mode'] < 2):
toggle_generation(False)
text = f'*Sends a picture which portrays: “{string}”*'
else:
text = string
string = get_SD_pictures(string, state['character_menu']) + "\n" + text
return string |
This function is only applied in chat mode. It modifies
the prefix text for the Bot and can be used to bias its
behavior. | def bot_prefix_modifier(string):
"""
This function is only applied in chat mode. It modifies
the prefix text for the Bot and can be used to bias its
behavior.
"""
return string |
This function is applied to the model outputs. | def output_modifier(string):
"""
This function is applied to the model outputs.
"""
global model, current_params
original_string = string
string = tts_preprocessor.preprocess(string)
processed_string = string
if string == '':
string = '*Empty reply, try regenerating*'
else:
output_file = Path(f'extensions/silero_tts/outputs/test_{int(time.time())}.wav')
prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch'])
silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'
model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))
autoplay = 'autoplay' if params['autoplay'] else ''
string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
if params['show_text']:
string += f'\n\n{original_string}\n\nProcessed:\n{processed_string}'
print(string)
return string |
This function takes a corpus of text and splits it into chunks of a specified length,
then adds a specified amount of context to each chunk. The context is added by first
going backwards from the start of the chunk and then going forwards from the end of the
chunk, ensuring that the context includes only whole words and that the total context length
does not exceed the specified limit. This function uses binary search for efficiency.
Returns:
chunks (list of str): The chunks of text.
chunks_with_context (list of str): The chunks of text with added context.
chunk_with_context_start_indices (list of int): The starting indices of each chunk with context in the corpus. | def _create_chunks_with_context(corpus, chunk_len, context_left, context_right):
"""
This function takes a corpus of text and splits it into chunks of a specified length,
then adds a specified amount of context to each chunk. The context is added by first
going backwards from the start of the chunk and then going forwards from the end of the
chunk, ensuring that the context includes only whole words and that the total context length
does not exceed the specified limit. This function uses binary search for efficiency.
Returns:
chunks (list of str): The chunks of text.
chunks_with_context (list of str): The chunks of text with added context.
chunk_with_context_start_indices (list of int): The starting indices of each chunk with context in the corpus.
"""
words = re.split('(\\s+)', corpus)
word_start_indices = [0]
current_index = 0
for word in words:
current_index += len(word)
word_start_indices.append(current_index)
chunks, chunk_lengths, chunk_start_indices, chunk_with_context_start_indices = [], [], [], []
current_length = 0
current_index = 0
chunk = []
for word in words:
if current_length + len(word) > chunk_len:
chunks.append(''.join(chunk))
chunk_lengths.append(current_length)
chunk_start_indices.append(current_index - current_length)
chunk = [word]
current_length = len(word)
else:
chunk.append(word)
current_length += len(word)
current_index += len(word)
if chunk:
chunks.append(''.join(chunk))
chunk_lengths.append(current_length)
chunk_start_indices.append(current_index - current_length)
chunks_with_context = []
for start_index, chunk_length in zip(chunk_start_indices, chunk_lengths):
context_start_index = bisect.bisect_right(word_start_indices, start_index - context_left)
context_end_index = bisect.bisect_left(word_start_indices, start_index + chunk_length + context_right)
# Combine all the words in the context range (before, chunk, and after)
chunk_with_context = ''.join(words[context_start_index:context_end_index])
chunks_with_context.append(chunk_with_context)
# Determine the start index of the chunk with context
chunk_with_context_start_index = word_start_indices[context_start_index]
chunk_with_context_start_indices.append(chunk_with_context_start_index)
return chunks, chunks_with_context, chunk_with_context_start_indices |
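A small hand-traced call, assuming a whitespace-separated corpus; splitting on `(\s+)` keeps the separators, so chunk boundaries fall on whole words:
chunks, with_ctx, starts = _create_chunks_with_context(
    "one two three four five", chunk_len=10, context_left=5, context_right=5
)
# chunks   -> ['one two ', 'three four', ' five']
# with_ctx -> ['one two three', 'two three four five', 'four five']
# starts   -> [0, 4, 14]   (character offsets of each context window in the corpus)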
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. | def custom_cosine_scheduler_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch=-1):
"""
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
lr_lambda = partial(
_get_fp_cosine_schedule_with_warmup_lr_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_firstepoch_steps = num_firstepoch_steps,
)
return LambdaLR(optimizer, lr_lambda, last_epoch) |
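These custom schedulers drop into a standard PyTorch loop like any `LambdaLR`; a hedged usage sketch (`model` and `compute_loss` are placeholders, and the step counts are illustrative):
import torch

optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)   # `model` is a placeholder
scheduler = custom_cosine_scheduler_with_warmup(
    optimizer,
    num_warmup_steps=100,
    num_training_steps=1000,
    num_firstepoch_steps=250,
)
for step in range(1000):
    loss = compute_loss()   # placeholder for the actual forward/loss computation
    loss.backward()
    optimizer.step()
    scheduler.step()        # advances the lambda-based learning rate
    optimizer.zero_grad()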
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. | def custom_half_scheduler_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch=-1):
"""
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
lr_lambda = partial(
_get_fp_half_schedule_with_warmup_lr_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_firstepoch_steps = num_firstepoch_steps,
)
return LambdaLR(optimizer, lr_lambda, last_epoch) |
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. | def custom_raise_fall_scheduler_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch=-1):
"""
Args:
optimizer ([`~torch.optim.Optimizer`]):
The optimizer for which to schedule the learning rate.
num_warmup_steps (`int`):
The number of steps for the warmup phase.
num_training_steps (`int`):
The total number of training steps.
last_epoch (`int`, *optional*, defaults to -1):
The index of the last epoch when resuming training.
Return:
`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
lr_lambda = partial(
_get_fp_cosine_raise_and_fall_lr_lambda,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
num_firstepoch_steps = num_firstepoch_steps,
)
return LambdaLR(optimizer, lr_lambda, last_epoch) |
Implements the NEFTune forward pass for the model. Note this works only for
torch.nn.Embedding layers. This method is slightly adapted from the original source code
that can be found here: https://github.com/neelsjain/NEFTune
Args:
input (`torch.Tensor`):
The input tensor to the model.
noise_alpha (`float`):
The noise alpha value to use for the NEFTune forward pass. | def neftune_forward(self, input: torch.Tensor):
"""
Implements the NEFTune forward pass for the model. Note this works only for
torch.nn.Embedding layers. This method is slightly adapted from the original source code
that can be found here: https://github.com/neelsjain/NEFTune
Args:
input (`torch.Tensor`):
The input tensor to the model.
noise_alpha (`float`):
The noise alpha value to use for the NEFTune forward pass.
"""
embeddings = torch.nn.functional.embedding(
input, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse
)
if self.training:
# Add noise to the embeddings
dims = torch.tensor(embeddings.size(1) * embeddings.size(2))
mag_norm = self.neftune_noise_alpha / torch.sqrt(dims)
embeddings = embeddings + torch.zeros_like(embeddings).uniform_(-mag_norm, mag_norm)
return embeddings |
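One way this forward pass is typically wired in (a sketch, not the original training code): bind the function onto the model's input embedding layer and set the `neftune_noise_alpha` attribute it reads. `model` here is assumed to be a transformers-style model.
embeddings = model.get_input_embeddings()   # a torch.nn.Embedding layer
embeddings.neftune_noise_alpha = 5.0        # noise scale read by neftune_forward
# Bind neftune_forward as the layer's forward method
embeddings.forward = neftune_forward.__get__(embeddings, type(embeddings))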
Strips unusual symbols and forcibly builds a path as relative to the intended directory. | def clean_path(base_path: str, path: str):
"""Strips unusual symbols and forcibly builds a path as relative to the intended directory."""
path = path.replace('\\', '/').replace('..', '_')
if base_path is None:
return path
return f'{Path(base_path).absolute()}/{path}' |
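For illustration, replacing '..' keeps traversal attempts under the intended base directory (the absolute prefix depends on the working directory):
clean_path('training/datasets', '../../etc/passwd')
# -> '<absolute path of training/datasets>/_/_/etc/passwd'
clean_path(None, 'subdir\\file.txt')
# -> 'subdir/file.txt'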
Given two torch tensors, finds the length of the longest
common prefix between the two. | def find_prefix_length(past_seq, seq_tensor):
'''
Given two torch tensors, finds the length of the longest
common prefix between the two.
'''
min_length = min(past_seq.shape[0], seq_tensor.shape[0])
indices = torch.nonzero(~torch.eq(past_seq[:min_length], seq_tensor[:min_length]))
if len(indices) > 0:
prefix_length = indices[0].item()
else:
prefix_length = min_length
return prefix_length |
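A small usage example (the shared prefix here is `[1, 2]`):
import torch

past_seq   = torch.tensor([1, 2, 3, 4])
seq_tensor = torch.tensor([1, 2, 5])
find_prefix_length(past_seq, seq_tensor)  # -> 2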
Given two lists, solves the Longest Common Substring problem.
It returns the indices where the substring starts and ends in
s1 and s2.
Example:
ir, jr, ir2, jr2 = find_longest_common_substring_indices(s1, s2)
print(s1[ir:jr + 1])
print(s2[ir2:jr2 + 1])
Adapted from
https://rosettacode.org/wiki/Longest_common_substring#Python | def find_longest_common_substring_indices(list1, list2):
'''
Given two lists, solves the Longest Common Substring problem.
It returns the indices where the substring starts and ends in
s1 and s2.
Example:
ir, jr, ir2, jr2 = find_longest_common_substring_indices(s1, s2)
print(s1[ir:jr + 1])
print(s2[ir2:jr2 + 1])
Adapted from
https://rosettacode.org/wiki/Longest_common_substring#Python
'''
len_list1, len_list2 = len(list1), len(list2)
start_index_list1, end_index_list1 = 0, -1
start_index_list2, end_index_list2 = 0, -1
# for index1 in tqdm(range(0, len_list1), desc="StreamingLLM prompt comparison", leave=False):
for index1 in range(0, len_list1):
try:
index2 = list2.index(list1[index1])
except:
continue
while index2 >= 0:
temp_index1, temp_index2 = index1, index2
while temp_index1 < len_list1 and temp_index2 < len_list2 and list2[temp_index2] == list1[temp_index1]:
if temp_index1 - index1 >= end_index_list1 - start_index_list1:
start_index_list1, end_index_list1 = index1, temp_index1
start_index_list2, end_index_list2 = index2, temp_index2
temp_index1 += 1
temp_index2 += 1
try:
index2 = list2.index(list1[index1], index2 + 1)
except:
break
return start_index_list1, end_index_list1, start_index_list2, end_index_list2 |
Copied from https://github.com/yaml/pyyaml/issues/240
Makes pyyaml output prettier multiline strings. | def str_presenter(dumper, data):
"""
Copied from https://github.com/yaml/pyyaml/issues/240
Makes pyyaml output prettier multiline strings.
"""
if data.count('\n') > 0:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data) |
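The presenter only takes effect once registered with pyyaml, as in the linked issue:
import yaml

yaml.add_representer(str, str_presenter)
yaml.representer.SafeRepresenter.add_representer(str, str_presenter)  # also affects safe_dump

yaml.dump({"text": "line one\nline two"})
# text: |-
#   line one
#   line two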
Given a Jinja template, reverse-engineers the prefix and the suffix for
an assistant message (if impersonate=False) or a user message
(if impersonate=True) | def get_generation_prompt(renderer, impersonate=False, strip_trailing_spaces=True):
'''
Given a Jinja template, reverse-engineers the prefix and the suffix for
an assistant message (if impersonate=False) or a user message
(if impersonate=True)
'''
if impersonate:
messages = [
{"role": "user", "content": "<<|user-message-1|>>"},
{"role": "user", "content": "<<|user-message-2|>>"},
]
else:
messages = [
{"role": "assistant", "content": "<<|user-message-1|>>"},
{"role": "assistant", "content": "<<|user-message-2|>>"},
]
prompt = renderer(messages=messages)
suffix_plus_prefix = prompt.split("<<|user-message-1|>>")[1].split("<<|user-message-2|>>")[0]
suffix = prompt.split("<<|user-message-2|>>")[1]
prefix = suffix_plus_prefix[len(suffix):]
if strip_trailing_spaces:
prefix = prefix.rstrip(' ')
return prefix, suffix |
Same as above but returns HTML for the UI | def generate_chat_reply_wrapper(text, state, regenerate=False, _continue=False):
'''
Same as above but returns HTML for the UI
'''
if not character_is_loaded(state):
return
if state['start_with'] != '' and not _continue:
if regenerate:
text, state['history'] = remove_last_message(state['history'])
regenerate = False
_continue = True
send_dummy_message(text, state)
send_dummy_reply(state['start_with'], state)
for i, history in enumerate(generate_chat_reply(text, state, regenerate, _continue, loading_message=True, for_ui=True)):
yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'], state['character_menu']), history |
Loads the latest history for the given character in chat or chat-instruct
mode, or the latest instruct history for instruct mode. | def load_latest_history(state):
'''
Loads the latest history for the given character in chat or chat-instruct
mode, or the latest instruct history for instruct mode.
'''
if shared.args.multi_user:
return start_new_chat(state)
histories = find_all_histories(state)
if len(histories) > 0:
history = load_history(histories[0], state['character_menu'], state['mode'])
else:
history = start_new_chat(state)
return history |
Loads the latest history for the given character in chat or chat-instruct
mode, or the latest instruct history for instruct mode. | def load_history_after_deletion(state, idx):
'''
Loads the latest history for the given character in chat or chat-instruct
mode, or the latest instruct history for instruct mode.
'''
if shared.args.multi_user:
return start_new_chat(state)
histories = find_all_histories(state)
idx = min(int(idx), len(histories) - 1)
idx = max(0, idx)
if len(histories) > 0:
history = load_history(histories[idx], state['character_menu'], state['mode'])
else:
history = start_new_chat(state)
histories = find_all_histories(state)
return history, gr.update(choices=histories, value=histories[idx]) |
pyyaml is very inconsistent with multiline strings.
for simple instruction template outputs, this is enough. | def my_yaml_output(data):
'''
pyyaml is very inconsistent with multiline strings.
for simple instruction template outputs, this is enough.
'''
result = ""
for k in data:
result += k + ": |-\n"
for line in data[k].splitlines():
result += " " + line.rstrip(' ') + "\n"
return result |
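Example of the block-scalar output this helper produces:
my_yaml_output({"context": "Hello\nWorld"})
# context: |-
#   Hello
#   World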
DeepSpeed configuration
https://huggingface.co/docs/transformers/main_classes/deepspeed | def generate_ds_config(ds_bf16, train_batch_size, nvme_offload_dir):
'''
DeepSpeed configuration
https://huggingface.co/docs/transformers/main_classes/deepspeed
'''
if nvme_offload_dir:
ds_config = {
"fp16": {
"enabled": not ds_bf16,
},
"bf16": {
"enabled": ds_bf16,
},
"zero_optimization": {
"stage": 3,
"offload_param": {
"device": "nvme",
"nvme_path": nvme_offload_dir,
"pin_memory": True,
"buffer_count": 5,
"buffer_size": 1e9,
"max_in_cpu": 1e9
},
"overlap_comm": True,
"reduce_bucket_size": "auto",
"contiguous_gradients": True,
"sub_group_size": 1e8,
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": "auto",
"stage3_max_reuse_distance": "auto",
},
"aio": {
"block_size": 262144,
"queue_depth": 32,
"thread_count": 1,
"single_submit": False,
"overlap_events": True
},
"steps_per_print": 2000,
"train_batch_size": train_batch_size,
"train_micro_batch_size_per_gpu": 1,
"wall_clock_breakdown": False
}
else:
ds_config = {
"fp16": {
"enabled": not ds_bf16,
},
"bf16": {
"enabled": ds_bf16,
},
"zero_optimization": {
"stage": 3,
"offload_param": {
"device": "cpu",
"pin_memory": True
},
"overlap_comm": True,
"contiguous_gradients": True,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": "auto",
"stage3_max_reuse_distance": "auto",
},
"steps_per_print": 2000,
"train_batch_size": train_batch_size,
"train_micro_batch_size_per_gpu": 1,
"wall_clock_breakdown": False
}
return ds_config |
Based on:
https://huggingface.co/docs/transformers/perplexity#calculating-ppl-with-fixedlength-models | def calculate_perplexity(models, input_dataset, stride, _max_length):
'''
Based on:
https://huggingface.co/docs/transformers/perplexity#calculating-ppl-with-fixedlength-models
'''
if shared.args.loader == "llama.cpp":
logger.error("llamacpp_HF is required for perplexity evaluation with GGUF models. Please reload the model with llamacpp_HF instead of llama.cpp.")
raise ValueError
if shared.args.loader == "ExLlamav2":
logger.error("ExLlamav2_HF is required for perplexity evaluation with EXL2 models. Please reload the model with ExLlamav2_HF instead of ExLlamav2.")
raise ValueError
if shared.args.loader == "llamacpp_HF" and not shared.args.logits_all:
logger.error("--logits_all is required for perplexity evaluation with GGUF models. Please reload the model with that option set/checked.")
raise ValueError
if not shared.args.no_use_fast:
logger.warning("--no_use_fast is not set. If tokenizing the input dataset takes a long time, try reloading the model with that option set/checked.")
global past_evaluations
cumulative_log = ''
cumulative_log += "Loading the input dataset...\n\n"
yield cumulative_log
# Copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/triton/utils/datautils.py
if input_dataset == 'wikitext':
data = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test')
text = "\n\n".join(data['text'])
elif input_dataset == 'ptb':
data = load_dataset('ptb_text_only', 'penn_treebank', split='validation')
text = "\n\n".join(data['sentence'])
elif input_dataset == 'ptb_new':
data = load_dataset('ptb_text_only', 'penn_treebank', split='test')
text = " ".join(data['sentence'])
else:
with open(Path(f'training/datasets/{input_dataset}.txt'), 'r', encoding='utf-8') as f:
text = f.read()
for model in models:
if is_in_past_evaluations(model, input_dataset, stride, _max_length):
cumulative_log += f"`{model}` has already been tested. Ignoring.\n\n"
yield cumulative_log
continue
if model != 'current model':
try:
yield cumulative_log + f"Loading `{model}`...\n\n"
model_settings = get_model_metadata(model)
shared.settings.update({k: v for k, v in model_settings.items() if k in shared.settings}) # hijacking the interface defaults
update_model_parameters(model_settings) # hijacking the command-line arguments
unload_model()
shared.model, shared.tokenizer = load_model(model)
except:
cumulative_log += f"Failed to load `{model}`. Moving on.\n\n"
yield cumulative_log
continue
cumulative_log += f"Processing `{shared.model_name}`...\n\n"
yield cumulative_log + "Tokenizing the input dataset...\n\n"
encodings = encode(text, add_special_tokens=False)
seq_len = encodings.shape[1]
if _max_length:
max_length = _max_length
elif hasattr(shared.model.config, 'max_position_embeddings'):
max_length = shared.model.config.max_position_embeddings
else:
max_length = 2048
nlls = []
prev_end_loc = 0
for begin_loc in tqdm(range(0, seq_len, stride)):
yield cumulative_log + f"Evaluating... {100*begin_loc/seq_len:.2f}%"
end_loc = min(begin_loc + max_length, seq_len)
trg_len = end_loc - prev_end_loc # may be different from stride on last loop
input_ids = encodings[:, begin_loc:end_loc]
target_ids = input_ids.clone()
target_ids[:, :-trg_len] = -100
clear_torch_cache()
with torch.no_grad():
outputs = shared.model(input_ids=input_ids, labels=target_ids)
# loss is calculated using CrossEntropyLoss which averages over valid labels
# N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels
# to the left by 1.
neg_log_likelihood = outputs.loss
nlls.append(neg_log_likelihood)
prev_end_loc = end_loc
if end_loc == seq_len:
break
ppl = torch.exp(torch.stack(nlls).mean())
add_entry_to_past_evaluations(float(ppl), shared.model_name, input_dataset, stride, _max_length)
save_past_evaluations(past_evaluations)
message = f"The perplexity for `{shared.model_name}` is: {float(ppl)}"
logger.info(message)
cumulative_log += f"{message}\n\n"
yield cumulative_log |
Used to avoid caching convert_to_markdown calls during streaming. | def convert_to_markdown_wrapped(string, use_cache=True):
'''
Used to avoid caching convert_to_markdown calls during streaming.
'''
if use_cache:
return convert_to_markdown(string)
return convert_to_markdown.__wrapped__(string) |
A copy of
https://github.com/abetlen/llama-cpp-python/blob/main/llama_cpp/llama.py
with tqdm to show prompt processing progress. | def eval_with_progress(self, tokens: Sequence[int]):
"""
A copy of
https://github.com/abetlen/llama-cpp-python/blob/main/llama_cpp/llama.py
with tqdm to show prompt processing progress.
"""
assert self._ctx.ctx is not None
assert self._batch.batch is not None
self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1)
if len(tokens) > 1:
progress_bar = tqdm(range(0, len(tokens), self.n_batch), desc="Prompt evaluation", leave=False)
else:
progress_bar = range(0, len(tokens), self.n_batch)
for i in progress_bar:
batch = tokens[i : min(len(tokens), i + self.n_batch)]
n_past = self.n_tokens
n_tokens = len(batch)
self._batch.set_batch(
batch=batch, n_past=n_past, logits_all=self.context_params.logits_all
)
self._ctx.decode(self._batch)
# Save tokens
self.input_ids[n_past : n_past + n_tokens] = batch
# Save logits
if self.context_params.logits_all:
rows = n_tokens
cols = self._n_vocab
logits = self._ctx.get_logits()[: rows * cols]
self.scores[n_past : n_past + n_tokens, :].reshape(-1)[: :] = logits
else:
rows = 1
cols = self._n_vocab
logits = self._ctx.get_logits()[: rows * cols]
self.scores[n_past + n_tokens - 1, :].reshape(-1)[: :] = logits
# Update n_tokens
self.n_tokens += n_tokens |
Copied from: https://github.com/vladmandic/automatic
All credits to vladmandic. | def setup_logging():
'''
Copied from: https://github.com/vladmandic/automatic
All credits to vladmandic.
'''
class RingBuffer(logging.StreamHandler):
def __init__(self, capacity):
super().__init__()
self.capacity = capacity
self.buffer = []
self.formatter = logging.Formatter('{ "asctime":"%(asctime)s", "created":%(created)f, "facility":"%(name)s", "pid":%(process)d, "tid":%(thread)d, "level":"%(levelname)s", "module":"%(module)s", "func":"%(funcName)s", "msg":"%(message)s" }')
def emit(self, record):
msg = self.format(record)
# self.buffer.append(json.loads(msg))
self.buffer.append(msg)
if len(self.buffer) > self.capacity:
self.buffer.pop(0)
def get(self):
return self.buffer
from rich.console import Console
from rich.logging import RichHandler
from rich.pretty import install as pretty_install
from rich.theme import Theme
from rich.traceback import install as traceback_install
level = logging.DEBUG
logger.setLevel(logging.DEBUG) # log to file is always at level debug for facility `sd`
console = Console(log_time=True, log_time_format='%H:%M:%S-%f', theme=Theme({
"traceback.border": "black",
"traceback.border.syntax_error": "black",
"inspect.value.border": "black",
}))
logging.basicConfig(level=logging.ERROR, format='%(asctime)s | %(name)s | %(levelname)s | %(module)s | %(message)s', handlers=[logging.NullHandler()]) # redirect default logger to null
pretty_install(console=console)
traceback_install(console=console, extra_lines=1, max_frames=10, width=console.width, word_wrap=False, indent_guides=False, suppress=[])
while logger.hasHandlers() and len(logger.handlers) > 0:
logger.removeHandler(logger.handlers[0])
# handlers
rh = RichHandler(show_time=True, omit_repeated_times=False, show_level=True, show_path=False, markup=False, rich_tracebacks=True, log_time_format='%H:%M:%S-%f', level=level, console=console)
rh.setLevel(level)
logger.addHandler(rh)
rb = RingBuffer(100) # 100 entries default in log ring buffer
rb.setLevel(level)
logger.addHandler(rb)
logger.buffer = rb.buffer
# overrides
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("diffusers").setLevel(logging.ERROR)
logging.getLogger("torch").setLevel(logging.ERROR)
logging.getLogger("lycoris").handlers = logger.handlers |
Adapted from https://github.com/Ph0rk0z/text-generation-webui-testing | def add_lora_autogptq(lora_names):
'''
Adapted from https://github.com/Ph0rk0z/text-generation-webui-testing
'''
try:
from auto_gptq import get_gptq_peft_model
from auto_gptq.utils.peft_utils import GPTQLoraConfig
except:
logger.error("This version of AutoGPTQ does not support LoRA. You need to install from source or wait for a new release.")
return
if len(lora_names) == 0:
reload_model()
shared.lora_names = []
return
else:
if len(lora_names) > 1:
logger.warning('AutoGPTQ can only work with 1 LoRA at the moment. Only the first one in the list will be loaded.')
if not shared.args.no_inject_fused_attention:
logger.warning('Fused Attention + AutoGPTQ may break LoRA loading. Disable it.')
peft_config = GPTQLoraConfig(
inference_mode=True,
)
lora_path = get_lora_path(lora_names[0])
logger.info("Applying the following LoRAs to {}: {}".format(shared.model_name, ', '.join([lora_names[0]])))
shared.model = get_gptq_peft_model(shared.model, peft_config, lora_path)
shared.lora_names = [lora_names[0]]
return |
UI: update the command-line arguments based on the interface values | def update_model_parameters(state, initial=False):
'''
UI: update the command-line arguments based on the interface values
'''
elements = ui.list_model_elements() # the names of the parameters
gpu_memories = []
for i, element in enumerate(elements):
if element not in state:
continue
value = state[element]
if element.startswith('gpu_memory'):
gpu_memories.append(value)
continue
if initial and element in shared.provided_arguments:
continue
# Setting null defaults
if element in ['wbits', 'groupsize', 'model_type'] and value == 'None':
value = vars(shared.args_defaults)[element]
elif element in ['cpu_memory'] and value == 0:
value = vars(shared.args_defaults)[element]
# Making some simple conversions
if element in ['wbits', 'groupsize', 'pre_layer']:
value = int(value)
elif element == 'cpu_memory' and value is not None:
value = f"{value}MiB"
if element in ['pre_layer']:
value = [value] if value > 0 else None
setattr(shared.args, element, value)
found_positive = False
for i in gpu_memories:
if i > 0:
found_positive = True
break
if not (initial and vars(shared.args)['gpu_memory'] != vars(shared.args_defaults)['gpu_memory']):
if found_positive:
shared.args.gpu_memory = [f"{i}MiB" for i in gpu_memories]
else:
shared.args.gpu_memory = None |
UI: update the state variable with the model settings | def apply_model_settings_to_state(model, state):
'''
UI: update the state variable with the model settings
'''
model_settings = get_model_metadata(model)
if 'loader' in model_settings:
loader = model_settings.pop('loader')
# If the user is using an alternative loader for the same model type, let them keep using it
if not (loader == 'ExLlamav2_HF' and state['loader'] in ['GPTQ-for-LLaMa', 'ExLlamav2', 'AutoGPTQ']):
state['loader'] = loader
for k in model_settings:
if k in state:
if k in ['wbits', 'groupsize']:
state[k] = str(model_settings[k])
else:
state[k] = model_settings[k]
return state |
Save the settings for this model to models/config-user.yaml | def save_model_settings(model, state):
'''
Save the settings for this model to models/config-user.yaml
'''
if model == 'None':
yield ("Not saving the settings because no model is selected in the menu.")
return
user_config = shared.load_user_config()
model_regex = model + '$' # For exact matches
if model_regex not in user_config:
user_config[model_regex] = {}
for k in ui.list_model_elements():
if k == 'loader' or k in loaders.loaders_and_params[state['loader']]:
user_config[model_regex][k] = state[k]
shared.user_config = user_config
output = yaml.dump(user_config, sort_keys=False)
p = Path(f'{shared.args.model_dir}/config-user.yaml')
with open(p, 'w') as f:
f.write(output)
yield (f"Settings for `{model}` saved to `{p}`.") |
Similar to the function above, but it saves only the instruction template. | def save_instruction_template(model, template):
'''
Similar to the function above, but it saves only the instruction template.
'''
if model == 'None':
yield ("Not saving the template because no model is selected in the menu.")
return
user_config = shared.load_user_config()
model_regex = model + '$' # For exact matches
if model_regex not in user_config:
user_config[model_regex] = {}
if template == 'None':
user_config[model_regex].pop('instruction_template', None)
else:
user_config[model_regex]['instruction_template'] = template
shared.user_config = user_config
output = yaml.dump(user_config, sort_keys=False)
p = Path(f'{shared.args.model_dir}/config-user.yaml')
with open(p, 'w') as f:
f.write(output)
if template == 'None':
yield (f"Instruction template for `{model}` unset in `{p}`, as the value for template was `{template}`.")
else:
yield (f"Instruction template for `{model}` saved to `{p}` as `{template}`.") |
Gets alpha_value from alpha_value and rope_freq_base | def get_alpha_value(alpha, base):
'''
Gets alpha_value from alpha_value and rope_freq_base
'''
if base > 0:
return (base / 10000.) ** (63 / 64.)
else:
return alpha |
Gets rope_freq_base from alpha_value and rope_freq_base | def get_rope_freq_base(alpha, base):
'''
Gets rope_freq_base from alpha_value and rope_freq_base
'''
if base > 0:
return base
else:
return 10000 * alpha ** (64 / 63.) |
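The two helpers invert each other: `rope_freq_base = 10000 * alpha ** (64/63)` and `alpha = (base / 10000) ** (63/64)`. A quick round trip (values rounded):
base = get_rope_freq_base(2.5, 0)   # base of 0 -> derive from alpha: 10000 * 2.5 ** (64/63) ≈ 25366
get_alpha_value(0, base)            # base > 0 -> (base / 10000) ** (63/64) ≈ 2.5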
Loads custom model-specific settings | def load_user_config():
'''
Loads custom model-specific settings
'''
if Path(f'{args.model_dir}/config-user.yaml').exists():
file_content = open(f'{args.model_dir}/config-user.yaml', 'r').read().strip()
if file_content:
user_config = yaml.safe_load(file_content)
else:
user_config = {}
else:
user_config = {}
return user_config |
Returns formatted outputs for the UI | def generate_reply_wrapper(question, state, stopping_strings=None):
"""
Returns formatted outputs for the UI
"""
reply = question if not shared.is_seq2seq else ''
yield formatted_outputs(reply, shared.model_name)
for reply in generate_reply(question, state, stopping_strings, is_chat=False, escape_html=True, for_ui=True):
if not shared.is_seq2seq:
reply = question + reply
yield formatted_outputs(reply, shared.model_name) |
Fix the LaTeX equations in GALACTICA | def fix_galactica(s):
"""
Fix the LaTeX equations in GALACTICA
"""
s = s.replace(r'\[', r'$')
s = s.replace(r'\]', r'$')
s = s.replace(r'\(', r'$')
s = s.replace(r'\)', r'$')
s = s.replace(r'$$', r'$')
s = re.sub(r'\n', r'\n\n', s)
s = re.sub(r"\n{3,}", "\n\n", s)
return s |
For models that do not use the transformers library for sampling | def generate_reply_custom(question, original_question, seed, state, stopping_strings=None, is_chat=False):
"""
For models that do not use the transformers library for sampling
"""
seed = set_manual_seed(state['seed'])
t0 = time.time()
reply = ''
try:
if not is_chat:
yield ''
if not state['stream']:
reply = shared.model.generate(question, state)
yield reply
else:
for reply in shared.model.generate_with_streaming(question, state):
yield reply
except Exception:
traceback.print_exc()
finally:
t1 = time.time()
original_tokens = len(encode(original_question)[0])
new_tokens = len(encode(original_question + reply)[0]) - original_tokens
print(f'Output generated in {(t1-t0):.2f} seconds ({new_tokens/(t1-t0):.2f} tokens/s, {new_tokens} tokens, context {original_tokens}, seed {seed})')
return |
Strips unusual symbols and forcibly builds a path as relative to the intended directory. | def clean_path(base_path: str, path: str):
"""Strips unusual symbols and forcibly builds a path as relative to the intended directory."""
path = path.replace('\\', '/').replace('..', '_')
if base_path is None:
return path
return f'{Path(base_path).absolute()}/{path}' |
Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui | def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class, interactive=True):
"""
Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui
"""
def refresh():
refresh_method()
args = refreshed_args() if callable(refreshed_args) else refreshed_args
return gr.update(**(args or {}))
refresh_button = gr.Button(refresh_symbol, elem_classes=elem_class, interactive=interactive)
refresh_button.click(
fn=lambda: {k: tuple(v) if type(k) is list else v for k, v in refresh().items()},
inputs=[],
outputs=[refresh_component]
)
return refresh_button |
Skips over whitespace and comments in the input string.
This function processes the input string, skipping over any spaces, tabs,
and content following a '#' character, which denotes a comment. The parsing
of a comment continues until the end of the line (denoted by newline characters
'\r' or '\n'). If the 'newline_ok' parameter is set to False, the function
will stop processing and return the remaining string upon encountering a
newline character, otherwise it will skip over newline characters as well.
Parameters:
src (str): The input string to be processed.
newline_ok (bool): A flag indicating whether encountering a newline character
should stop the parsing (False) or if it should be skipped (True).
Returns:
str: The remaining portion of the input string after skipping whitespace and comments.
| def remove_leading_white_space(src, newline_ok):
"""
Skips over whitespace and comments in the input string.
This function processes the input string, skipping over any spaces, tabs,
and content following a '#' character, which denotes a comment. The parsing
of a comment continues until the end of the line (denoted by newline characters
'\r' or '\n'). If the 'newline_ok' parameter is set to False, the function
will stop processing and return the remaining string upon encountering a
newline character, otherwise it will skip over newline characters as well.
Parameters:
src (str): The input string to be processed.
newline_ok (bool): A flag indicating whether encountering a newline character
should stop the parsing (False) or if it should be skipped (True).
Returns:
str: The remaining portion of the input string after skipping whitespace and comments.
"""
pos = 0
while pos < len(src) and (src[pos].isspace() or src[pos] == "#"):
if src[pos] == "#":
while pos < len(src) and src[pos] not in ("\r", "\n"):
pos += 1
else:
if not newline_ok and src[pos] in ("\r", "\n"):
break
pos += 1
return src[pos:] |
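Usage examples for the two `newline_ok` modes (hand-traced against the loop above):
remove_leading_white_space("  # a comment\nroot ::= rule", newline_ok=True)
# -> 'root ::= rule'
remove_leading_white_space("  # a comment\nroot ::= rule", newline_ok=False)
# -> '\nroot ::= rule'   (parsing stops at the newline)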
parse the leading char from the input string
:param src:
:return: char, remaining_src | def parse_char(src):
"""
parse the leading char from the input string
:param src:
:return: char, remaining_src
"""
# if we have a backslash, it's maybe an escape
if src[0] == "\\":
esc = src[1]
if esc == "x":
first = hex_to_int(src[2])
if first > -1:
second = hex_to_int(src[3])
if second > -1:
return (first << 4) + second, src[4:]
raise RuntimeError("expecting \\xNN at " + src)
elif esc in ('"', "[", "]"):
return esc, src[2:]
elif esc == "r":
return "\r", src[2:]
elif esc == "n":
return "\n", src[2:]
elif esc == "t":
return "\t", src[2:]
raise RuntimeError("unknown escape at " + src)
elif src:
return src[0], src[1:]
raise RuntimeError("unexpected end of input") |
Parses an action string.
Args:
action: String containing action.
Raises:
ActionError: If the action has invalid syntax.
Returns:
Action name and arguments. | def parse(action: str) -> ActionParseResult:
"""Parses an action string.
Args:
action: String containing action.
Raises:
ActionError: If the action has invalid syntax.
Returns:
Action name and arguments.
"""
args_match = re_action_args.match(action)
if args_match is not None:
action_name, action_args_str = args_match.groups()
if action_args_str:
try:
# We wrap `action_args_str` to be able to disambiguate the cases where
# the list of arguments is a comma-separated list of values from the
# case where the argument is a single tuple.
action_args: tuple[Any, ...] = ast.literal_eval(f"({action_args_str},)")
except Exception:
raise ActionError(
f"unable to parse {action_args_str!r} in action {action!r}"
)
else:
action_args = ()
else:
action_name = action
action_args = ()
return action_name, action_args |
Callable to lazy load the system commands.
Returns:
System commands class. | def get_system_commands() -> type[SystemCommands]:
"""Callable to lazy load the system commands.
Returns:
System commands class.
"""
from ._system_commands import SystemCommands
return SystemCommands |
Convert name from CamelCase to snake_case.
Args:
name: A symbol name, such as a class name.
Returns:
Name in snake case. | def camel_to_snake(
name: str, _re_snake: Pattern[str] = re.compile("[a-z][A-Z]")
) -> str:
"""Convert name from CamelCase to snake_case.
Args:
name: A symbol name, such as a class name.
Returns:
Name in snake case.
"""
def repl(match: Match[str]) -> str:
lower: str
upper: str
lower, upper = match.group() # type: ignore
return f"{lower}_{upper.lower()}"
return _re_snake.sub(repl, name).lower() |
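Example conversions:
camel_to_snake("MyWidgetName")  # -> 'my_widget_name'
camel_to_snake("Button")        # -> 'button'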
Convert an RGB color to the CIE-L*ab format.
Uses the standard RGB color space with a D65/2⁰ standard illuminant.
Conversion passes through the XYZ color space.
Cf. http://www.easyrgb.com/en/math.php. | def rgb_to_lab(rgb: Color) -> Lab:
"""Convert an RGB color to the CIE-L*ab format.
Uses the standard RGB color space with a D65/2⁰ standard illuminant.
Conversion passes through the XYZ color space.
Cf. http://www.easyrgb.com/en/math.php.
"""
r, g, b = rgb.r / 255, rgb.g / 255, rgb.b / 255
r = pow((r + 0.055) / 1.055, 2.4) if r > 0.04045 else r / 12.92
g = pow((g + 0.055) / 1.055, 2.4) if g > 0.04045 else g / 12.92
b = pow((b + 0.055) / 1.055, 2.4) if b > 0.04045 else b / 12.92
x = (r * 41.24 + g * 35.76 + b * 18.05) / 95.047
y = (r * 21.26 + g * 71.52 + b * 7.22) / 100
z = (r * 1.93 + g * 11.92 + b * 95.05) / 108.883
off = 16 / 116
x = pow(x, 1 / 3) if x > 0.008856 else 7.787 * x + off
y = pow(y, 1 / 3) if y > 0.008856 else 7.787 * y + off
z = pow(z, 1 / 3) if z > 0.008856 else 7.787 * z + off
return Lab(116 * y - 16, 500 * (x - y), 200 * (y - z)) |
Convert a CIE-L*ab color to RGB.
Uses the standard RGB color space with a D65/2⁰ standard illuminant.
Conversion passes through the XYZ color space.
Cf. http://www.easyrgb.com/en/math.php. | def lab_to_rgb(lab: Lab, alpha: float = 1.0) -> Color:
"""Convert a CIE-L*ab color to RGB.
Uses the standard RGB color space with a D65/2⁰ standard illuminant.
Conversion passes through the XYZ color space.
Cf. http://www.easyrgb.com/en/math.php.
"""
y = (lab.L + 16) / 116
x = lab.a / 500 + y
z = y - lab.b / 200
off = 16 / 116
y = pow(y, 3) if y > 0.2068930344 else (y - off) / 7.787
x = 0.95047 * pow(x, 3) if x > 0.2068930344 else 0.122059 * (x - off)
z = 1.08883 * pow(z, 3) if z > 0.2068930344 else 0.139827 * (z - off)
r = x * 3.2406 + y * -1.5372 + z * -0.4986
g = x * -0.9689 + y * 1.8758 + z * 0.0415
b = x * 0.0557 + y * -0.2040 + z * 1.0570
r = 1.055 * pow(r, 1 / 2.4) - 0.055 if r > 0.0031308 else 12.92 * r
g = 1.055 * pow(g, 1 / 2.4) - 0.055 if g > 0.0031308 else 12.92 * g
b = 1.055 * pow(b, 1 / 2.4) - 0.055 if b > 0.0031308 else 12.92 * b
return Color(int(r * 255), int(g * 255), int(b * 255), alpha) |
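A rough round-trip sanity check, assuming the `Color` and `Lab` types defined alongside these helpers; small drift is expected because the RGB components are truncated to integers:
red = Color(255, 0, 0)
lab = rgb_to_lab(red)
lab_to_rgb(lab)  # -> approximately Color(255, 0, 0)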
Check an environment variable switch.
Args:
name: Name of environment variable.
Returns:
`True` if the env var is "1", otherwise `False`. | def _get_environ_bool(name: str) -> bool:
"""Check an environment variable switch.
Args:
name: Name of environment variable.
Returns:
`True` if the env var is "1", otherwise `False`.
"""
has_environ = get_environ(name) == "1"
return has_environ |
Retrieves an integer environment variable.
Args:
name: Name of environment variable.
default: The value to use if the value is not set, or set to something other
than a valid integer.
Returns:
The integer associated with the environment variable if it's set to a valid int
or the default value otherwise. | def _get_environ_int(name: str, default: int) -> int:
"""Retrieves an integer environment variable.
Args:
name: Name of environment variable.
default: The value to use if the value is not set, or set to something other
than a valid integer.
Returns:
The integer associated with the environment variable if it's set to a valid int
or the default value otherwise.
"""
try:
return int(os.environ[name])
except KeyError:
return default
except ValueError:
return default |
Checks if a string is a valid animation level.
Args:
value: The string to check.
Returns:
Whether it's a valid level or not. | def _is_valid_animation_level(value: str) -> TypeGuard[AnimationLevel]:
"""Checks if a string is a valid animation level.
Args:
value: The string to check.
Returns:
Whether it's a valid level or not.
"""
return value in get_args(AnimationLevel) |
Get the value of the environment variable that controls textual animations.
The variable can be in any of the values defined by [`AnimationLevel`][textual.constants.AnimationLevel].
Returns:
The value that the variable was set to. If the environment variable is set to an
invalid value, we default to showing all animations. | def _get_textual_animations() -> AnimationLevel:
"""Get the value of the environment variable that controls textual animations.
The variable can be in any of the values defined by [`AnimationLevel`][textual.constants.AnimationLevel].
Returns:
The value that the variable was set to. If the environment variable is set to an
invalid value, we default to showing all animations.
"""
value: str = get_environ("TEXTUAL_ANIMATIONS", "FULL").lower()
if _is_valid_animation_level(value):
return value
return "full" |
Generate a renderable to show color systems.
Args:
light: Light ColorSystem.
dark: Dark ColorSystem
Returns:
Table showing all colors. | def show_design(light: ColorSystem, dark: ColorSystem) -> Table:
"""Generate a renderable to show color systems.
Args:
light: Light ColorSystem.
dark: Dark ColorSystem
Returns:
Table showing all colors.
"""
@group()
def make_shades(system: ColorSystem):
colors = system.generate()
for name in system.shades:
background = Color.parse(colors[name]).with_alpha(1.0)
foreground = background + background.get_contrast_text(0.9)
text = Text(f"${name}")
yield Padding(text, 1, style=f"{foreground.hex6} on {background.hex6}")
table = Table(box=None, expand=True)
table.add_column("Light", justify="center")
table.add_column("Dark", justify="center")
table.add_row(make_shades(light), make_shades(dark))
return table |
Validate identifier and raise an error if it fails.
Args:
description: Description of where identifier is used for error message.
*names: Identifiers to check. | def check_identifiers(description: str, *names: str) -> None:
"""Validate identifier and raise an error if it fails.
Args:
description: Description of where identifier is used for error message.
*names: Identifiers to check.
"""
match = _re_identifier.fullmatch
for name in names:
if match(name) is None:
raise BadIdentifier(
f"{name!r} is an invalid {description}; "
"identifiers must contain only letters, numbers, underscores, or hyphens, and must not begin with a number."
) |
Convert the key string to a name suitable for use as a Python identifier. | def _key_to_identifier(key: str) -> str:
"""Convert the key string to a name suitable for use as a Python identifier."""
if len(key) == 1 and key.isupper():
key = f"upper_{key.lower()}"
return key.replace("+", "_").lower() |
Splits a string line into tuples (str, int).
Each tuple represents a section of the line which precedes a tab character.
The string is the string text that appears before the tab character (excluding the tab).
The integer is the width that the tab character is expanded to.
Args:
line: The text to expand tabs in.
tab_size: Number of cells in a tab.
Returns:
A list of tuples representing the line split on tab characters,
and the widths of the tabs after tab expansion is applied. | def get_tab_widths(line: str, tab_size: int = 4) -> list[tuple[str, int]]:
"""Splits a string line into tuples (str, int).
Each tuple represents a section of the line which precedes a tab character.
The string is the string text that appears before the tab character (excluding the tab).
The integer is the width that the tab character is expanded to.
Args:
line: The text to expand tabs in.
tab_size: Number of cells in a tab.
Returns:
A list of tuples representing the line split on tab characters,
and the widths of the tabs after tab expansion is applied.
"""
parts: list[tuple[str, int]] = []
add_part = parts.append
cell_position = 0
matches = _TABS_SPLITTER_RE.findall(line)
for match in matches:
expansion_width = 0
if match.endswith("\t"):
# Remove the tab, and check the width of the rest of the line.
match = match[:-1]
cell_position += cell_len(match)
# Now move along the line by the width of the tab.
tab_remainder = cell_position % tab_size
expansion_width = tab_size - tab_remainder
cell_position += expansion_width
add_part((match, expansion_width))
return parts |
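For example, with the default tab size of 4 (expected values worked through from the code above; the splitter regex is assumed to keep the trailing tab-free chunk):
assert get_tab_widths("\tbar\t") == [("", 4), ("bar", 1)]
assert get_tab_widths("ab\tcd", tab_size=4) == [("ab", 2), ("cd", 0)]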
Expands tabs, taking into account double cell characters.
Args:
line: The text to expand tabs in.
tab_size: Number of cells in a tab.
Returns:
New string with tabs replaced with spaces. | def expand_tabs_inline(line: str, tab_size: int = 4) -> str:
"""Expands tabs, taking into account double cell characters.
Args:
line: The text to expand tabs in.
tab_size: Number of cells in a tab.
Returns:
New string with tabs replaced with spaces.
"""
tab_widths = get_tab_widths(line, tab_size)
return "".join(
[part + expansion_width * " " for part, expansion_width in tab_widths]
) |
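Concretely, with 4-cell tab stops:
assert expand_tabs_inline("\tbar\t", 4) == "    bar "  # 4 spaces, "bar", then 1 space to the next stop
assert expand_tabs_inline("ab\tcd", 4) == "ab  cd"     # the tab expands to 2 spaces to reach column 4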
Expand tabs to the widths defined in the `tab_widths` list.
This will return a new Text instance with tab characters expanded into a
number of spaces. Each time a tab is encountered, it's expanded into the
next integer encountered in the `tab_widths` list. Consequently, the length
of `tab_widths` should match the number of tab characters in `line`.
Args:
line: The `Text` instance to expand tabs in.
tab_widths: The widths to expand tabs to.
Returns:
A new text instance with tab characters converted to spaces. | def expand_text_tabs_from_widths(line: Text, tab_widths: list[int]) -> Text:
"""Expand tabs to the widths defined in the `tab_widths` list.
This will return a new Text instance with tab characters expanded into a
number of spaces. Each time a tab is encountered, it's expanded into the
next integer encountered in the `tab_widths` list. Consequently, the length
    of `tab_widths` should match the number of tab characters in `line`.
Args:
line: The `Text` instance to expand tabs in.
tab_widths: The widths to expand tabs to.
Returns:
A new text instance with tab characters converted to spaces.
"""
if "\t" not in line.plain:
return line
parts = line.split("\t", include_separator=True)
tab_widths_iter = iter(tab_widths)
new_parts: list[Text] = []
append_part = new_parts.append
for part in parts:
if part.plain.endswith("\t"):
part._text[-1] = part._text[-1][:-1] + " "
spaces = next(tab_widths_iter)
part.extend_style(spaces - 1)
append_part(part)
return Text("", end="").join(new_parts) |
Parse features env var
Args:
features: Comma separated feature flags
Returns:
A frozen set of known features. | def parse_features(features: str) -> frozenset[FeatureFlag]:
"""Parse features env var
Args:
features: Comma separated feature flags
Returns:
A frozen set of known features.
"""
features_set = frozenset(
feature.strip().lower() for feature in features.split(",") if feature.strip()
).intersection(FEATURES)
return cast("frozenset[FeatureFlag]", features_set) |
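A usage sketch; whether a flag is kept depends on the FEATURES constant defined elsewhere, so "devtools" and "debug" are assumed to be valid flags here:
features = parse_features(" Devtools , debug , not-a-feature ")
assert features <= {"devtools", "debug"}  # flags are stripped and lower-cased before the intersection
assert "not-a-feature" not in features    # unknown flags are silently dropped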
Convert colors in a style to monochrome.
Args:
style: A Rich Style.
Returns:
A new Rich style. | def monochrome_style(style: Style) -> Style:
"""Convert colors in a style to monochrome.
Args:
style: A Rich Style.
Returns:
A new Rich style.
"""
style_color = style.color
style_background = style.bgcolor
color = (
None
if style_color is None
else Color.from_rich_color(style_color).monochrome.rich_color
)
background = (
None
if style_background is None
else Color.from_rich_color(style_background).monochrome.rich_color
)
return style + Style.from_color(color, background) |
Dim a color by blending towards the background.
Args:
    background: Background color.
    color: Foreground color.
    factor: Blend factor.
Returns:
New dimmer color. | def dim_color(background: RichColor, color: RichColor, factor: float) -> RichColor:
"""Dim a color by blending towards the background
Args:
background: background color.
color: Foreground color.
factor: Blend factor
Returns:
New dimmer color.
"""
red1, green1, blue1 = background.triplet
red2, green2, blue2 = color.triplet
return RichColor.from_rgb(
red1 + (red2 - red1) * factor,
green1 + (green2 - green1) * factor,
blue1 + (blue2 - blue1) * factor,
) |
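A sketch of the linear blend using explicit truecolor values (the exact integers depend on rich's rounding):
from rich.color import Color as RichColor

midpoint = dim_color(RichColor.from_rgb(0, 0, 0), RichColor.from_rgb(255, 255, 255), 0.5)
print(midpoint.triplet)  # roughly (127, 127, 127), half way from black towards white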
Replace dim attribute with a dim color.
Args:
    style: Style to dim.
    background: Background color to blend towards.
factor: Blend factor.
Returns:
New dimmed style. | def dim_style(style: Style, background: Color, factor: float) -> Style:
"""Replace dim attribute with a dim color.
Args:
        style: Style to dim.
        background: Background color to blend towards.
factor: Blend factor.
Returns:
New dimmed style.
"""
return (
style
+ Style.from_color(
dim_color(
(background.rich_color if style.bgcolor.is_default else style.bgcolor),
style.color,
factor,
),
None,
)
) + NO_DIM |
Adjust a value so it is not less than a minimum and not greater
than a maximum value.
Args:
value: A value.
minimum: Minimum value.
maximum: Maximum value.
Returns:
New value that is not less than the minimum or greater than the maximum. | def clamp(value: T, minimum: T, maximum: T) -> T:
"""Adjust a value so it is not less than a minimum and not greater
than a maximum value.
Args:
value: A value.
minimum: Minimum value.
maximum: Maximum value.
Returns:
New value that is not less than the minimum or greater than the maximum.
"""
if minimum > maximum:
maximum, minimum = minimum, maximum
if value < minimum:
return minimum
elif value > maximum:
return maximum
else:
return value |
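For example:
assert clamp(5, 0, 10) == 5
assert clamp(-3, 0, 10) == 0
assert clamp(99, 0, 10) == 10
assert clamp(5, 10, 0) == 5  # swapped bounds are tolerated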
Get the best guess for the Unicode name of the char corresponding to the key.
This function can be seen as a pseudo-inverse of the function `_character_to_key`. | def _get_unicode_name_from_key(key: str) -> str:
"""Get the best guess for the Unicode name of the char corresponding to the key.
This function can be seen as a pseudo-inverse of the function `_character_to_key`.
"""
return KEY_TO_UNICODE_NAME.get(key, key.upper()) |
Return all aliases for the given key, including the key itself | def _get_key_aliases(key: str) -> list[str]:
"""Return all aliases for the given key, including the key itself"""
return [key] + KEY_ALIASES.get(key, []) |
Given a key (i.e. the `key` string argument to Binding __init__),
return the value that should be displayed in the app when referring
to this key (e.g. in the Footer widget). | def _get_key_display(key: str) -> str:
"""Given a key (i.e. the `key` string argument to Binding __init__),
return the value that should be displayed in the app when referring
to this key (e.g. in the Footer widget)."""
if "+" in key:
return "+".join([_get_key_display(key) for key in key.split("+")])
display_alias = KEY_DISPLAY_ALIASES.get(key)
if display_alias:
return display_alias
original_key = REPLACED_KEYS.get(key, key)
tentative_unicode_name = _get_unicode_name_from_key(original_key)
try:
unicode_character = unicodedata.lookup(tentative_unicode_name)
except KeyError:
return tentative_unicode_name
# Check if printable. `delete` for example maps to a control sequence
# which we don't want to write to the terminal.
if unicode_character.isprintable():
return unicode_character
return tentative_unicode_name |
Convert a single character to a key value.
This transformation can be undone by the function `_get_unicode_name_from_key`. | def _character_to_key(character: str) -> str:
"""Convert a single character to a key value.
This transformation can be undone by the function `_get_unicode_name_from_key`.
"""
if not character.isalnum():
key = unicodedata.name(character).lower().replace("-", "_").replace(" ", "_")
else:
key = character
key = KEY_NAME_REPLACEMENTS.get(key, key)
return key |
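Illustrative results; the exact name produced for punctuation also depends on KEY_NAME_REPLACEMENTS, so treat the last line as indicative only:
assert _character_to_key("a") == "a"
assert _character_to_key("7") == "7"
print(_character_to_key("!"))  # e.g. "exclamation_mark", derived from the Unicode character name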
Raise exception for case where user presses a key and there are multiple candidate key handler methods for it. | def _raise_duplicate_key_handlers_error(
key_name: str, first_handler: str, second_handler: str
) -> None:
"""Raise exception for case where user presses a key and there are multiple candidate key handler methods for it."""
raise DuplicateKeyHandlers(
f"Multiple handlers for key press {key_name!r}.\n"
f"We found both {first_handler!r} and {second_handler!r}, "
f"and didn't know which to call.\n"
f"Consider combining them into a single handler.",
) |
Get the arguments to pass into mouse messages for the click and hover methods. | def _get_mouse_message_arguments(
target: Widget,
offset: tuple[int, int] = (0, 0),
button: int = 0,
shift: bool = False,
meta: bool = False,
control: bool = False,
) -> dict[str, Any]:
"""Get the arguments to pass into mouse messages for the click and hover methods."""
click_x, click_y = target.region.offset + offset
message_arguments = {
"x": click_x,
"y": click_y,
"delta_x": 0,
"delta_y": 0,
"button": button,
"shift": shift,
"meta": meta,
"ctrl": control,
"screen_x": click_x,
"screen_y": click_y,
}
return message_arguments |
Invoke a watch function.
Args:
watcher_object: The object watching for the changes.
watch_function: A watch function, which may be sync or async.
old_value: The old value of the attribute.
value: The new value of the attribute. | def invoke_watcher(
watcher_object: Reactable,
watch_function: WatchCallbackType,
old_value: object,
value: object,
) -> None:
"""Invoke a watch function.
Args:
watcher_object: The object watching for the changes.
watch_function: A watch function, which may be sync or async.
old_value: The old value of the attribute.
value: The new value of the attribute.
"""
_rich_traceback_omit = True
param_count = count_parameters(watch_function)
reset_token = active_message_pump.set(watcher_object)
try:
if param_count == 2:
watch_result = cast(WatchCallbackBothValuesType, watch_function)(
old_value, value
)
elif param_count == 1:
watch_result = cast(WatchCallbackNewValueType, watch_function)(value)
else:
watch_result = cast(WatchCallbackNoArgsType, watch_function)()
if isawaitable(watch_result):
# Result is awaitable, so we need to await it within an async context
watcher_object.call_next(
partial(await_watcher, watcher_object, watch_result)
)
finally:
active_message_pump.reset(reset_token) |
Watch a reactive variable on an object.
Args:
node: The node that created the watcher.
obj: The parent object.
attribute_name: The attribute to watch.
callback: A callable to call when the attribute changes.
init: True to call watcher initialization. | def _watch(
node: DOMNode,
obj: Reactable,
attribute_name: str,
callback: WatchCallbackType,
*,
init: bool = True,
) -> None:
"""Watch a reactive variable on an object.
Args:
node: The node that created the watcher.
obj: The parent object.
attribute_name: The attribute to watch.
callback: A callable to call when the attribute changes.
init: True to call watcher initialization.
"""
if not hasattr(obj, "__watchers"):
setattr(obj, "__watchers", {})
watchers: dict[str, list[tuple[Reactable, WatchCallbackType]]] = getattr(
obj, "__watchers"
)
watcher_list = watchers.setdefault(attribute_name, [])
if any(callback == callback_from_list for _, callback_from_list in watcher_list):
return
if init:
current_value = getattr(obj, attribute_name, None)
invoke_watcher(obj, callback, current_value, current_value)
watcher_list.append((node, callback)) |
Measure a rich renderable.
Args:
console: A console object.
renderable: Rich renderable.
default: Default width to use if renderable does not expose dimensions.
container_width: Width of container or None to use console width.
Returns:
Width in cells | def measure(
console: Console,
renderable: RenderableType,
default: int,
*,
container_width: int | None = None,
) -> int:
"""Measure a rich renderable.
Args:
console: A console object.
renderable: Rich renderable.
default: Default width to use if renderable does not expose dimensions.
container_width: Width of container or None to use console width.
Returns:
Width in cells
"""
if isinstance(renderable, str):
return cell_len(renderable)
width = default
renderable = rich_cast(renderable)
get_console_width = getattr(renderable, "__rich_measure__", None)
if get_console_width is not None:
render_width = get_console_width(
console,
(
console.options
if container_width is None
else console.options.update_width(container_width)
),
).maximum
width = max(0, render_width)
return width |
Get the line length (total length of all segments).
Args:
segments: Iterable of segments.
Returns:
Length of line in cells. | def get_line_length(segments: Iterable[Segment]) -> int:
"""Get the line length (total length of all segments).
Args:
segments: Iterable of segments.
Returns:
Length of line in cells.
"""
_cell_len = cell_len
return sum([_cell_len(text) for text, _, control in segments if not control]) |
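For instance, double-width (CJK) characters count as two cells and control segments are skipped:
from rich.segment import Segment

assert get_line_length([Segment("foo"), Segment("你好")]) == 3 + 4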
Returns a close match of `word` amongst `possible_words`.
Args:
word: The word we want to find a close match for
possible_words: The words amongst which we want to find a close match
Returns:
The closest match amongst the `possible_words`. Returns `None` if no close matches could be found.
Example: returns "red" for word "redu" and possible words ("yellow", "red") | def get_suggestion(word: str, possible_words: Sequence[str]) -> str | None:
"""
Returns a close match of `word` amongst `possible_words`.
Args:
word: The word we want to find a close match for
possible_words: The words amongst which we want to find a close match
Returns:
The closest match amongst the `possible_words`. Returns `None` if no close matches could be found.
Example: returns "red" for word "redu" and possible words ("yellow", "red")
"""
possible_matches = get_close_matches(word, possible_words, n=1)
return None if not possible_matches else possible_matches[0] |
Returns a list of up to `count` matches of `word` amongst `possible_words`.
Args:
    word: The word we want to find a close match for.
    possible_words: The words amongst which we want to find close matches.
    count: The maximum number of close matches to return.
Returns:
The closest matches amongst the `possible_words`, from the closest to the least close.
Returns an empty list if no close matches could be found.
Example: returns ["yellow", "ellow"] for word "yllow" and possible words ("yellow", "red", "ellow") | def get_suggestions(word: str, possible_words: Sequence[str], count: int) -> list[str]:
"""
Returns a list of up to `count` matches of `word` amongst `possible_words`.
Args:
        word: The word we want to find a close match for.
        possible_words: The words amongst which we want to find close matches.
        count: The maximum number of close matches to return.
Returns:
The closest matches amongst the `possible_words`, from the closest to the least close.
Returns an empty list if no close matches could be found.
Example: returns ["yellow", "ellow"] for word "yllow" and possible words ("yellow", "red", "ellow")
"""
return get_close_matches(word, possible_words, n=count) |
Walk the tree depth first (parents first).
!!! note
Avoid changing the DOM (mounting, removing etc.) while iterating with this function.
Consider [walk_children][textual.dom.DOMNode.walk_children] which doesn't have this limitation.
Args:
    root: The root node (starting point).
filter_type: Optional DOMNode subclass to filter by, or ``None`` for no filter.
with_root: Include the root in the walk.
Returns:
An iterable of DOMNodes, or the type specified in ``filter_type``. | def walk_depth_first(
root: DOMNode,
filter_type: type[WalkType] | None = None,
*,
with_root: bool = True,
) -> Iterable[DOMNode] | Iterable[WalkType]:
"""Walk the tree depth first (parents first).
!!! note
Avoid changing the DOM (mounting, removing etc.) while iterating with this function.
Consider [walk_children][textual.dom.DOMNode.walk_children] which doesn't have this limitation.
Args:
        root: The root node (starting point).
filter_type: Optional DOMNode subclass to filter by, or ``None`` for no filter.
with_root: Include the root in the walk.
Returns:
An iterable of DOMNodes, or the type specified in ``filter_type``.
"""
from textual.dom import DOMNode
stack: list[Iterator[DOMNode]] = [iter(root.children)]
pop = stack.pop
push = stack.append
check_type = filter_type or DOMNode
if with_root and isinstance(root, check_type):
yield root
while stack:
node = next(stack[-1], None)
if node is None:
pop()
else:
if isinstance(node, check_type):
yield node
if node.children:
push(iter(node.children)) |
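A sketch of walking a mounted DOM from inside an app, assuming `walk_depth_first` is in scope; `Button` from `textual.widgets` is used purely as an example filter type:
from textual.app import App, ComposeResult
from textual.widgets import Button

class WalkDemo(App):
    def compose(self) -> ComposeResult:
        yield Button("One")
        yield Button("Two")

    def on_mount(self) -> None:
        # Parents are yielded before children; with_root=False skips the screen itself.
        buttons = list(walk_depth_first(self.screen, Button, with_root=False))
        self.log(buttons)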
Walk the tree breadth first (children first).
!!! note
Avoid changing the DOM (mounting, removing etc.) while iterating with this function.
Consider [walk_children][textual.dom.DOMNode.walk_children] which doesn't have this limitation.
Args:
    root: The root node (starting point).
filter_type: Optional DOMNode subclass to filter by, or ``None`` for no filter.
with_root: Include the root in the walk.
Returns:
An iterable of DOMNodes, or the type specified in ``filter_type``. | def walk_breadth_first(
root: DOMNode,
filter_type: type[WalkType] | None = None,
*,
with_root: bool = True,
) -> Iterable[DOMNode] | Iterable[WalkType]:
"""Walk the tree breadth first (children first).
!!! note
Avoid changing the DOM (mounting, removing etc.) while iterating with this function.
Consider [walk_children][textual.dom.DOMNode.walk_children] which doesn't have this limitation.
Args:
        root: The root node (starting point).
filter_type: Optional DOMNode subclass to filter by, or ``None`` for no filter.
with_root: Include the root in the walk.
Returns:
An iterable of DOMNodes, or the type specified in ``filter_type``.
"""
from textual.dom import DOMNode
queue: deque[DOMNode] = deque()
popleft = queue.popleft
extend = queue.extend
check_type = filter_type or DOMNode
if with_root and isinstance(root, check_type):
yield root
extend(root.children)
while queue:
node = popleft()
if isinstance(node, check_type):
yield node
extend(node.children) |
Get the currently active worker.
Raises:
NoActiveWorker: If there is no active worker.
Returns:
A Worker instance. | def get_current_worker() -> Worker:
"""Get the currently active worker.
Raises:
NoActiveWorker: If there is no active worker.
Returns:
A Worker instance.
"""
try:
return active_worker.get()
except LookupError:
raise NoActiveWorker(
"There is no active worker in this task or thread."
) from None |
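Typical use is inside a worker started with the `@work` decorator, where cancellation can be checked cooperatively (a sketch assuming the `textual.work` decorator and `textual.worker` module of recent Textual releases):
from textual import work
from textual.app import App
from textual.worker import get_current_worker

class WorkerDemo(App):
    @work(thread=True)
    def crunch_numbers(self) -> None:
        worker = get_current_worker()
        if not worker.is_cancelled:
            ...  # the heavy lifting happens here, off the event loop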
Organize widgets into layers.
Args:
widgets: The widgets.
Returns:
A mapping of layer name onto the widgets within the layer. | def _build_dock_layers(widgets: Iterable[Widget]) -> Mapping[str, Sequence[Widget]]:
"""Organize widgets into layers.
Args:
widgets: The widgets.
Returns:
A mapping of layer name onto the widgets within the layer.
"""
layers: defaultdict[str, list[Widget]] = defaultdict(list)
for widget in widgets:
layers[widget.layer].append(widget)
return layers |
Arrange widgets by applying docks and calling layouts.
Args:
    widget: The parent (container) widget.
    children: The child widgets to arrange.
size: The size of the available area.
viewport: The size of the viewport (terminal).
Returns:
Widget arrangement information. | def arrange(
widget: Widget, children: Sequence[Widget], size: Size, viewport: Size
) -> DockArrangeResult:
"""Arrange widgets by applying docks and calling layouts
Args:
widget: The parent (container) widget.
size: The size of the available area.
viewport: The size of the viewport (terminal).
Returns:
Widget arrangement information.
"""
placements: list[WidgetPlacement] = []
scroll_spacing = Spacing()
get_dock = attrgetter("styles.dock")
styles = widget.styles
# Widgets which will be displayed
display_widgets = [child for child in children if child.styles.display != "none"]
# Widgets organized into layers
dock_layers = _build_dock_layers(display_widgets)
layer_region = size.region
for widgets in dock_layers.values():
region = layer_region
        # Partition widgets into "layout" widgets (those that appear in the normal 'flow' of the
# document), and "dock" widgets which are positioned relative to an edge
layout_widgets, dock_widgets = partition(get_dock, widgets)
# Arrange docked widgets
_dock_placements, dock_spacing = _arrange_dock_widgets(
dock_widgets, size, viewport
)
placements.extend(_dock_placements)
# Reduce the region to compensate for docked widgets
region = region.shrink(dock_spacing)
if layout_widgets:
# Arrange layout widgets (i.e. not docked)
layout_placements = widget._layout.arrange(
widget,
layout_widgets,
region.size,
)
scroll_spacing = scroll_spacing.grow_maximum(dock_spacing)
placement_offset = region.offset
# Perform any alignment of the widgets.
if styles.align_horizontal != "left" or styles.align_vertical != "top":
bounding_region = WidgetPlacement.get_bounds(layout_placements)
placement_offset += styles._align_size(
bounding_region.size, region.size
).clamped
if placement_offset:
# Translate placements if required.
layout_placements = WidgetPlacement.translate(
layout_placements, placement_offset
)
placements.extend(layout_placements)
return DockArrangeResult(placements, set(display_widgets), scroll_spacing) |