Update app.py
app.py CHANGED
@@ -298,7 +298,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue="pink", secondary_hue="rose"), t
         button_tts.click(text_to_speech_edge, inputs=[text_tts, dropdown_tts], outputs=[sound_gui])
 
         with gr.Accordion("Settings", open=False):
-            pitch_algo_conf = gr.Radio(
+            pitch_algo_conf = gr.Radio(choices=PITCH_ALGO_OPT, value=PITCH_ALGO_OPT[4], label="Pitch algorithm", visible=True, interactive=True)  # Dropdown is 🤡
             with gr.Row(equal_height=True):
                 pitch_lvl_conf = gr.Slider(label="Pitch level (lower -> 'male' while higher -> 'female')", minimum=-24, maximum=24, step=1, value=0, visible=True, interactive=True)
                 index_inf_conf = gr.Slider(minimum=0, maximum=1, label="Index influence -> How much accent is applied", value=0.75)
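For context, the updated Settings block can be exercised on its own. The sketch below assumes a hypothetical PITCH_ALGO_OPT list; the real list is defined elsewhere in app.py and its values are not part of this diff:

```python
# Minimal sketch of the updated Settings accordion, assuming a hypothetical
# PITCH_ALGO_OPT list (the real one lives elsewhere in app.py).
import gradio as gr

PITCH_ALGO_OPT = ["pm", "harvest", "crepe", "rmvpe", "rmvpe+"]  # hypothetical values

with gr.Blocks() as demo:
    with gr.Accordion("Settings", open=False):
        pitch_algo_conf = gr.Radio(
            choices=PITCH_ALGO_OPT,
            value=PITCH_ALGO_OPT[4],  # pre-select the last entry, as the diff does
            label="Pitch algorithm",
            visible=True,
            interactive=True,
        )
        with gr.Row(equal_height=True):
            pitch_lvl_conf = gr.Slider(
                label="Pitch level (lower -> 'male' while higher -> 'female')",
                minimum=-24,
                maximum=24,
                step=1,
                value=0,
            )
            index_inf_conf = gr.Slider(
                label="Index influence -> How much accent is applied",
                minimum=0,
                maximum=1,
                value=0.75,
            )

if __name__ == "__main__":
    demo.launch()
```

Passing choices and value at construction time means the radio renders with a default selection before any event fires, which is why the one-line gr.Radio(...) call replaces the previously open-ended one.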
@@ -485,37 +485,7 @@ with gr.Blocks(theme=gr.themes.Base(primary_hue="pink", secondary_hue="rose"), t
         with gr.Accordion("Model Quantization", open=False):
             gr.Markdown("Quantize the model to a lower precision. - soon™ or never™ 😎")
 
-
-        def json_to_markdown_table(json_data):
-            table = "| Key | Value |\n| --- | --- |\n"
-            for key, value in json_data.items():
-                table += f"| {key} | {value} |\n"
-            return table
-        gr.Markdown("View the models that are currently loaded in the instance.")
-
-        gr.Markdown(json_to_markdown_table({"Models": len(MODELS), "UVR Models": len(UVR_5_MODELS)}))
-
-        gr.Markdown("View the current status of the instance.")
-        status = {
-            "Status": "Running",  # duh lol
-            "Models": len(MODELS),
-            "UVR Models": len(UVR_5_MODELS),
-            "CPU Usage": f"{psutil.cpu_percent()}%",
-            "RAM Usage": f"{psutil.virtual_memory().percent}%",
-            "CPU": f"{cpuinfo.get_cpu_info()['brand_raw']}",
-            "System Uptime": f"{round(time.time() - psutil.boot_time(), 2)} seconds",
-            "System Load Average": f"{psutil.getloadavg()}",
-            "====================": "====================",
-            "CPU Cores": psutil.cpu_count(),
-            "CPU Threads": psutil.cpu_count(logical=True),
-            "RAM Total": f"{round(psutil.virtual_memory().total / 1024**3, 2)} GB",
-            "RAM Used": f"{round(psutil.virtual_memory().used / 1024**3, 2)} GB",
-            "CPU Frequency": f"{psutil.cpu_freq().current} MHz",
-            "====================": "====================",
-            "GPU": "A100 - Do a request (Inference, you won't see it either way)",
-        }
-        gr.Markdown(json_to_markdown_table(status))
-
+
     with gr.Tab("Credits"):
         gr.Markdown(
             """
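For reference, the deleted block rendered an instance-status table from psutil and py-cpuinfo readings into a gr.Markdown component. Below is a standalone sketch of that pattern; the app-specific MODELS / UVR_5_MODELS counters are omitted because they are defined elsewhere in app.py:

```python
# Standalone sketch of the removed status-table helper, assuming the
# psutil and py-cpuinfo packages (both were imported by the old app.py).
import time

import cpuinfo  # pip install py-cpuinfo
import psutil


def json_to_markdown_table(json_data):
    """Render a flat dict as a two-column Markdown table."""
    table = "| Key | Value |\n| --- | --- |\n"
    for key, value in json_data.items():
        table += f"| {key} | {value} |\n"
    return table


status = {
    "Status": "Running",
    "CPU Usage": f"{psutil.cpu_percent()}%",
    "RAM Usage": f"{psutil.virtual_memory().percent}%",
    "CPU": cpuinfo.get_cpu_info()["brand_raw"],
    "CPU Cores": psutil.cpu_count(logical=False),   # physical cores
    "CPU Threads": psutil.cpu_count(logical=True),  # logical processors
    "RAM Total": f"{round(psutil.virtual_memory().total / 1024**3, 2)} GB",
    "RAM Used": f"{round(psutil.virtual_memory().used / 1024**3, 2)} GB",
    "System Uptime": f"{round(time.time() - psutil.boot_time(), 2)} seconds",
}

# In the old UI this string was passed to gr.Markdown(); printing shows the same table.
print(json_to_markdown_table(status))
```

Note that the removed status dict used the "====================" separator key twice; since Python dicts cannot hold duplicate keys, only one separator row ever rendered in the table.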