import random

import gradio as gr
import numpy as np
import pandas as pd


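# Callbacks for the "Flip" buttons wired up inside the Blocks below.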
def flip_text(x):
    return x[::-1]


def flip_image(x):
    return np.fliplr(x)


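# Synthetic demo data for the scatter and line plots.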
df = pd.DataFrame({
    'Year': np.random.randint(2000, 2024, 25),
    'Reviews': np.random.randint(120, 320, 25),
    'age': np.random.randint(18, 30, 25),
    'ethnicity': [random.choice(["white", "black", "asian"]) for _ in range(25)]
})

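# Soft theme with a yellow/amber palette, compact spacing, and large corner radius.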
theme = gr.themes.Soft(
    primary_hue="yellow",
    secondary_hue="amber",
    spacing_size="sm",
    radius_size="lg",
)

with gr.Blocks(theme=theme) as demo:
    gr.ScatterPlot(df, x="Reviews", y="age", color="age")
    gr.LinePlot(df, x="Year", y="Reviews")
    gr.Slider(2000, 2024, value=2024, label="Count", info="Choose between 2000 and 2024")
    gr.Markdown("Flip text or image files using this demo.")

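    # Text and image flippers, each in its own tab with its own Flip button.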
with gr.Tab("User Interface"): |
|
text_input = gr.Textbox() |
|
text_output = gr.Textbox() |
|
text_button = gr.Button("Flip") |
|
with gr.Tab("Testing Area"): |
|
with gr.Row(): |
|
image_input = gr.Image() |
|
image_output = gr.Image() |
|
image_button = gr.Button("Flip") |
|
with gr.Row("Flip Text"): |
|
text_input = gr.Textbox() |
|
text_output = gr.Textbox() |
|
text_button = gr.Button("Flip") |
|
with gr.Column(visible=False) as output_col: |
|
text_input = gr.Textbox() |
|
text_output = gr.Textbox() |
|
text_button = gr.Button("Flip") |
|
|
|
|
|
    with gr.Accordion("Open for More!", open=False):
        gr.Markdown("Look at me...")
        temp_slider = gr.Slider(
            0, 1,
            value=0.1,
            step=0.1,
            interactive=True,
            label="Slide me",
        )

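    # Wire each Flip button to its matching input/output pair.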
    text_button.click(flip_text, inputs=text_input, outputs=text_output)
    flip_text_button.click(flip_text, inputs=flip_text_input, outputs=flip_text_output)
    hidden_button.click(flip_text, inputs=hidden_input, outputs=hidden_output)
    image_button.click(flip_image, inputs=image_input, outputs=image_output)

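    # track_count controls how many audio tracks the render block below draws.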
    track_count = gr.State(1)
    add_track_btn = gr.Button("Add Track")

    add_track_btn.click(lambda count: count + 1, track_count, track_count)

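    # Re-runs whenever track_count changes; key= lets components keep their values across re-renders.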
    @gr.render(inputs=track_count)
    def render_tracks(count):
        audios = []
        volumes = []
        with gr.Row():
            for i in range(count):
                with gr.Column(variant="panel", min_width=200):
                    gr.Textbox(placeholder="Data Name", key=f"name-{i}", show_label=False)
                    track_audio = gr.Audio(label=f"Data {i}", key=f"track-{i}")
                    track_volume = gr.Slider(0, 100, value=100, label="Volume", key=f"volume-{i}")
                    audios.append(track_audio)
                    volumes.append(track_volume)

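        # Scale each track by its volume, trim to a common length, and sum the results.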
        def merge(data):
            sr, output = None, None
            for audio, volume in zip(audios, volumes):
                sr, audio_val = data[audio]
                volume_val = data[volume]
                final_track = audio_val * (volume_val / 100)
                if output is None:
                    output = final_track
                else:
                    min_shape = tuple(min(s1, s2) for s1, s2 in zip(output.shape, final_track.shape))
                    trimmed_output = output[:min_shape[0], ...][:, :min_shape[1], ...] if output.ndim > 1 else output[:min_shape[0]]
                    trimmed_final = final_track[:min_shape[0], ...][:, :min_shape[1], ...] if final_track.ndim > 1 else final_track[:min_shape[0]]
                    output = trimmed_output + trimmed_final
            return (sr, output)

        merge_btn.click(merge, set(audios + volumes), output_audio)

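    # merge_btn and output_audio are created below, after the render function; they exist by the time render_tracks runs.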
    merge_btn = gr.Button("Merge Tracks")
    output_audio = gr.Audio(label="Output", interactive=False)


demo.launch()