# import gradio as gr
# import os
# import shutil
# HF_TOKEN = os.getenv('HF_TOKEN')
# hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "save_audio")
# class TextFileReader:
# def __init__(self):
# self.lines = []
# self.current_index = 0
# def read_lines(self, file):
# self.lines = file.decode('utf-8').splitlines()
# self.current_index = 0
# return self.get_current_line()
# def get_current_line(self):
# if 0 <= self.current_index < len(self.lines):
# return self.lines[self.current_index]
# else:
# return "End of file reached."
# def forward_line(self):
# self.current_index = min(self.current_index + 1, len(self.lines) - 1)
# return self.get_current_line()
# def backward_line(self):
# self.current_index = max(self.current_index - 1, 0)
# return self.get_current_line()
# reader = TextFileReader()
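# # A quick usage sketch (values hypothetical): read_lines() takes the raw
# # bytes that gr.File(type="binary") delivers and returns the first line,
# # while forward_line() advances and clamps at the last line:
# #   reader.read_lines(b"first\nsecond")  ->  "first"
# #   reader.forward_line()                ->  "second"
# #   reader.forward_line()                ->  "second"  (clamped)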
# # Define a function to save the text lines to a file
# def save_text_lines(file):
#     first_line = reader.read_lines(file)
#     with open("text_lines.txt", "w") as f:
#         f.write("\n".join(reader.lines))
#     return first_line
# # Define a function to save the audio file and corresponding text
# def save_audio_text(audio, text):
#     os.makedirs("recordings", exist_ok=True)
#     # Debugging: print out the structure of the audio variable
#     print("Received audio data:", audio)
#     audio_path = f"recordings/line_{reader.current_index}.wav"
#     text_path = f"recordings/line_{reader.current_index}.txt"
#     # With type="filepath", gr.Audio passes the path of a temp file;
#     # the dict branch is kept as a fallback for payloads that arrive
#     # as {'data': <bytes>} from older Gradio versions
#     if isinstance(audio, str) and os.path.exists(audio):
#         shutil.copy(audio, audio_path)
#     elif isinstance(audio, dict) and 'data' in audio:
#         with open(audio_path, "wb") as f:
#             f.write(audio['data'])
#     else:
#         return "Audio data is not in the expected format."
#     with open(text_path, "w") as f:
#         f.write(text)
#     # Move to the next line after saving
#     return reader.forward_line()
# # Define the Gradio interface
# with gr.Blocks() as demo:
# with gr.Row():
# file_upload = gr.File(label="Upload a text file", type="binary")
# generate_button = gr.Button("Generate Lines")
# current_line = gr.Textbox(label="Current Line")
#     def update_output(file):
#         # save_text_lines() already calls read_lines(), so a single
#         # call both persists the file and returns the first line
#         return save_text_lines(file)
# generate_button.click(fn=update_output, inputs=file_upload, outputs=current_line)
# with gr.Row():
# audio_record = gr.Audio(sources=["microphone","upload"], type="filepath")
# save_button = gr.Button("Save Audio and Next Line")
# save_button.click(fn=save_audio_text, inputs=[audio_record, current_line], outputs=current_line)
# demo.launch()
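# # A headless sketch of one record-and-save cycle, kept commented out like
# # the demo above ("sample.wav" is a hypothetical existing recording):
# #   reader.read_lines(b"first line\nsecond line")  ->  "first line"
# #   save_audio_text("sample.wav", "first line")    ->  "second line"
# # This writes recordings/line_0.wav and recordings/line_0.txt, then
# # advances the reader to the next line.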
import gradio as gr
import os
def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        if num2 == 0:
            # Raise a user-visible error instead of an unhandled ZeroDivisionError
            raise gr.Error("Cannot divide by zero.")
        return num1 / num2
# Earlier version with local manual flagging (rows go to gradio's default
# CSVLogger under ./flagged/). Kept commented out: launch() blocks, so the
# dataset-backed interface below would never start after it.
# iface = gr.Interface(
#     calculator,
#     ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
#     "number",
#     allow_flagging="manual",
#     flagging_options=["correct", "wrong"]
# )
# iface.launch()
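
# Flagging callback that appends each manually flagged sample to a
# Hugging Face dataset repo instead of a local CSV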
HF_TOKEN = os.getenv('HF_TOKEN')
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced")
iface = gr.Interface(
calculator,
["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
"number",
description="Check out the crowd-sourced dataset at: [https://huggingface.co/Sajjo/crowdsourced](https://huggingface.co/Sajjo/crowdsourced)",
allow_flagging="manual",
flagging_options=["wrong sign", "off by one", "other"],
flagging_callback=hf_writer
)
iface.launch()
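
# To inspect the collected flags later, the dataset can be read back with
# the `datasets` library. A minimal sketch, assuming the repo above exists
# and is public; kept commented out so it never runs inside this app:
# from datasets import load_dataset
# flags = load_dataset("Sajjo/crowdsourced")
# print(flags["train"][0])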
# import numpy as np
# import gradio as gr
# def sepia(input_img, strength):
# sepia_filter = strength * np.array(
# [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]
# ) + (1-strength) * np.identity(3)
# sepia_img = input_img.dot(sepia_filter.T)
# sepia_img /= sepia_img.max()
# return sepia_img
# callback = gr.CSVLogger()
# with gr.Blocks() as demo:
# with gr.Row():
# with gr.Column():
# img_input = gr.Image()
# strength = gr.Slider(0, 1, 0.5)
# img_output = gr.Image()
# with gr.Row():
# btn = gr.Button("Flag")
# # This needs to be called at some point prior to the first call to callback.flag()
# callback.setup([img_input, strength, img_output], "flagged_data_points")
# img_input.change(sepia, [img_input, strength], img_output)
# strength.change(sepia, [img_input, strength], img_output)
# # We can choose which components to flag -- in this case, we'll flag all of them
# btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)
# demo.launch()
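# # A minimal sketch of reading the flagged rows back: CSVLogger appends
# # flagged inputs/outputs to a log.csv inside the directory passed to
# # callback.setup() above:
# import os
# import pandas as pd
# if os.path.exists("flagged_data_points/log.csv"):
#     flagged = pd.read_csv("flagged_data_points/log.csv")
#     print(flagged.head())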