# import gradio as gr
# import os
# HF_TOKEN = os.getenv('HW_Token')
# hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "save_audio")
# import gradio as gr
# import os
# import shutil
#
# # Reads an uploaded text file and steps through it one line at a time
# class TextFileReader:
#     def __init__(self):
#         self.lines = []
#         self.current_index = 0
#
#     def read_lines(self, file):
#         self.lines = file.decode('utf-8').splitlines()
#         self.current_index = 0
#         return self.get_current_line()
#
#     def get_current_line(self):
#         if 0 <= self.current_index < len(self.lines):
#             return self.lines[self.current_index]
#         else:
#             return "End of file reached."
#
#     def forward_line(self):
#         self.current_index = min(self.current_index + 1, len(self.lines) - 1)
#         return self.get_current_line()
#
#     def backward_line(self):
#         self.current_index = max(self.current_index - 1, 0)
#         return self.get_current_line()
#
# reader = TextFileReader()
#
# # Define a function to save the text lines to a file
# def save_text_lines(file):
#     lines = reader.read_lines(file)
#     with open("text_lines.txt", "w") as f:
#         f.write("\n".join(reader.lines))
#     return lines
#
# # Define a function to save the audio file and corresponding text
# def save_audio_text(audio, text):
#     os.makedirs("recordings", exist_ok=True)
#     # Debugging to print out the structure of the audio variable
#     print("Received audio data:", audio)
#     # With type="filepath", the Audio component passes the path of the recorded file
#     if isinstance(audio, str) and os.path.exists(audio):
#         audio_path = f"recordings/line_{reader.current_index}.wav"
#         text_path = f"recordings/line_{reader.current_index}.txt"
#         shutil.copy(audio, audio_path)
#         with open(text_path, "w") as f:
#             f.write(text)
#         # Move to the next line after saving
#         return reader.forward_line()
#     else:
#         return "Audio data is not in the expected format."
#
# # Define the Gradio interface
# with gr.Blocks() as demo:
#     with gr.Row():
#         file_upload = gr.File(label="Upload a text file", type="binary")
#         generate_button = gr.Button("Generate Lines")
#         current_line = gr.Textbox(label="Current Line")
#
#         def update_output(file):
#             lines = reader.read_lines(file)
#             save_text_lines(file)  # Save the text lines to a file
#             return lines
#
#         generate_button.click(fn=update_output, inputs=file_upload, outputs=current_line)
#
#     with gr.Row():
#         audio_record = gr.Audio(sources=["microphone", "upload"], type="filepath")
#         save_button = gr.Button("Save Audio and Next Line")
#
#         save_button.click(fn=save_audio_text, inputs=[audio_record, current_line], outputs=current_line)
#
# demo.launch()
import gradio as gr

def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        if num2 == 0:
            raise gr.Error("Cannot divide by zero!")
        return num1 / num2

iface = gr.Interface(
    calculator,
    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    allow_flagging="manual",
    flagging_options=["correct", "wrong"]
)
# iface.launch()  # Launching here would block the script; the interface below,
#                 # which logs flags to a Hugging Face dataset, is the one meant to be served.
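# Note: with allow_flagging="manual" and no flagging_callback, Gradio falls back to its
# built-in CSV logger, so flagged samples typically end up in flagged/log.csv next to the
# script (the exact location can vary between Gradio versions).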
import os

HF_TOKEN = os.getenv('HF_TOKEN')
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced")

iface = gr.Interface(
    calculator,
    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    description="Check out the crowd-sourced dataset at: [https://huggingface.co/Sajjo/crowdsourced](https://huggingface.co/Sajjo/crowdsourced)",
    allow_flagging="manual",
    flagging_options=["wrong sign", "off by one", "other"],
    flagging_callback=hf_writer
)
iface.launch()
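# Optional sanity check (a minimal sketch, kept commented out like the experiments above):
# once some rows have been flagged, HuggingFaceDatasetSaver pushes them to the "crowdsourced"
# dataset repo linked in the description, and they can be read back with the `datasets`
# library. The repo id and split name below are assumptions based on the URL above.
# from datasets import load_dataset
# flagged = load_dataset("Sajjo/crowdsourced", split="train")
# print(flagged[0])  # inspect one flagged calculator example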
# import numpy as np
# import gradio as gr
#
# def sepia(input_img, strength):
#     sepia_filter = strength * np.array(
#         [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]
#     ) + (1 - strength) * np.identity(3)
#     sepia_img = input_img.dot(sepia_filter.T)
#     sepia_img /= sepia_img.max()
#     return sepia_img
#
# callback = gr.CSVLogger()
#
# with gr.Blocks() as demo:
#     with gr.Row():
#         with gr.Column():
#             img_input = gr.Image()
#             strength = gr.Slider(0, 1, 0.5)
#         img_output = gr.Image()
#     with gr.Row():
#         btn = gr.Button("Flag")
#
#     # This needs to be called at some point prior to the first call to callback.flag()
#     callback.setup([img_input, strength, img_output], "flagged_data_points")
#
#     img_input.change(sepia, [img_input, strength], img_output)
#     strength.change(sepia, [img_input, strength], img_output)
#
#     # We can choose which components to flag -- in this case, we'll flag all of them
#     btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)
#
# demo.launch()
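# The CSVLogger above appends each flagged row under the "flagged_data_points" directory
# passed to callback.setup() (usually as a log.csv file, though the exact layout can vary
# between Gradio versions). A minimal, hedged sketch for inspecting the flagged rows:
# import pandas as pd
# df = pd.read_csv("flagged_data_points/log.csv")
# print(df.head())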