import spaces
import gradio as gr
from transformers import pipeline
import os
import torch
title = """# 🙋🏻♂️Welcome to🌟Tonic's Nexus🐦⬛Raven"""
description = """You can build with this endpoint using Nexus Raven. The demo is still a work in progress but we hope to add some endpoints for commonly used functions such as intention mappers and audiobook processing.
You can also use Nexus🐦⬛Raven on your laptop & by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic1/NexusRaven2?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
Join us: 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [on Discord](https://discord.gg/GWpVpekp). On 🤗Hugging Face: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer). On 🌐GitHub: [Tonic-AI](https://github.com/tonic-ai) & contribute to 🌟[DataTonic](https://github.com/Tonic-AI/DataTonic). 🤗Big thanks to Yuvi Sharma and all the folks at Hugging Face for the community grant 🤗
"""
# Load NexusRaven-V2-13B once at startup; dtype and device placement are chosen automatically.
raven_pipeline = pipeline(
    "text-generation",
    model="Nexusflow/NexusRaven-V2-13B",
    torch_dtype="auto",
    device_map="auto",
)
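# A minimal sketch of calling the pipeline directly (illustrative only; the output text depends
# on the model): it returns a list of dicts with a "generated_text" key, which is how
# process_text() below indexes the result.
#
#   out = raven_pipeline("User Query: What is 2 + 2?<human_end>",
#                        max_new_tokens=20, return_full_text=False, do_sample=False)
#   answer = out[0]["generated_text"]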
@spaces.GPU
def process_text(input_text: str) -> str:
    prompt = f"User Query: {input_text}<human_end>"
    result = raven_pipeline(
        prompt,
        stop_sequence="<bot_end>",  # stop generating at the model's end-of-call marker
        max_new_tokens=2000,
        return_full_text=False,
        do_sample=False,
    )[0]["generated_text"]
    # torch.cuda.empty_cache()
    return result
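# Illustrative sketch (not part of the original app) of the kind of text a user pastes into the
# input box: function signatures with docstrings, followed by the task. The function name below
# is made up; see the NexusRaven-V2 model card for the exact prompt conventions.
#
#   Function:
#   def get_weather(city: str):
#       """Return the current weather for the given city."""
#
#   What is the weather in Paris?
#
# process_text() prefixes this with "User Query: " and appends "<human_end>" before generation.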
def create_interface():
    # Alternative row-based interface; main() below builds the UI that is actually launched.
    with gr.Blocks() as app:
        gr.Markdown(title)
        gr.Markdown(description)
        with gr.Row():
            input_text = gr.Textbox(label="Input Text")
            submit_button = gr.Button("Submit")
        output_text = gr.Textbox(label="Nexus🐦⬛Raven")
        submit_button.click(process_text, inputs=input_text, outputs=output_text)
    return app
def main():
    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)
        input_text = gr.Code(language='json', label="Input your functions, then your task:")
        submit_button = gr.Button("Submit")
        output_text = gr.Code(language='json', label="Nexus🐦⬛Raven")
        submit_button.click(process_text, inputs=input_text, outputs=output_text)
    demo.launch()
if __name__ == "__main__":
    main()
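# Minimal sketch of querying this Space programmatically with gradio_client (assumes the Space
# is running and that the click handler is exposed as "/predict"; adjust the Space name and
# api_name to match the actual deployment):
#
#   from gradio_client import Client
#
#   client = Client("Tonic1/NexusRaven2")
#   result = client.predict("def add(a: int, b: int): ...\n\nAdd 2 and 3.", api_name="/predict")
#   print(result)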