import gradio as gr
import os


#os.system(f"pip install torch torchvision")
os.system(f"pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116")
os.system(f"pip install git+https://github.com/huggingface/transformers")
#os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui")


# Import Hugging Face's Transformers pipeline API
from transformers import pipeline
# pprint logs outputs in a nicer format when inspecting results by hand
from pprint import pprint

# from transformers import GPTJForCausalLM
# import torch

# model = GPTJForCausalLM.from_pretrained(
#     "EleutherAI/gpt-j-6B", revision="float16", torch_dtype=torch.float16, low_cpu_mem_usage=True
# )

generator = pipeline('text-generation', model='EleutherAI/gpt-neo-1.3B')
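
# A minimal sanity check (hypothetical prompt and settings, not part of the
# app flow): the text-generation pipeline returns a list of dicts, each with
# a 'generated_text' key holding the prompt plus its continuation.
#
#   sample = generator("Hello, world", max_length=20, do_sample=True)
#   pprint(sample)  # e.g. [{'generated_text': 'Hello, world ...'}]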

# from transformers import GPTJForCausalLM, AutoTokenizer
# import torch

# model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.float16, low_cpu_mem_usage=True)
# tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")

# prompt = (
#     "In a shocking finding, scientists discovered a herd of unicorns living in a remote, "
#     "previously unexplored valley, in the Andes Mountains. Even more surprising to the "
#     "researchers was the fact that the unicorns spoke perfect English."
# )

# input_ids = tokenizer(prompt, return_tensors="pt").input_ids

# gen_tokens = model.generate(
#     input_ids,
#     do_sample=True,
#     temperature=0.9,
#     max_length=100,
# )
# gen_text = tokenizer.batch_decode(gen_tokens)[0]

def run(prompt, max_len, temp):
    # Cast the slider value to int since max_length must be an integer.
    # The pipeline returns a list of dicts; the text is under 'generated_text'.
    output = generator(prompt, do_sample=True, min_length=1, max_length=int(max_len), temperature=temp)
    # The empty second element clears the "Log information" panel on success.
    return output[0]['generated_text'], ""

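# Example call (assumed values, mirroring the UI defaults below):
#   text, log = run("Once upon a time", max_len=50, temp=0.7)
#   # -> (generated continuation, "")
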
if __name__ == "__main__":
    demo = gr.Blocks()
    with demo:
        with gr.Row():
            with gr.Column():
                text = gr.Textbox(
                    label="Input",
                    value=" ",  # start from a blank (single-space) prompt
                )
                tokens = gr.Slider(1, 250, value=50, step=1, label="Tokens to generate")
                temp = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")

                with gr.Row():
                    submit = gr.Button("Submit")
            with gr.Column():
                text_error = gr.Markdown(label="Log information")
                text_out = gr.Textbox(label="Output")
        submit.click(
            run,
            inputs=[text, tokens, temp],
            outputs=[text_out, text_error],
        )

    demo.launch()

# One-line alternative: serve a ready-made demo of a hosted model instead of
# the custom Blocks UI above.
#gr.Interface.load("models/EleutherAI/gpt-j-6B").launch()