yabramuvdi committed on
Commit
9a72c69
·
verified ·
1 Parent(s): ac72c21

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -42
app.py CHANGED
@@ -13,9 +13,6 @@ AVAILABLE_MODELS = {
13
  "pythia-160m": "EleutherAI/pythia-160m"
14
  }
15
 
16
- # Access token for Hugging Face
17
- HF_TOKEN = os.getenv('HF_TOKEN')
18
-
19
  # Initialize model and tokenizer globally
20
  current_model = None
21
  current_tokenizer = None
@@ -24,8 +21,8 @@ current_model_name = None
24
  def load_model(model_name):
25
  global current_model, current_tokenizer, current_model_name
26
  if current_model_name != model_name:
27
- current_model = AutoModelForCausalLM.from_pretrained(AVAILABLE_MODELS[model_name], use_auth_token=HF_TOKEN)
28
- current_tokenizer = AutoTokenizer.from_pretrained(AVAILABLE_MODELS[model_name], use_auth_token=HF_TOKEN)
29
  current_model_name = model_name
30
 
31
  def get_next_token_predictions(text, model_name, top_k=10):
@@ -58,43 +55,75 @@ def predict_next_token(text, model_name, custom_token=""):
58
  # Format predictions
59
  predictions = "\n".join([f"'{token}' : {prob:.4f}" for token, prob in zip(tokens, probs)])
60
 
61
- return text, gr.Dropdown(choices=[f"'{t}'" for t in tokens]), predictions
62
-
63
- # Page content
64
- title = "Interactive Text Generation with Transformer Models"
65
- description = """
66
- This application allows you to interactively generate text using various transformer models.
67
- You can either select from the predicted next tokens or write your own tokens to continue the text generation.
68
-
69
- Select a model, start typing or choose from the predicted tokens, and see how the model continues your text!
70
- """
71
-
72
- # Example inputs
73
- examples = [
74
- ["The quick brown fox", "distilgpt2"],
75
- ["In a galaxy far", "gpt2-medium"],
76
- ["Once upon a time", "opt-350m"],
77
- ]
78
 
79
  # Create the interface
80
- app = gr.Interface(
81
- fn=predict_next_token,
82
- inputs=[
83
- gr.Textbox(lines=5, label="Text"),
84
- gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="distilgpt2", label="Model"),
85
- gr.Textbox(label="Custom token (optional)")
86
- ],
87
- outputs=[
88
- gr.Textbox(lines=5, label="Generated text"),
89
- gr.Dropdown(label="Predicted tokens"),
90
- gr.Textbox(lines=10, label="Token probabilities")
91
- ],
92
- theme="huggingface",
93
- title=title,
94
- description=description,
95
- examples=examples,
96
- allow_flagging="manual"
97
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
- # Launch the app
100
- app.launch()
 
13
  "pythia-160m": "EleutherAI/pythia-160m"
14
  }
15
 
 
 
 
16
  # Initialize model and tokenizer globally
17
  current_model = None
18
  current_tokenizer = None
 
21
def load_model(model_name):
    """Ensure *model_name* is the active model, loading it into the module-level cache.

    Uses the globals ``current_model``/``current_tokenizer``/``current_model_name``
    as a one-slot cache so the expensive ``from_pretrained`` download/deserialize
    happens at most once per model switch.
    """
    global current_model, current_tokenizer, current_model_name
    # Guard clause: nothing to do when the requested model is already loaded.
    if current_model_name == model_name:
        return
    repo_id = AVAILABLE_MODELS[model_name]
    current_model = AutoModelForCausalLM.from_pretrained(repo_id)
    current_tokenizer = AutoTokenizer.from_pretrained(repo_id)
    # Record the name last so a failed load above leaves the cache marked stale.
    current_model_name = model_name
27
 
28
  def get_next_token_predictions(text, model_name, top_k=10):
 
55
  # Format predictions
56
  predictions = "\n".join([f"'{token}' : {prob:.4f}" for token, prob in zip(tokens, probs)])
57
 
58
+ return text, gr.update(choices=[f"'{t}'" for t in tokens]), predictions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
# Create the interface
with gr.Blocks() as demo:
    gr.Markdown("# Interactive Text Generation with Transformer Models")

    gr.Markdown("""
    This application allows you to interactively generate text using various transformer models.
    You can either select from the predicted next tokens or write your own tokens to continue the text generation.

    Select a model, start typing or choose from the predicted tokens, and see how the model continues your text!
    """)

    with gr.Row():
        text_input = gr.Textbox(
            lines=5,
            label="Text",
            placeholder="Type your text here...",
            value="The quick brown fox",
        )

    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=list(AVAILABLE_MODELS.keys()),
            value="distilgpt2",
            label="Select Model",
        )

    with gr.Row():
        custom_input = gr.Textbox(
            label="Custom token (optional)",
            placeholder="Type a custom token...",
        )

    with gr.Row():
        token_dropdown = gr.Dropdown(
            label="Predicted tokens",
            choices=[],
        )

    with gr.Row():
        predictions_output = gr.Textbox(
            lines=10,
            label="Token probabilities",
        )

    # BUG FIX: the original wired token_dropdown.change with a brand-new
    # gr.Textbox(value="") as an event input. Components constructed inside a
    # Blocks context are rendered into the layout, so that added a stray hidden
    # textbox and is not the supported way to pass a constant. gr.State holds
    # the constant "" without rendering anything.
    empty_custom = gr.State("")

    # The three user-editable widgets all trigger the same re-prediction with
    # the current live values. NOTE(review): predict_next_token returns the
    # input text unchanged, so writing it back to text_input should not re-fire
    # the change event in a loop — confirm against the function body.
    for trigger in (text_input, model_dropdown, custom_input):
        trigger.change(
            predict_next_token,
            inputs=[text_input, model_dropdown, custom_input],
            outputs=[text_input, token_dropdown, predictions_output],
        )

    # Selecting a predicted token re-runs prediction with an empty custom token.
    # NOTE(review): the selected token value itself is never passed to
    # predict_next_token, so the selection cannot be appended to the text by
    # this handler — verify whether that is the intended behavior.
    token_dropdown.change(
        predict_next_token,
        inputs=[text_input, model_dropdown, empty_custom],
        outputs=[text_input, token_dropdown, predictions_output],
    )

# Enable the request queue so slow model downloads/inference don't drop
# connections, then serve the app.
demo.queue().launch()