sanjanatule committed on
Commit
3b55724
·
1 Parent(s): 2f9ffe4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -21
app.py CHANGED
@@ -1,14 +1,32 @@
1
  import torch
 
2
  import gradio as gr
3
- from utils import *
4
- from torch import nn
5
- import lightning.pytorch as pl
6
- from torch.nn import functional as F
7
 
8
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
 
 
 
 
 
 
9
 
10
- HTML_TEMPLATE = """
11
- <style>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  #app-header {
14
  text-align: center;
@@ -60,9 +78,9 @@ HTML_TEMPLATE = """
60
  <div class="artifact large"></div>
61
  <div class="artifact large"></div>
62
  <!-- Content -->
63
- <h1>GPT NEXT WORD GENERATOR</h1>
64
  <p>Generate dialogue for given some initial prompt for context.</p>
65
- <p>Model: GPT, Dataset: arxiv + book + cc, Parameter Count: 160M</p>
66
  """
67
 
68
  with gr.Blocks(theme=gr.themes.Glass(),css=".gradio-container {background: url('file=https://github.com/santule/ERA/assets/20509836/e78f2bb3-ddd8-4ce9-a941-3d3d7ef7a272')}") as interface:
@@ -89,22 +107,13 @@ with gr.Blocks(theme=gr.themes.Glass(),css=".gradio-container {background: url('
89
  value="Enter your prompt here: This text will set the context for the AI's response."
90
  )
91
 
92
- temperature_dropdown = gr.Slider(0, 1, value=0.8, label="Temperature", info="Set the creativity level: Higher values produce more varied results, lower values generate more predictable text.")
93
- top_k_dropdown = gr.Slider(200, 300, value=200, label="Top K", info="Control the randomness: Limits the AI to consider only the top K most likely next words.")
94
- max_new_tokens = gr.Slider(10, 100, value=50, label="Max Tokens", info="Choose the length: This determines the maximum number of words the AI will generate.")
95
-
96
-
97
  outputs = gr.Textbox(
98
- label="Generated Dialogue"
99
  )
100
- inputs = [input_text, temperature_dropdown, top_k_dropdown, max_new_tokens]
101
 
102
  with gr.Column():
103
- button = gr.Button("Generate")
104
  button.click(generate_dialogue, inputs=inputs, outputs=outputs)
105
 
106
- # with gr.Row():
107
- # gr.Examples(examples=examples, inputs=inputs, outputs=outputs, fn=generate_dialogue, cache_examples=True,)
108
-
109
-
110
  interface.launch()
 
1
  import torch
2
+ from transformers import pipeline, logging, AutoModelForCausalLM, AutoTokenizer
3
  import gradio as gr
 
 
 
 
4
 
5
## 1 - Loading Model
# Base causal-LM checkpoint; device_map='auto' lets accelerate place the
# layers on whatever hardware is available (GPU if present, else CPU).
model_name = "microsoft/phi-2"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map='auto',
)
# KV-cache off to match the configuration the adapter was trained with.
model.config.use_cache = False

## 2 - Loading Tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Phi-2 ships without a pad token; reuse EOS so padding works.
tokenizer.pad_token = tokenizer.eos_token

## 3 - Load adapter (trained LORA weights)
peft_model_folder = 'checkpoint700'
model.load_adapter(peft_model_folder)
22
# Build the text-generation pipeline once and reuse it: constructing a
# transformers pipeline per request is expensive and was previously done
# on every call. Created lazily on first use.
_pipe = None


def generate_dialogue(input_text):
    """Generate a model response for the given prompt string.

    The prompt is wrapped in the instruction template the LoRA adapter was
    fine-tuned with. Returns the pipeline's full generated text (which, as
    before, includes the wrapped prompt itself).
    """
    global _pipe
    if _pipe is None:
        # max_length=200 bounds prompt + generated tokens combined.
        _pipe = pipeline(
            task="text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=200,
        )
    result = _pipe(f"<s>[INST] {input_text} [/INST]")
    return result[0]['generated_text']
27
+
28
+ HTML_TEMPLATE = """
29
+ <style>
30
 
31
  #app-header {
32
  text-align: center;
 
78
  <div class="artifact large"></div>
79
  <div class="artifact large"></div>
80
  <!-- Content -->
81
+ <h1>CHAT with fine tuned Phi-2 LLM</h1>
82
  <p>Generate dialogue for given some initial prompt for context.</p>
83
+ <p>Model: Phi-2 (https://huggingface.co/microsoft/phi-2), Dataset: oasst1 (https://huggingface.co/datasets/OpenAssistant/oasst1) </p>
84
  """
85
 
86
  with gr.Blocks(theme=gr.themes.Glass(),css=".gradio-container {background: url('file=https://github.com/santule/ERA/assets/20509836/e78f2bb3-ddd8-4ce9-a941-3d3d7ef7a272')}") as interface:
 
107
  value="Enter your prompt here: This text will set the context for the AI's response."
108
  )
109
 
 
 
 
 
 
110
  outputs = gr.Textbox(
111
+ label="Answer"
112
  )
113
+ inputs = [input_text]
114
 
115
  with gr.Column():
116
+ button = gr.Button("Ask me")
117
  button.click(generate_dialogue, inputs=inputs, outputs=outputs)
118
 
 
 
 
 
119
  interface.launch()