YANGSongsong committed on
Commit b662be2 · verified · 1 Parent(s): 9ec9253

Gemma chat demo first commit

Files changed (5)
  1. App.py +135 -0
  2. README.md +11 -11
  3. chatAI.svg +37 -0
  4. requirements.txt +2 -0
  5. user.png +0 -0
App.py ADDED
@@ -0,0 +1,135 @@
+ import random
+
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+
+ client = InferenceClient("google/gemma-2b-it")
+ isShowName = False
+
+
+ def format_prompt(message, history):
+     # Flatten the chat history into Gemma's <start_of_turn>/<end_of_turn> markup.
+     prompt = ""
+     if history:
+         for user_prompt, bot_response in history:
+             prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
+             prompt += f"<start_of_turn>model{bot_response}"
+     prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
+     return prompt
+
+
+ def generate(prompt, history, temperature=0.7, max_new_tokens=1024, top_p=0.90, repetition_penalty=0.9):
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     if not history:
+         history = []
+
+     rand_seed = random.randint(1, 1111111111111111)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=rand_seed,
+     )
+
+     formatted_prompt = format_prompt(prompt, history)
+
+     # Stream the completion token by token and yield the partial output to the UI.
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True,
+                                     return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     history.append((prompt, output))
+     return output
+
+
+ def upload_file(inputs):
+     if not inputs:
+         return "No files provided", ""
+
+     filename = inputs[0]  # Process only the first file if multiple are provided
+     print(f'FilePath:{inputs}')
+     file_content = ""
+     if filename.endswith('.txt'):
+         try:
+             with open(filename, 'r', encoding='utf-8') as f:
+                 file_content = f.read()
+         except FileNotFoundError:
+             return f"File not found: {filename}", ""
+         except PermissionError:
+             return f"Permission error: {filename}", ""
+         except UnicodeDecodeError:
+             return f"Error decoding file: {filename}. Please check the file encoding.", ""
+
+     return filename, file_content
+
+
+ mychatbot = gr.Chatbot(
+     avatar_images=["./user.png", "./chatAI.svg"], bubble_full_width=False, show_label=False, show_copy_button=True,
+     likeable=True, )
+
+ additional_inputs = [
+     gr.Slider(
+         label="Temperature",
+         value=0.7,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.01,
+         interactive=True,
+         info="Higher values produce more diverse output",
+     ),
+     gr.Slider(
+         label="Max new tokens",
+         value=6400,
+         minimum=0,
+         maximum=8000,
+         step=64,
+         interactive=True,
+         info="Maximum number of new tokens",
+     ),
+     gr.Slider(
+         label="Top-p",
+         value=0.90,
+         minimum=0.0,
+         maximum=1,
+         step=0.01,
+         interactive=True,
+         info="Higher values allow more low-probability tokens",
+     ),
+     gr.Slider(
+         label="Repetition penalty",
+         value=1.0,
+         minimum=0.1,
+         maximum=2.0,
+         step=0.1,
+         interactive=True,
+         info="Penalty for repeated tokens",
+     )
+ ]
+
+ iface = gr.ChatInterface(fn=generate,
+                          chatbot=mychatbot,
+                          additional_inputs=additional_inputs,
+                          retry_btn=None,
+                          undo_btn=None
+                          )
+
+ with gr.Blocks() as demo:
+     gr.HTML("<center><h1>Yang's Chat with Google's Gemma</h1></center>")
+     iface.render()
+     with gr.Tab(label="Text"):
+         upload_button = gr.File(label="File")
+     with gr.Tab(label="Image"):
+         image = gr.Image(label="Image")
+
+ demo.queue().launch(show_api=False)
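
For reference, a minimal sketch of how the pieces above combine outside the Gradio UI: it builds the same Gemma turn markup that format_prompt produces and streams tokens with the same text_generation call. The example history and message are hypothetical, and it assumes the InferenceClient can reach the model (e.g. a Hugging Face token with Gemma access is configured).

from huggingface_hub import InferenceClient

client = InferenceClient("google/gemma-2b-it")

# Hypothetical (user, bot) history plus a new user message.
history = [("Hi!", "Hello! How can I help you today?")]
message = "Summarise what Gemma is in one sentence."

# Same turn markup as format_prompt() in App.py.
prompt = ""
for user_prompt, bot_response in history:
    prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
    prompt += f"<start_of_turn>model{bot_response}"
prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"

# Stream the reply token by token, as generate() does for the chat UI.
for chunk in client.text_generation(prompt, max_new_tokens=64, temperature=0.7, top_p=0.9,
                                    do_sample=True, stream=True, details=True,
                                    return_full_text=False):
    print(chunk.token.text, end="", flush=True)
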
README.md CHANGED
@@ -1,13 +1,13 @@
- ---
- title: GemmaChat
- emoji: 📚
- colorFrom: gray
- colorTo: green
+ title: Gemma Chat
+ emoji:
+ colorFrom: green
+ colorTo: purple
  sdk: gradio
  sdk_version: 4.19.2
+ models:
+ - google/gemma-2b-it
+ - google/gemma-2b
+ - google/gemma-7b-it
+ - google/gemma-7b
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ app_file: app.py
+ pinned: false
chatAI.svg ADDED
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ gradio~=4.19.1
+ huggingface_hub~=0.20.3
user.png ADDED