DrBenjamin committed
Commit 51a8c11 (1 Parent(s): 4c46b52)

added files

Files changed (3)
  1. app.py +291 -0
  2. pages/💁‍ Open_Assistant.py +329 -0
  3. requirements.txt +17 -0
app.py ADDED
@@ -0,0 +1,291 @@
+ ##### `app.py`
+ ##### AI Demo, hosted on https://huggingface.co/spaces/DrBenjamin/OpenAI
+ ##### Please reach out to [email protected] for any questions
+ #### Loading needed Python libraries
+ import streamlit as st
+ import numpy as np
+ import audio2numpy as a2n
+ from pydub import AudioSegment
+ import cv2
+ from PIL import Image
+ import torch
+ from diffusers import StableDiffusionPipeline
+ from diffusers import StableDiffusionImg2ImgPipeline
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
+ from transformers import pipeline, set_seed
+ from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
+ import os
+
+ os.environ['COMMANDLINE_ARGS'] = '--skip-torch-cuda-test --precision full --no-half'
+ os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
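+ # Note on the two environment variables above: KMP_DUPLICATE_LIB_OK works around the
+ # duplicate OpenMP runtime abort that can occur on macOS, and COMMANDLINE_ARGS follows
+ # the stable-diffusion-webui convention for CPU-only runs (the diffusers pipelines used
+ # below do not read this variable themselves).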
+
+
+ #### Functions
+ ### Function predict_step = Image to Text recognition
+ def predict_step(image):
+     if image.mode != "RGB":
+         image = image.convert(mode = "RGB")
+     pixel_values = feature_extractor(images = image, return_tensors = "pt").pixel_values
+     pixel_values = pixel_values.to(device)
+     output_ids = model.generate(pixel_values, **gen_kwargs)
+     preds = tokenizer.batch_decode(output_ids, skip_special_tokens = True)
+     preds = [pred.strip() for pred in preds]
+     return str(preds[0]).capitalize() + '.'
+
+
+ #### Models
+ st.header('🤗 🤗 Hugging Face Diffusers')
+ st.write('State-of-the-art diffusion models for image, text and audio generation in PyTorch.')
+ devices = ["mps", "cpu", "cuda"]
+ device = st.selectbox(label = 'Select device', options = devices, index = 1, disabled = True)
+ st.write(':orange[MPS for Mac (Metal Performance Shaders), CPU for all systems and CUDA for systems with NVIDIA GPU.]')
+ models = ["runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-2-1", "hakurei/waifu-diffusion", "stabilityai/stable-diffusion-2-base",
+           "nlpconnect/vit-gpt2-image-captioning", "openai-gpt", "gpt2-large", "openai/whisper-large-v2"]
+ model_id_or_path = st.selectbox(label = 'Select model', options = models, index = 5, disabled = True)
+ if model_id_or_path == "runwayml/stable-diffusion-v1-5":
+     st.write(':orange[Stable Diffusion v1-5 is the state of the art text-to-image model.]')
+ elif model_id_or_path == "stabilityai/stable-diffusion-2-1":
+     st.write(':orange[New stable diffusion text-to-image model at 768x768 resolution.]')
+ elif model_id_or_path == "stabilityai/stable-diffusion-2-base":
+     st.write(':orange[New stable diffusion text-to-image model at 512x512 resolution.]')
+ elif model_id_or_path == "hakurei/waifu-diffusion":
+     st.write(
+         ':orange[waifu-diffusion is a latent text-to-image diffusion model that has been conditioned on high-quality anime images through fine-tuning.]')
+ elif model_id_or_path == "nlpconnect/vit-gpt2-image-captioning":
+     st.write(':orange[vit-gpt2 is an image captioning model.]')
+ elif model_id_or_path == "openai-gpt":
+     st.write(
+         ':orange[openai-gpt is a transformer-based language model created and released by OpenAI. The model is a causal (unidirectional) transformer pre-trained using language modeling on a large corpus with long range dependencies.]')
+ elif model_id_or_path == "gpt2-large":
+     st.write(
+         ':orange[GPT-2 Large is the 774M parameter version of GPT-2, a transformer-based language model created and released by OpenAI. The model is a pretrained model on English language using a causal language modeling (CLM) objective.]')
+ elif model_id_or_path == "openai/whisper-large-v2":
+     st.write(':orange[Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation.]')
+
+ control_net_models = ["None", "lllyasviel/sd-controlnet-canny", "lllyasviel/sd-controlnet-scribble"]
+ if model_id_or_path == "runwayml/stable-diffusion-v1-5":
+     disable = False
+ else:
+     disable = True
+ control_net_model = st.selectbox(label = 'Select control net model', options = control_net_models, disabled = disable)
+ if control_net_model == "lllyasviel/sd-controlnet-canny":
+     st.write(
+         ':orange[ControlNet is a neural network structure to control diffusion models by adding extra conditions. This checkpoint corresponds to the ControlNet conditioned on Canny edges.]')
+ elif control_net_model == "lllyasviel/sd-controlnet-scribble":
+     st.write(
+         ':orange[ControlNet is a neural network structure to control diffusion models by adding extra conditions. This checkpoint corresponds to the ControlNet conditioned on Scribble images.]')
+ if model_id_or_path != "runwayml/stable-diffusion-v1-5":
+     control_net_model = "None"
+
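+ # Each section below renders one Streamlit form; only the branch matching the selected
+ # model (and, for Stable Diffusion v1-5, the selected ControlNet checkpoint) is executed.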
+ #### Stable diffusion image 2 image with Control Net
+ if model_id_or_path == "runwayml/stable-diffusion-v1-5" and control_net_model != "None":
+     with st.form('img2img (Control Net)'):
+         st.subheader('Image 2 Image (Control Net)')
+         st.write('Create an image from text input with an image as template.')
+         image = ''
+         uploaded_file = st.file_uploader(label = "Upload a picture", type = 'png')
+         prompt = st.text_input(label = 'Prompt',
+                                value = 'A picture in comic style, bright colours, a house with red bricks, a dark sky with a full yellow moon, best quality, extremely detailed.')
+         submitted = st.form_submit_button('Submit')
+         if submitted:
+             # Check for image data
+             if uploaded_file is not None:
+                 image = cv2.imdecode(np.frombuffer(uploaded_file.getvalue(), np.uint8), cv2.IMREAD_COLOR)
+                 image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+                 # Resize image if it exists and is not 768x640 / 640x768 pixels
+                 h, w = image.shape
+                 if not (h == 768 and w == 640) and not (h == 640 and w == 768):
+                     # Image is bigger in height than width
+                     if h > w:
+                         # Resize cropped image to standard dimensions
+                         image = cv2.resize(image, (640, 768), interpolation = cv2.INTER_AREA)
+
+                     # Image is smaller in height than width
+                     else:
+                         # Resize cropped image to standard dimensions
+                         image = cv2.resize(image, (768, 640), interpolation = cv2.INTER_AREA)
+
+                 # Get canny image
+                 image = cv2.Canny(image, 100, 200)
+                 image = image[:, :, None]
+                 image = np.concatenate([image, image, image], axis = 2)
+                 canny_image = Image.fromarray(image)
+                 st.subheader('Preview annotator result')
+                 st.image(canny_image)
+
+                 # Load control net and stable diffusion v1-5
+                 controlnet = ControlNetModel.from_pretrained(control_net_model, torch_dtype = torch.float32)
+                 pipe = StableDiffusionControlNetPipeline.from_pretrained(model_id_or_path, controlnet = controlnet, torch_dtype = torch.float32)
+                 pipe = pipe.to(device)
+
+                 # Recommended if your computer has < 64 GB of RAM
+                 pipe.enable_attention_slicing()
+
+                 # Speed up diffusion process with faster scheduler and memory optimization
+                 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+
+                 # Generate image
+                 generator = torch.manual_seed(0)
+                 image = pipe(prompt = prompt, negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps = 30,
+                              generator = generator, image = canny_image).images[0]
+                 st.subheader('Diffuser result')
+                 st.write('Model :orange[' + model_id_or_path + '] + :red[' + control_net_model + ']')
+                 st.image(image)
+
+ ## Stable-Diffusion
+ if model_id_or_path == "runwayml/stable-diffusion-v1-5" and control_net_model == "None":
+     with st.form('img2img'):
+         st.subheader('Image 2 Image')
+         st.write('Create an image from text input with an image as template.')
+         image = ''
+         uploaded_file = st.file_uploader(label = "Upload a picture", type = 'png')
+         prompt = st.text_input(label = 'Prompt',
+                                value = 'A picture in comic style, bright colours, a house with red bricks, a dark sky with a full yellow moon, best quality, extremely detailed.')
+         submitted = st.form_submit_button('Submit')
+         if submitted:
+             # Check for image data
+             if uploaded_file is not None:
+                 image = cv2.imdecode(np.frombuffer(uploaded_file.getvalue(), np.uint8), cv2.IMREAD_COLOR)
+
+                 # Resize image if it exists and is not 768x640 / 640x768 pixels
+                 h, w, _ = image.shape
+                 if not (h == 768 and w == 640) and not (h == 640 and w == 768):
+                     # Image is bigger in height than width
+                     if h > w:
+                         # Resize cropped image to standard dimensions
+                         image = cv2.resize(image, (640, 768), interpolation = cv2.INTER_AREA)
+
+                     # Image is smaller in height than width
+                     else:
+                         # Resize cropped image to standard dimensions
+                         image = cv2.resize(image, (768, 640), interpolation = cv2.INTER_AREA)
+                 # Convert from OpenCV BGR to RGB channel order before creating the PIL image
+                 image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+                 image = Image.fromarray(image)
+
+                 # Load the pipeline
+                 pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype = torch.float32)
+                 pipe = pipe.to(device)
+
+                 # Recommended if your computer has < 64 GB of RAM
+                 pipe.enable_attention_slicing()
+
+                 # Speed up diffusion process with faster scheduler and memory optimization
+                 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+
+                 # Create new image
+                 images = pipe(prompt = prompt, negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality", num_inference_steps = 30,
+                               image = image, strength = 0.75, guidance_scale = 7.5).images
+
+                 # Show image
+                 st.subheader('Diffuser result')
+                 st.write('Model :orange[' + model_id_or_path + ']')
+                 st.image(images[0])
+
+ #### Stable diffusion txt 2 image
+ if control_net_model == "None" and model_id_or_path != "nlpconnect/vit-gpt2-image-captioning" and model_id_or_path != "openai-gpt" and model_id_or_path != "gpt2-large" and model_id_or_path != "openai/whisper-large-v2":
+     with st.form('txt2img'):
+         st.subheader('Text 2 Image')
+         st.write('Create an image from text input.')
+         if model_id_or_path == "runwayml/stable-diffusion-v1-5" or model_id_or_path == "stabilityai/stable-diffusion-2-1":
+             value = 'A picture in comic style, bright colours, a house with red bricks, a dark sky with a full yellow moon, best quality, extremely detailed.'
+         if model_id_or_path == "hakurei/waifu-diffusion":
+             value = 'A picture in Anime style, bright colours, a house with red bricks, a dark sky with a full yellow moon, best quality, extremely detailed.'
+         if model_id_or_path == "stabilityai/stable-diffusion-2-base":
+             value = 'A picture in comic style, a castle with grey bricks in the background, a river is going through, a blue sky with a full yellow sun, best quality, extremely detailed.'
+
+         prompt = st.text_input(label = 'Prompt', value = value)
+         submitted = st.form_submit_button('Submit')
+         if submitted:
+             # Make sure you're logged in with `huggingface-cli login`
+             pipe = StableDiffusionPipeline.from_pretrained(model_id_or_path)
+             pipe = pipe.to(device)
+
+             # Recommended if your computer has < 64 GB of RAM
+             pipe.enable_attention_slicing()
+
+             # Speed up diffusion process with faster scheduler and memory optimization
+             pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+
+             # Results
+             if model_id_or_path == "hakurei/waifu-diffusion":
+                 negative = "several scenes, more than one image, split picture"
+             else:
+                 negative = "monochrome, lowres, bad anatomy, worst quality, low quality"
+             image = pipe(prompt = prompt, negative_prompt = negative, num_inference_steps = 30, guidance_scale = 7.5).images[0]
+             st.subheader('Diffuser result')
+             st.write('Model :orange[' + model_id_or_path + ']')
+             st.image(image)
+
+ #### Text (OpenAI gpt models)
+ if model_id_or_path == "openai-gpt" or model_id_or_path == "gpt2-large":
+     with st.form('GPT'):
+         st.subheader('Text generation')
+         st.write('Create text which is generated from text input.')
+         text_input = st.text_input(label = 'Give a start of a sentence', value = 'This is a test ')
+         submitted = st.form_submit_button('Submit')
+         if submitted:
+             generator = pipeline('text-generation', model = model_id_or_path)
+             set_seed(42)
+             generated = generator(text_input, max_length = 150, num_return_sequences = 1)
+             st.subheader('Diffuser result')
+             st.write('Model :orange[' + model_id_or_path + ']')
+             st.markdown('Text: ":green[' + str(generated[0]['generated_text']) + ']"')
+
+ #### Image to text
+ if model_id_or_path == "nlpconnect/vit-gpt2-image-captioning":
+     with st.form('Image2Text'):
+         st.subheader('Image 2 Text')
+         st.write('Create a description of an image.')
+         image = ''
+         uploaded_file = st.file_uploader(label = "Upload a picture", type = 'png')
+         submitted = st.form_submit_button('Submit')
+         if submitted:
+             # Check for image data
+             if uploaded_file is not None:
+                 image = cv2.imdecode(np.frombuffer(uploaded_file.getvalue(), np.uint8), cv2.IMREAD_COLOR)
+                 # Convert from OpenCV BGR to RGB channel order before creating the PIL image
+                 image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+                 image = Image.fromarray(image)
+                 model = VisionEncoderDecoderModel.from_pretrained(model_id_or_path)
+                 feature_extractor = ViTImageProcessor.from_pretrained(model_id_or_path)
+                 tokenizer = AutoTokenizer.from_pretrained(model_id_or_path)
+                 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+                 model.to(device)
+                 max_length = 16
+                 num_beams = 4
+                 gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
+                 output = predict_step(image)
+                 st.subheader('Diffuser result')
+                 st.write('Model :orange[nlpconnect/vit-gpt2-image-captioning]')
+                 st.write('Description: ":green[' + str(output) + ']"')
+
+ #### Whisper Model
+ if model_id_or_path == "openai/whisper-large-v2":
+     with st.form('Audio2Text'):
+         st.subheader('Audio 2 Text')
+         st.write('Create a transcription of an audio file.')
+         audio_file = st.file_uploader(label = "Upload an audio file", type = 'mp3')
+         submitted = st.form_submit_button('Submit')
+         if submitted:
+             if audio_file is not None:
+                 audio = audio_file.getvalue()
+                 with open("temp.mp3", "wb") as binary_file:
+                     # Write bytes to file
+                     binary_file.write(audio)
+
+                 # Calling the split_to_mono method on the stereo audio file
+                 stereo_audio = AudioSegment.from_file("temp.mp3", format = "mp3")
+                 mono_audios = stereo_audio.split_to_mono()
+                 mono_audios[0].export("temp.mp3", format = "mp3")
+
+                 # Mp3 file to numpy array
+                 audio, sr = a2n.audio_from_file('temp.mp3')
+                 st.audio('temp.mp3')
+                 if os.path.exists("temp.mp3"):
+                     os.remove("temp.mp3")
+
+                 # Load model and processor
+                 pipe = pipeline("automatic-speech-recognition", model = "openai/whisper-large-v2", chunk_length_s = 30, device = "cpu",
+                                 ignore_warning = True)
+                 prediction = pipe(audio, sampling_rate = sr)["text"]
+                 st.subheader('Preview used audio')
+                 st.write('Model :orange[' + model_id_or_path + ']')
+                 st.write('Transcript: ":green[' + str(prediction) + ']"')
pages/💁‍ Open_Assistant.py ADDED
@@ -0,0 +1,329 @@
+ ##### `💁‍ Open_Assistant.py`
+ ##### Chat LLM Streaming
+ ##### https://huggingface.co/spaces/olivierdehaene/chat-llm-streaming/blob/main/README.md
+ ##### Please reach out to [email protected] for any questions
+ #### Loading needed Python libraries
+ import streamlit as st
+ import os
+ import gradio as gr
+ from text_generation import Client, InferenceAPIClient
+
+ client = InferenceAPIClient("OpenAssistant/oasst-sft-1-pythia-12b")
+ text = client.generate("<|prompter|>Why is the sky blue?<|endoftext|><|assistant|>").generated_text
+ st.write(text)
+
+ # Token Streaming
+ #text = ""
+ #for response in client.generate_stream("<|prompter|>Why is the sky blue?<|endoftext|><|assistant|>"):
+ #    if not response.token.special:
+ #        print(response.token.text)
+ #        text += response.token.text
+ #st.write(text)
+
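+ # The remainder of this file, kept commented out below, is the Gradio chat UI that appears
+ # to be taken from the chat-llm-streaming Space linked above.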
+ #
+ # openchat_preprompt = (
+ #     "\n<human>: Hi!\n<bot>: My name is Bot, model version is 0.15, part of an open-source kit for "
+ #     "fine-tuning new bots! I was created by Together, LAION, and Ontocord.ai and the open-source "
+ #     "community. I am not human, not evil and not alive, and thus have no thoughts and feelings, "
+ #     "but I am programmed to be helpful, polite, honest, and friendly.\n"
+ # )
+ #
+ #
+ # def get_client(model: str):
+ #     if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
+ #         return Client(os.getenv("OPENCHAT_API_URL"))
+ #     return InferenceAPIClient(model, token = os.getenv("HF_TOKEN", None))
+ #
+ #
+ # def get_usernames(model: str):
+ #     """
+ #     Returns:
+ #         (str, str, str, str): pre-prompt, username, bot name, separator
+ #     """
+ #     if model == "OpenAssistant/oasst-sft-1-pythia-12b":
+ #         return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>"
+ #     if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
+ #         return openchat_preprompt, "<human>: ", "<bot>: ", "\n"
+ #     return "", "User: ", "Assistant: ", "\n"
+ #
+ #
+ # def predict(
+ #         model: str,
+ #         inputs: str,
+ #         typical_p: float,
+ #         top_p: float,
+ #         temperature: float,
+ #         top_k: int,
+ #         repetition_penalty: float,
+ #         watermark: bool,
+ #         chatbot,
+ #         history,
+ # ):
+ #     client = get_client(model)
+ #     preprompt, user_name, assistant_name, sep = get_usernames(model)
+ #
+ #     history.append(inputs)
+ #
+ #     past = []
+ #     for data in chatbot:
+ #         user_data, model_data = data
+ #
+ #         if not user_data.startswith(user_name):
+ #             user_data = user_name + user_data
+ #         if not model_data.startswith(sep + assistant_name):
+ #             model_data = sep + assistant_name + model_data
+ #
+ #         past.append(user_data + model_data.rstrip() + sep)
+ #
+ #     if not inputs.startswith(user_name):
+ #         inputs = user_name + inputs
+ #
+ #     total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip()
+ #
+ #     partial_words = ""
+ #
+ #     if model == "OpenAssistant/oasst-sft-1-pythia-12b":
+ #         iterator = client.generate_stream(
+ #             total_inputs,
+ #             typical_p = typical_p,
+ #             truncate = 1000,
+ #             watermark = watermark,
+ #             max_new_tokens = 500,
+ #         )
+ #     else:
+ #         iterator = client.generate_stream(
+ #             total_inputs,
+ #             top_p = top_p if top_p < 1.0 else None,
+ #             top_k = top_k,
+ #             truncate = 1000,
+ #             repetition_penalty = repetition_penalty,
+ #             watermark = watermark,
+ #             temperature = temperature,
+ #             max_new_tokens = 500,
+ #             stop_sequences = [user_name.rstrip(), assistant_name.rstrip()],
+ #         )
+ #
+ #     for i, response in enumerate(iterator):
+ #         if response.token.special:
+ #             continue
+ #
+ #         partial_words = partial_words + response.token.text
+ #         if partial_words.endswith(user_name.rstrip()):
+ #             partial_words = partial_words.rstrip(user_name.rstrip())
+ #         if partial_words.endswith(assistant_name.rstrip()):
+ #             partial_words = partial_words.rstrip(assistant_name.rstrip())
+ #
+ #         if i == 0:
+ #             history.append(" " + partial_words)
+ #         elif response.token.text not in user_name:
+ #             history[-1] = partial_words
+ #
+ #         chat = [
+ #             (history[i].strip(), history[i + 1].strip())
+ #             for i in range(0, len(history) - 1, 2)
+ #         ]
+ #         yield chat, history
+ #
+ #
+ # def reset_textbox():
+ #     return gr.update(value = "")
+ #
+ #
+ # def radio_on_change(
+ #         value: str,
+ #         disclaimer,
+ #         typical_p,
+ #         top_p,
+ #         top_k,
+ #         temperature,
+ #         repetition_penalty,
+ #         watermark,
+ # ):
+ #     if value == "OpenAssistant/oasst-sft-1-pythia-12b":
+ #         typical_p = typical_p.update(value = 0.2, visible = True)
+ #         top_p = top_p.update(visible = False)
+ #         top_k = top_k.update(visible = False)
+ #         temperature = temperature.update(visible = False)
+ #         disclaimer = disclaimer.update(visible = False)
+ #         repetition_penalty = repetition_penalty.update(visible = False)
+ #         watermark = watermark.update(False)
+ #     elif value == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
+ #         typical_p = typical_p.update(visible = False)
+ #         top_p = top_p.update(value = 0.25, visible = True)
+ #         top_k = top_k.update(value = 50, visible = True)
+ #         temperature = temperature.update(value = 0.6, visible = True)
+ #         repetition_penalty = repetition_penalty.update(value = 1.01, visible = True)
+ #         watermark = watermark.update(False)
+ #         disclaimer = disclaimer.update(visible = True)
+ #     else:
+ #         typical_p = typical_p.update(visible = False)
+ #         top_p = top_p.update(value = 0.95, visible = True)
+ #         top_k = top_k.update(value = 4, visible = True)
+ #         temperature = temperature.update(value = 0.5, visible = True)
+ #         repetition_penalty = repetition_penalty.update(value = 1.03, visible = True)
+ #         watermark = watermark.update(True)
+ #         disclaimer = disclaimer.update(visible = False)
+ #     return (
+ #         disclaimer,
+ #         typical_p,
+ #         top_p,
+ #         top_k,
+ #         temperature,
+ #         repetition_penalty,
+ #         watermark,
+ #     )
+ #
+ #
+ # title = """<h1 align="center">🔥Large Language Model API 🚀Streaming🚀</h1>"""
+ # description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
+ # ```
+ # User: <utterance>
+ # Assistant: <utterance>
+ # User: <utterance>
+ # Assistant: <utterance>
+ # ...
+ # ```
+ # In this app, you can explore the outputs of multiple LLMs when prompted in this way.
+ # """
+ #
+ # openchat_disclaimer = """
+ # <div align="center">Checkout the official <a href=https://huggingface.co/spaces/togethercomputer/OpenChatKit>OpenChatKit feedback app</a> for the full experience.</div>
+ # """
+ #
+ # with gr.Blocks(
+ #         css = """#col_container {margin-left: auto; margin-right: auto;}
+ #                 #chatbot {height: 520px; overflow: auto;}"""
+ # ) as demo:
+ #     gr.HTML(title)
+ #     with gr.Column(elem_id = "col_container"):
+ #         model = gr.Radio(
+ #             value = "OpenAssistant/oasst-sft-1-pythia-12b",
+ #             choices = [
+ #                 "OpenAssistant/oasst-sft-1-pythia-12b",
+ #                 # "togethercomputer/GPT-NeoXT-Chat-Base-20B",
+ #                 "google/flan-t5-xxl",
+ #                 "google/flan-ul2",
+ #                 "bigscience/bloom",
+ #                 "bigscience/bloomz",
+ #                 "EleutherAI/gpt-neox-20b",
+ #             ],
+ #             label = "Model",
+ #             interactive = True,
+ #         )
+ #
+ #         chatbot = gr.Chatbot(elem_id = "chatbot")
+ #         inputs = gr.Textbox(
+ #             placeholder = "Hi there!", label = "Type an input and press Enter"
+ #         )
+ #         disclaimer = gr.Markdown(openchat_disclaimer, visible = False)
+ #         state = gr.State([])
+ #         b1 = gr.Button()
+ #
+ #         with gr.Accordion("Parameters", open = False):
+ #             typical_p = gr.Slider(
+ #                 minimum = -0,
+ #                 maximum = 1.0,
+ #                 value = 0.2,
+ #                 step = 0.05,
+ #                 interactive = True,
+ #                 label = "Typical P mass",
+ #             )
+ #             top_p = gr.Slider(
+ #                 minimum = -0,
+ #                 maximum = 1.0,
+ #                 value = 0.25,
+ #                 step = 0.05,
+ #                 interactive = True,
+ #                 label = "Top-p (nucleus sampling)",
+ #                 visible = False,
+ #             )
+ #             temperature = gr.Slider(
+ #                 minimum = -0,
+ #                 maximum = 5.0,
+ #                 value = 0.6,
+ #                 step = 0.1,
+ #                 interactive = True,
+ #                 label = "Temperature",
+ #                 visible = False,
+ #             )
+ #             top_k = gr.Slider(
+ #                 minimum = 1,
+ #                 maximum = 50,
+ #                 value = 50,
+ #                 step = 1,
+ #                 interactive = True,
+ #                 label = "Top-k",
+ #                 visible = False,
+ #             )
+ #             repetition_penalty = gr.Slider(
+ #                 minimum = 0.1,
+ #                 maximum = 3.0,
+ #                 value = 1.03,
+ #                 step = 0.01,
+ #                 interactive = True,
+ #                 label = "Repetition Penalty",
+ #                 visible = False,
+ #             )
+ #             watermark = gr.Checkbox(value = False, label = "Text watermarking")
+ #
+ #         model.change(
+ #             lambda value: radio_on_change(
+ #                 value,
+ #                 disclaimer,
+ #                 typical_p,
+ #                 top_p,
+ #                 top_k,
+ #                 temperature,
+ #                 repetition_penalty,
+ #                 watermark,
+ #             ),
+ #             inputs = model,
+ #             outputs = [
+ #                 disclaimer,
+ #                 typical_p,
+ #                 top_p,
+ #                 top_k,
+ #                 temperature,
+ #                 repetition_penalty,
+ #                 watermark,
+ #             ],
+ #         )
+ #
+ #         inputs.submit(
+ #             predict,
+ #             [
+ #                 model,
+ #                 inputs,
+ #                 typical_p,
+ #                 top_p,
+ #                 temperature,
+ #                 top_k,
+ #                 repetition_penalty,
+ #                 watermark,
+ #                 chatbot,
+ #                 state,
+ #             ],
+ #             [chatbot, state],
+ #         )
+ #         b1.click(
+ #             predict,
+ #             [
+ #                 model,
+ #                 inputs,
+ #                 typical_p,
+ #                 top_p,
+ #                 temperature,
+ #                 top_k,
+ #                 repetition_penalty,
+ #                 watermark,
+ #                 chatbot,
+ #                 state,
+ #             ],
+ #             [chatbot, state],
+ #         )
+ #         b1.click(reset_textbox, [], [inputs])
+ #         inputs.submit(reset_textbox, [], [inputs])
+ #
+ #     gr.Markdown(description)
+ # demo.queue(concurrency_count = 16).launch(debug = True)
requirements.txt ADDED
@@ -0,0 +1,17 @@
+ ##### `requirements.txt`
+ ##### OpenAI ChatGPT Demo
+ ##### Open-Source, hosted on https://github.com/DrBenjamin/OpenAI
+ ##### Please reach out to [email protected] for any questions
+ ### All needed libraries (and their versions)
+ streamlit == 1.17.0
+ streamlit-scrollable-textbox
+ openai
+ PyPDF2
+ opencv-python-headless
+ torch
+ diffusers
+ transformers
+ text_generation
+ datasets
+ audio2numpy
+ pydub
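+ # Note: the added files also import numpy, Pillow (PIL) and gradio; if these are not pulled
+ # in as transitive dependencies of the packages above, they may need to be listed here as well.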