prithivMLmods committed on
Commit
81c4296
·
verified ·
1 Parent(s): ddf32ff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +128 -201
app.py CHANGED
@@ -1,212 +1,139 @@
1
  import spaces
 
 
 
 
 
 
 
2
  import gradio as gr
3
- import torch
4
- from PIL import Image
5
- from diffusers import DiffusionPipeline
6
- import random
7
- import uuid
8
- from typing import Tuple
9
- import numpy as np
10
-
11
- def save_image(img):
12
- unique_name = str(uuid.uuid4()) + ".png"
13
- img.save(unique_name)
14
- return unique_name
15
-
16
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
17
- if randomize_seed:
18
- seed = random.randint(0, MAX_SEED)
19
- return seed
20
-
21
- MAX_SEED = np.iinfo(np.int32).max
22
-
23
- if not torch.cuda.is_available():
24
- DESCRIPTIONz += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
25
-
26
- base_model = "black-forest-labs/FLUX.1-dev"
27
- pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
28
-
29
- lora_repo = "strangerzonehf/Flux-Sketch-Flat-LoRA"
30
- trigger_word = "Sketch Flat" # Leave trigger_word blank if not used.
31
-
32
- pipe.load_lora_weights(lora_repo)
33
- pipe.to("cuda")
34
-
35
- style_list = [
36
- {
37
- "name": "3840 x 2160",
38
- "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
39
- },
40
- {
41
- "name": "2560 x 1440",
42
- "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
43
- },
44
- {
45
- "name": "HD+",
46
- "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
47
- },
48
- {
49
- "name": "Style Zero",
50
- "prompt": "{prompt}",
51
- },
52
- ]
53
-
54
- styles = {k["name"]: k["prompt"] for k in style_list}
55
-
56
- DEFAULT_STYLE_NAME = "3840 x 2160"
57
- STYLE_NAMES = list(styles.keys())
58
-
59
- def apply_style(style_name: str, positive: str) -> str:
60
- return styles.get(style_name, styles[DEFAULT_STYLE_NAME]).replace("{prompt}", positive)
61
-
62
- @spaces.GPU(duration=60, enable_queue=True)
63
- def generate(
64
- prompt: str,
65
- seed: int = 0,
66
- width: int = 1024,
67
- height: int = 1024,
68
- guidance_scale: float = 3,
69
- randomize_seed: bool = False,
70
- style_name: str = DEFAULT_STYLE_NAME,
71
- progress=gr.Progress(track_tqdm=True),
72
  ):
73
- seed = int(randomize_seed_fn(seed, randomize_seed))
74
-
75
- positive_prompt = apply_style(style_name, prompt)
76
-
77
- if trigger_word:
78
- positive_prompt = f"{trigger_word} {positive_prompt}"
79
-
80
- images = pipe(
81
- prompt=positive_prompt,
82
- width=width,
83
- height=height,
84
- guidance_scale=guidance_scale,
85
- num_inference_steps=30,
86
- num_images_per_prompt=1,
87
- output_type="pil",
88
- ).images
89
- image_paths = [save_image(img) for img in images]
90
- print(image_paths)
91
- return image_paths, seed
92
 
93
- examples = [
 
 
 
 
 
94
 
95
- "midjourney mix, a tiny astronaut hatching from an egg on the moon",
96
- "midjourney mix, intense Red, a black cat is facing the left side of the frame. The cats head is tilted upward, with its eyes closed. Its whiskers are protruding from its mouth, adding a touch of warmth to the scene. The background is a vibrant red, creating a striking contrast with the cats fur.",
97
- "midjourney mix, a close-up shot of a womans face, the womans hair is wet, and she is wearing a cream-colored sweater. The background is blurred, and there are red and white signs visible in the background. The womans eyebrows are wet, adding a touch of color to her face. Her lips are a vibrant shade of pink, and her eyes are a darker shade of brown.",
98
- "midjourney mix, woman in a red jacket, snowy, in the style of hyper-realistic portraiture, caninecore, mountainous vistas, timeless beauty, palewave, iconic, distinctive noses --ar 72:101 --stylize 750 --v 6",
99
- "midjourney mix, an anime-style illustration of a delicious, golden-brown wiener schnitzel on a plate, served with fresh lemon slices, parsley --style raw5"
100
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
- css = '''
103
- .gradio-container{max-width: 888px !important}
104
- h1{text-align:center}
105
- footer {
106
- visibility: hidden
107
- }
108
- .submit-btn {
109
- background-color: #2980b9 !important;
110
- color: white !important;
111
- }
112
- .submit-btn:hover {
113
- background-color: #43d4ff !important;
114
- }
115
- '''
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
- with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
118
- with gr.Row():
119
- with gr.Column(scale=1):
120
- prompt = gr.Text(
121
- label="Prompt",
122
- show_label=False,
123
- max_lines=1,
124
- placeholder="Enter your prompt",
125
- container=False,
126
- )
127
- run_button = gr.Button("Generate as ( 768 x 1024 )🤗", scale=0, elem_classes="submit-btn")
128
-
129
- with gr.Accordion("Advanced options", open=True, visible=True):
130
- seed = gr.Slider(
131
- label="Seed",
132
- minimum=0,
133
- maximum=MAX_SEED,
134
- step=1,
135
- value=0,
136
- visible=True
137
- )
138
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
139
-
140
- with gr.Row(visible=True):
141
- width = gr.Slider(
142
- label="Width",
143
- minimum=512,
144
- maximum=2048,
145
- step=64,
146
- value=1280,
147
- )
148
- height = gr.Slider(
149
- label="Height",
150
- minimum=512,
151
- maximum=2048,
152
- step=64,
153
- value=832,
154
- )
155
-
156
- with gr.Row():
157
- guidance_scale = gr.Slider(
158
- label="Guidance Scale",
159
- minimum=0.1,
160
- maximum=20.0,
161
- step=0.1,
162
- value=3.0,
163
- )
164
- num_inference_steps = gr.Slider(
165
- label="Number of inference steps",
166
- minimum=1,
167
- maximum=40,
168
- step=1,
169
- value=30,
170
- )
171
 
172
- style_selection = gr.Radio(
173
- show_label=True,
174
- container=True,
175
- interactive=True,
176
- choices=STYLE_NAMES,
177
- value=DEFAULT_STYLE_NAME,
178
- label="Quality Style",
179
- )
180
-
181
- with gr.Column(scale=2):
182
- result = gr.Gallery(label="Result", columns=1, show_label=False)
183
-
184
- gr.Examples(
185
- examples=examples,
186
- inputs=prompt,
187
- outputs=[result, seed],
188
- fn=generate,
189
- cache_examples=False,
190
- )
191
 
192
- gr.on(
193
- triggers=[
194
- prompt.submit,
195
- run_button.click,
196
- ],
197
- fn=generate,
198
- inputs=[
199
- prompt,
200
- seed,
201
- width,
202
- height,
203
- guidance_scale,
204
- randomize_seed,
205
- style_selection,
206
- ],
207
- outputs=[result, seed],
208
- api_name="run",
209
- )
210
 
211
  if __name__ == "__main__":
212
- demo.queue(max_size=40).launch()
 
1
  import spaces
2
+ import json
3
+ import subprocess
4
+ from llama_cpp import Llama
5
+ from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
6
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
7
+ from llama_cpp_agent.chat_history import BasicChatHistory
8
+ from llama_cpp_agent.chat_history.messages import Roles
9
  import gradio as gr
10
+ from huggingface_hub import hf_hub_download
11
+
12
# Download the GGUF model weights into ./models at startup.
hf_hub_download(
    repo_id="mradermacher/Viper-Coder-32B-Elite13-GGUF",
    filename="Viper-Coder-32B-Elite13.Q4_K_M.gguf",
    local_dir="./models",
)
18
+
19
# Inference function: loads the GGUF model and streams the chat response.
@spaces.GPU(duration=120)
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    """Generate a streamed assistant reply for *message*.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) exchange pairs, replayed into the
        agent's chat history.
    model : str
        Value of the "Model" textbox. Intended to be a ``.gguf`` file
        name under ``./models`` (the UI currently passes the repo id,
        so the path is resolved defensively below).
    system_message : str
        System prompt for the agent.
    max_tokens, temperature, top_p, top_k, repeat_penalty :
        Sampling settings forwarded to the llama-cpp provider.

    Yields
    ------
    str
        The accumulated response text after each streamed chunk.
    """
    import glob
    import os

    chat_template = MessagesFormatterType.GEMMA_2

    # BUG FIX: the UI passes the repo id ("owner/name"), not the .gguf
    # file name, so f"models/{model}" pointed at a non-existent path.
    # Resolve via basename first, then fall back to whichever GGUF file
    # was actually downloaded into ./models.
    model_path = os.path.join("models", os.path.basename(model))
    if not os.path.exists(model_path):
        gguf_files = sorted(glob.glob(os.path.join("models", "*.gguf")))
        if gguf_files:
            model_path = gguf_files[0]

    llm = Llama(
        model_path=model_path,
        flash_attn=True,
        n_gpu_layers=81,
        n_batch=1024,
        n_ctx=8192,
    )
    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template,
        debug_output=True,
    )

    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True

    # Replay prior turns so the agent sees the full conversation.
    messages = BasicChatHistory()
    for user_text, assistant_text in history:
        messages.add_message({'role': Roles.user, 'content': user_text})
        messages.add_message({'role': Roles.assistant, 'content': assistant_text})

    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False,
    )

    # Accumulate and yield the growing reply for Gradio's streaming UI.
    outputs = ""
    for output in stream:
        outputs += output
        yield outputs
84
+
85
# Build the Gradio chat UI for a single model.
def create_interface(model_name, description):
    """Return a ``gr.ChatInterface`` wired to :func:`respond`.

    *model_name* fills the read-only Model textbox and the page title;
    *description* is rendered below the title.
    """
    extra_controls = [
        gr.Textbox(value=model_name, label="Model", interactive=False),
        gr.Textbox(
            value="You are a helpful assistant. You should think step-by-step. You should answer all question by prioritizing the principle of openness",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
        gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k"),
        gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty"),
    ]
    chat_window = gr.Chatbot(scale=1, likeable=False, show_copy_button=True)
    return gr.ChatInterface(
        respond,
        additional_inputs=extra_controls,
        retry_btn="Retry",
        undo_btn="Undo",
        clear_btn="Clear",
        submit_btn="Send",
        title=f"{model_name}",
        description=description,
        chatbot=chat_window,
    )
128
 
129
# BUG FIX: the original description HTML was malformed
# ('<p align="center"Viper-Coder-32B-Elite13-GGUF/p>' — missing '>' after
# the attribute and '<' before the closing tag), so it rendered as
# broken markup instead of a centered title line.
description = """<p align="center">Viper-Coder-32B-Elite13-GGUF</p>"""

# NOTE(review): this passes the repo id, but respond() builds the model
# path from this textbox value while the downloaded file is
# "Viper-Coder-32B-Elite13.Q4_K_M.gguf" — verify path resolution.
interface = create_interface('mradermacher/Viper-Coder-32B-Elite13-GGUF', description)

# Render the single chat interface inside a Blocks container.
demo = gr.Blocks()

with demo:
    interface.render()

if __name__ == "__main__":
    demo.launch()