Garrett Goon committed on
Commit
384ec64
·
1 Parent(s): 4cd9bad

testing basics

Browse files
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Default ignored files
2
+ /shelf/
3
+ /workspace.xml
4
+ # Editor-based HTTP Client requests
5
+ /httpRequests/
6
+ # Datasource local storage ignored files
7
+ /dataSources/
8
+ /dataSources.local.xml
.idea/hot_dog_demo_test.iml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <module type="PYTHON_MODULE" version="4">
3
+ <component name="NewModuleRootManager">
4
+ <content url="file://$MODULE_DIR$" />
5
+ <orderEntry type="inheritedJdk" />
6
+ <orderEntry type="sourceFolder" forTests="false" />
7
+ </component>
8
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <profile version="1.0">
3
+ <option name="myName" value="Project Default" />
4
+ <inspection_tool class="Eslint" enabled="true" level="WARNING" enabled_by_default="true" />
5
+ <inspection_tool class="Stylelint" enabled="true" level="ERROR" enabled_by_default="true" />
6
+ </profile>
7
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <component name="InspectionProjectProfileManager">
2
+ <settings>
3
+ <option name="USE_PROJECT_PROFILE" value="false" />
4
+ <version value="1.0" />
5
+ </settings>
6
+ </component>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="ProjectModuleManager">
4
+ <modules>
5
+ <module fileurl="file://$PROJECT_DIR$/.idea/hot_dog_demo_test.iml" filepath="$PROJECT_DIR$/.idea/hot_dog_demo_test.iml" />
6
+ </modules>
7
+ </component>
8
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <project version="4">
3
+ <component name="VcsDirectoryMappings">
4
+ <mapping directory="$PROJECT_DIR$" vcs="Git" />
5
+ </component>
6
+ </project>
app.py CHANGED
@@ -1,15 +1,81 @@
1
  import gradio as gr
2
- from transformers import pipeline
 
 
3
 
4
- pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
 
 
5
 
6
- def predict(image):
7
- predictions = pipeline(image)
8
- return {p["label"]: p["score"] for p in predictions}
9
 
10
- gr.Interface(
11
- predict,
12
- inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
13
- outputs=gr.outputs.Label(num_top_classes=2),
14
- title="Hot Dog? Or Not?",
15
- ).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import torch
3
+ from diffusers import StableDiffusionPipeline
4
+ from torchvision.transforms.functional import to_pil_image
5
 
6
+ # pipeline = StableDiffusionPipeline.from_pretrained(
7
+ # pretrained_model_name_or_path="weights", torch_dtype=torch.float16
8
+ # )
9
 
10
+ # pipeline.to('cuda')
 
 
11
 
12
# Mapping from concept tokens (presumably strings like "<det-logo>", given the
# default prompt below) to the dummy-token strings substituted into prompts.
# NOTE(review): loaded eagerly at import time via torch.load — assumes
# "concept_to_dummy_tokens_map.pt" ships alongside the app; verify at deploy.
concept_to_dummy_tokens_map = torch.load("concept_to_dummy_tokens_map.pt")
13
+
14
+
15
def replace_concept_tokens(text: str, mapping: "dict[str, str] | None" = None) -> str:
    """Expand every concept token in *text* into its dummy-token string.

    Args:
        text: Prompt text that may contain concept tokens (e.g. "<det-logo>").
        mapping: Optional concept-token -> dummy-tokens map. Defaults to the
            module-level ``concept_to_dummy_tokens_map`` loaded at import time;
            passing it explicitly makes the function pure and unit-testable.

    Returns:
        The text with each occurrence of every concept token replaced.
    """
    if mapping is None:
        mapping = concept_to_dummy_tokens_map
    for concept_token, dummy_tokens in mapping.items():
        # Plain substring replacement, applied for every concept in the map.
        text = text.replace(concept_token, dummy_tokens)
    return text
19
+
20
+
21
+ # def inference(
22
+ # prompt: str, num_inference_steps: int = 50, guidance_scale: int = 3.0
23
+ # ):
24
+ # prompt = replace_concept_tokens(prompt)
25
+ # for _ in range(3):
26
+ # img_list = pipeline(
27
+ # prompt=prompt,
28
+ # num_inference_steps=num_inference_steps,
29
+ # guidance_scale=guidance_scale,
30
+ # )
31
+ # if not img_list["nsfw_content_detected"]:
32
+ # break
33
+ # return img_list["sample"]
34
+
35
# Example prompt surfaced as the Textbox placeholder in the UI below;
# "<det-logo>" is the concept token that replace_concept_tokens expands.
DEFAULT_PROMPT = (
    "A watercolor painting on textured paper of a <det-logo> using soft strokes,"
    " pastel colors, incredible composition, masterpiece"
)
39
+
40
+
41
def white_imgs(prompt: str, guidance_scale: float, num_inference_steps: int, seed: int):
    """Stub generator used while the real diffusion pipeline is disabled.

    All arguments are accepted (to match the Gradio click signature) but
    ignored; returns two independent all-white 512x512x3 float arrays.
    """
    placeholders = []
    for _ in range(2):
        # Fresh tensor each iteration so the gallery gets distinct arrays.
        placeholders.append(torch.ones(512, 512, 3).numpy())
    return placeholders
43
+
44
+
45
# Gradio UI: prompt + generation controls wired to the white_imgs stub.
with gr.Blocks() as demo:
    # Free-text prompt; DEFAULT_PROMPT is only a placeholder hint, not a value.
    prompt = gr.Textbox(
        label="Prompt including the token '<det-logo>'",
        placeholder=DEFAULT_PROMPT,
        interactive=True,
    )
    # Classifier-free guidance strength (forwarded to white_imgs, currently unused).
    guidance_scale = gr.Slider(
        minimum=1.0, maximum=10.0, value=3.0, label="Guidance Scale", interactive=True
    )
    # Diffusion denoising steps; integer-only slider.
    num_inference_steps = gr.Slider(
        minimum=25,
        maximum=60,
        value=40,
        label="Num Inference Steps",
        interactive=True,
        step=1,
    )
    # RNG seed restricted to a narrow band near 2**31 - 1.
    # NOTE(review): no `step=1` here, so the slider can emit non-integer
    # seeds — confirm that is intended.
    seed = gr.Slider(
        minimum=2147483147,
        maximum=2147483647,
        value=2147483397,
        label="Seed",
        interactive=True,
    )
    # NOTE(review): gr.Button's button text is its first positional `value`
    # argument; `label=` may not render "Generate" — verify in the running app.
    generate_btn = gr.Button(label="Generate")
    # Gallery pre-filled with two black placeholder images.
    gallery = gr.Gallery(
        label="Generated Images",
        value=[torch.zeros(512, 512, 3).numpy() for _ in range(2)],
    ).style(height="auto")

    # Clicking the button runs the stub with the four control values and
    # replaces the gallery contents with its output.
    generate_btn.click(
        white_imgs,
        inputs=[prompt, guidance_scale, num_inference_steps, seed],
        outputs=gallery,
    )

demo.launch()