AZIIIIIIIIZ committed on
Commit
d02c9bc
·
verified ·
1 Parent(s): 5954e7c

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -52
app.py CHANGED
@@ -1,76 +1,117 @@
1
 
2
- import gradio as gr
3
 
4
- # Use a pipeline as a high-level helper
5
- from transformers import pipeline
6
 
7
- # Use a pipeline as a high-level helper
8
- # Load model directly
9
- from transformers import AutoImageProcessor, AutoModelForImageClassification
10
 
11
- # processor = AutoImageProcessor.from_pretrained("AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
12
- # model = AutoModelForImageClassification.from_pretrained("AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
13
- pipe = pipeline("image-classification", model="AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
14
 
15
 
16
- # $ pip install gradio_client fastapi uvicorn
17
 
18
- import requests
19
- from PIL import Image
20
- from transformers import pipeline
21
- import io
22
- import base64
23
 
24
  # Initialize the pipeline
25
  # pipe = pipeline('image-classification')
26
 
27
- def load_image_from_path(image_path):
28
- return Image.open(image_path)
29
-
30
- def load_image_from_url(image_url):
31
- response = requests.get(image_url)
32
- return Image.open(io.BytesIO(response.content))
33
-
34
- def load_image_from_base64(base64_string):
35
- image_data = base64.b64decode(base64_string)
36
- return Image.open(io.BytesIO(image_data))
37
-
38
- def predict(image_input):
39
- if isinstance(image_input, str):
40
- if image_input.startswith('http'):
41
- image = load_image_from_url(image_input)
42
- elif image_input.startswith('/'):
43
- image = load_image_from_path(image_input)
44
- else:
45
- image = load_image_from_base64(image_input)
46
- elif isinstance(image_input, Image.Image):
47
- image = image_input
48
- else:
49
- raise ValueError("Incorrect format used for image. Should be an URL linking to an image, a base64 string, a local path, or a PIL image.")
50
 
51
- return pipe(image)
52
 
53
 
54
  # def predict(image):
55
  # return pipe(image)
56
 
57
- def main():
58
- # image_input = 'path_or_url_or_base64' # Update with actual input
59
- # output = predict(image_input)
60
- # print(output)
61
 
62
- demo = gr.Interface(
63
- fn=predict,
64
- inputs='image',
65
- outputs='text',
66
- )
67
-
68
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
 
71
 
72
- if __name__ == "__main__":
73
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
 
 
 
 
 
 
 
75
 
76
 
 
1
 
2
+ # import gradio as gr
3
 
4
+ # # Use a pipeline as a high-level helper
5
+ # from transformers import pipeline
6
 
7
+ # # Use a pipeline as a high-level helper
8
+ # # Load model directly
9
+ # from transformers import AutoImageProcessor, AutoModelForImageClassification
10
 
11
+ # # processor = AutoImageProcessor.from_pretrained("AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
12
+ # # model = AutoModelForImageClassification.from_pretrained("AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
13
+ # pipe = pipeline("image-classification", model="AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
14
 
15
 
16
+ # # $ pip install gradio_client fastapi uvicorn
17
 
18
+ # import requests
19
+ # from PIL import Image
20
+ # from transformers import pipeline
21
+ # import io
22
+ # import base64
23
 
24
  # Initialize the pipeline
25
  # pipe = pipeline('image-classification')
26
 
27
+ # def load_image_from_path(image_path):
28
+ # return Image.open(image_path)
29
+
30
+ # def load_image_from_url(image_url):
31
+ # response = requests.get(image_url)
32
+ # return Image.open(io.BytesIO(response.content))
33
+
34
+ # def load_image_from_base64(base64_string):
35
+ # image_data = base64.b64decode(base64_string)
36
+ # return Image.open(io.BytesIO(image_data))
37
+
38
+ # def predict(image_input):
39
+ # if isinstance(image_input, str):
40
+ # if image_input.startswith('http'):
41
+ # image = load_image_from_url(image_input)
42
+ # elif image_input.startswith('/'):
43
+ # image = load_image_from_path(image_input)
44
+ # else:
45
+ # image = load_image_from_base64(image_input)
46
+ # elif isinstance(image_input, Image.Image):
47
+ # image = image_input
48
+ # else:
49
+ # raise ValueError("Incorrect format used for image. Should be an URL linking to an image, a base64 string, a local path, or a PIL image.")
50
 
51
+ # return pipe(image)
52
 
53
 
54
  # def predict(image):
55
  # return pipe(image)
56
 
57
+ # def main():
58
+ # # image_input = 'path_or_url_or_base64' # Update with actual input
59
+ # # output = predict(image_input)
60
+ # # print(output)
61
 
62
+ # demo = gr.Interface(
63
+ # fn=predict,
64
+ # inputs='image',
65
+ # outputs='text',
66
+ # )
67
+
68
+ # demo.launch()
69
+ # import requests
70
+ # import torch
71
+ # from PIL import Image
72
+ # from torchvision import transforms
73
+
74
+ # def predict(inp):
75
+ # inp = Image.fromarray(inp.astype("uint8"), "RGB")
76
+ # inp = transforms.ToTensor()(inp).unsqueeze(0)
77
+ # with torch.no_grad():
78
+ # prediction = torch.nn.functional.softmax(model(inp.to(device))[0], dim=0)
79
+ # return {labels[i]: float(prediction[i]) for i in range(1000)}
80
+
81
+
82
+ # inputs = gr.Image()
83
+ # outputs = gr.Label(num_top_classes=3)
84
+
85
+ # io = gr.Interface(
86
+ # fn=predict, inputs=inputs, outputs=outputs, examples=["dog.jpg"]
87
+ # )
88
+ # io.launch(inline=False, share=True)
89
 
90
 
91
 
92
+ # if __name__ == "__main__":
93
+ # main()
94
+
95
+
96
+
97
+
98
+
99
+
100
import gradio as gr
from transformers import pipeline

# Load the image-classification pipeline once at import time so the model is
# ready before the Gradio app starts serving requests.
# NOTE(review): this assignment rebinds the name `pipeline`, shadowing
# transformers.pipeline — from here on, the module-global `pipeline` is the
# loaded classifier callable (predict() below relies on this). Consider
# renaming to `pipe` together with its use site in a future change.
pipeline = pipeline("image-classification", model="AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
+
105
def predict(image):
    """Classify *image* with the module-level classifier.

    Args:
        image: The input image; a filepath string as configured by the
            Gradio ``Image`` component (``type="filepath"``).

    Returns:
        dict: Maps each predicted label to its confidence score, one entry
        per prediction returned by the classifier.
    """
    scores = {}
    for entry in pipeline(image):
        scores[entry["label"]] = entry["score"]
    return scores
108
 
109
# Build and launch the web UI.
# FIX: `gr.inputs.Image` / `gr.outputs.Label` were deprecated in Gradio 3.x
# and removed in 4.x (AttributeError: module 'gradio' has no attribute
# 'inputs'); the top-level component classes are the supported API.
gr.Interface(
    predict,
    # type="filepath" hands predict() a path string, which the transformers
    # image-classification pipeline accepts directly.
    inputs=gr.Image(label="Upload Image", type="filepath"),
    outputs=gr.Label(num_top_classes=2),
    title="AI Generated? Or Not?",
    allow_flagging="manual",
).launch()
116
 
117