# --- Hugging Face Spaces page metadata (scrape artifact, kept for provenance) ---
# Spaces: Sleeping
# File size: 3,353 Bytes
# Commits: 7cc417d d02c9bc 5954e7c
# import gradio as gr
# # Use a pipeline as a high-level helper
# from transformers import pipeline
# # Use a pipeline as a high-level helper
# # Load model directly
# from transformers import AutoImageProcessor, AutoModelForImageClassification
# # processor = AutoImageProcessor.from_pretrained("AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
# # model = AutoModelForImageClassification.from_pretrained("AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
# pipe = pipeline("image-classification", model="AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat")
# # $ pip install gradio_client fastapi uvicorn
# import requests
# from PIL import Image
# from transformers import pipeline
# import io
# import base64
# Initialize the pipeline
# pipe = pipeline('image-classification')
# def load_image_from_path(image_path):
# return Image.open(image_path)
# def load_image_from_url(image_url):
# response = requests.get(image_url)
# return Image.open(io.BytesIO(response.content))
# def load_image_from_base64(base64_string):
# image_data = base64.b64decode(base64_string)
# return Image.open(io.BytesIO(image_data))
# def predict(image_input):
# if isinstance(image_input, str):
# if image_input.startswith('http'):
# image = load_image_from_url(image_input)
# elif image_input.startswith('/'):
# image = load_image_from_path(image_input)
# else:
# image = load_image_from_base64(image_input)
# elif isinstance(image_input, Image.Image):
# image = image_input
# else:
# raise ValueError("Incorrect format used for image. Should be an URL linking to an image, a base64 string, a local path, or a PIL image.")
# return pipe(image)
# def predict(image):
# return pipe(image)
# def main():
# # image_input = 'path_or_url_or_base64' # Update with actual input
# # output = predict(image_input)
# # print(output)
# demo = gr.Interface(
# fn=predict,
# inputs='image',
# outputs='text',
# )
# demo.launch()
# import requests
# import torch
# from PIL import Image
# from torchvision import transforms
# def predict(inp):
# inp = Image.fromarray(inp.astype("uint8"), "RGB")
# inp = transforms.ToTensor()(inp).unsqueeze(0)
# with torch.no_grad():
# prediction = torch.nn.functional.softmax(model(inp.to(device))[0], dim=0)
# return {labels[i]: float(prediction[i]) for i in range(1000)}
# inputs = gr.Image()
# outputs = gr.Label(num_top_classes=3)
# io = gr.Interface(
# fn=predict, inputs=inputs, outputs=outputs, examples=["dog.jpg"]
# )
# io.launch(inline=False, share=True)
# if __name__ == "__main__":
# main()
import gradio as gr
from transformers import pipeline

# Load the image-classification model once at import time so every request
# reuses it. Bind the instance to `classifier` rather than reassigning
# `pipeline`, which would shadow the imported factory function.
classifier = pipeline(
    "image-classification",
    model="AZIIIIIIIIZ/vit-base-patch16-224-finetuned-eurosat",
)


def predict(image):
    """Classify an uploaded image and return label->score for gr.Label.

    Parameters
    ----------
    image : str
        Filepath of the uploaded image (the Gradio input component is
        configured with type="filepath").

    Returns
    -------
    dict[str, float]
        Mapping of each predicted class label to its confidence score;
        gr.Label renders the top entries of this dict.
    """
    predictions = classifier(image)
    return {p["label"]: p["score"] for p in predictions}


# NOTE: gr.inputs / gr.outputs were deprecated in Gradio 3.x and removed in
# 4.x; the top-level component classes are the supported API.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(label="Upload Image", type="filepath"),
    outputs=gr.Label(num_top_classes=2),
    title="AI Generated? Or Not?",
    allow_flagging="manual",
)

if __name__ == "__main__":
    demo.launch()