subham1707 committed on
Commit
ae9b95e
·
verified ·
1 Parent(s): 48c72f4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -54
app.py CHANGED
@@ -1,58 +1,54 @@
1
  import gradio as gr
2
- from transformers import AutoModel, AutoProcessor
3
  import torch
4
- import requests
5
- from PIL import Image
6
- from io import BytesIO
7
-
8
- fashion_items = ['top', 'trousers', 'jumper']
9
-
10
- # Load model and processor
11
- model_name = 'Marqo/marqo-fashionSigLIP'
12
- model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
13
- processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
14
-
15
- # Preprocess and normalize text data
16
- with torch.no_grad():
17
- # Ensure truncation and padding are activated
18
- processed_texts = processor(
19
- text=fashion_items,
20
- return_tensors="pt",
21
- truncation=True, # Ensure text is truncated to fit model input size
22
- padding=True # Pad shorter sequences so that all are the same length
23
- )['input_ids']
24
-
25
- text_features = model.get_text_features(processed_texts)
26
- text_features = text_features / text_features.norm(dim=-1, keepdim=True)
27
-
28
- # Prediction function
29
- def predict_from_url(url):
30
- # Check if the URL is empty
31
- if not url:
32
- return {"Error": "Please input a URL"}
33
-
34
- try:
35
- image = Image.open(BytesIO(requests.get(url).content))
36
- except Exception as e:
37
- return {"Error": f"Failed to load image: {str(e)}"}
38
-
39
- processed_image = processor(images=image, return_tensors="pt")['pixel_values']
40
-
41
- with torch.no_grad():
42
- image_features = model.get_image_features(processed_image)
43
- image_features = image_features / image_features.norm(dim=-1, keepdim=True)
44
- text_probs = (100 * image_features @ text_features.T).softmax(dim=-1)
45
-
46
- return {fashion_items[i]: float(text_probs[0, i]) for i in range(len(fashion_items))}
47
 
48
  # Gradio interface
49
- demo = gr.Interface(
50
- fn=predict_from_url,
51
- inputs=gr.Textbox(label="Enter Image URL"),
52
- outputs=gr.Label(label="Classification Results"),
53
- title="Fashion Item Classifier",
54
- allow_flagging="never"
55
- )
56
-
57
- # Launch the interface
58
- demo.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
+
5
+ # Load model (CPU-friendly, no token required)
6
+ model_id = "replit/replit-code-v1_5-3b"
7
+
8
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
9
+ model = AutoModelForCausalLM.from_pretrained(model_id)
10
+
11
+ # Ensure it's on CPU
12
+ device = torch.device("cpu")
13
+ model.to(device)
14
+
15
+ def convert_python_to_r(python_code):
16
+ # Prompt to guide the model
17
+ prompt = f"""### Task:
18
+ Convert the following Python code to equivalent R code.
19
+
20
+ ### Python code:
21
+ {python_code}
22
+
23
+ ### R code:
24
+ """
25
+
26
+ # Tokenize input
27
+ input_ids = tokenizer(prompt, return_tensors="pt", truncation=True).input_ids.to(device)
28
+
29
+ # Generate
30
+ outputs = model.generate(
31
+ input_ids,
32
+ max_length=512,
33
+ temperature=0.2,
34
+ do_sample=True,
35
+ pad_token_id=tokenizer.eos_token_id
36
+ )
37
+
38
+ # Decode result
39
+ result = tokenizer.decode(outputs[0], skip_special_tokens=True)
40
+
41
+ # Extract R code from the result (after prompt)
42
+ if "### R code:" in result:
43
+ result = result.split("### R code:")[-1].strip()
44
+
45
+ return result
 
46
 
47
  # Gradio interface
48
+ gr.Interface(
49
+ fn=convert_python_to_r,
50
+ inputs=gr.Textbox(lines=10, placeholder="Paste your Python code here..."),
51
+ outputs="text",
52
+ title="Python to R Code Converter",
53
+ description="Converts Python code to R using Replit Code Model (3B). Optimized for Hugging Face CPU Basic tier."
54
+ ).launch()