ElPremOoO committed on
Commit
fa41e21
·
verified ·
1 Parent(s): b026e86

Create main.py

Browse files
Files changed (1) hide show
  1. main.py +58 -0
main.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from flask import Flask, request, jsonify
import torch
from transformers import RobertaTokenizer
import os

# Initialize Flask app.
# NOTE: must be __name__ (dunder); the bare `name` in the original is a
# NameError at import time — the underscores were stripped by markdown
# rendering when this file was pasted.
app = Flask(__name__)

# Load the CodeBERT tokenizer and the trained scoring model.
tokenizer = RobertaTokenizer.from_pretrained("microsoft/codebert-base")
# SECURITY NOTE(review): torch.load unpickles arbitrary objects — model.pth
# must come from a trusted source. map_location forces CPU so the server
# runs without a GPU.
model = torch.load("model.pth", map_location=torch.device('cpu'))

# Ensure the model is in evaluation mode (disables dropout etc.).
# The original line was missing its leading '#', making it a SyntaxError.
model.eval()
@app.route("/")
def home():
    """Health-check endpoint: confirms the service is up and reachable."""
    status_message = "Model is running!"
    return status_message
@app.route("/predict", methods=["POST"])
def predict():
    """Predict a score for a source-code snippet.

    Expects a JSON body ``{"code": "<source code string>"}``.
    Returns ``{"predicted_score": <float>}`` on success, an error payload
    with status 400 when the body or the 'code' key is missing, or 500 on
    any unexpected failure.
    """
    try:
        # Parse the JSON body exactly once. silent=True returns None on a
        # missing/invalid JSON body instead of raising, so we can answer 400.
        # (The original called request.get_json() twice and printed
        # data["code"] BEFORE checking the key existed, so a missing key
        # surfaced as a 500 instead of the intended 400.)
        data = request.get_json(silent=True)
        if not data or "code" not in data:
            return jsonify({"error": "Missing 'code' parameter"}), 400

        code_input = data["code"]
        print("Received code:", code_input)  # Debugging: confirm request payload

        # Tokenize the input code using the CodeBERT tokenizer,
        # padding/truncating to the model's 512-token context.
        inputs = tokenizer(
            code_input,
            return_tensors='pt',
            truncation=True,
            padding='max_length',
            max_length=512
        )

        # Run inference without gradient tracking (faster, no autograd state).
        with torch.no_grad():
            outputs = model(**inputs)
            # Single-output regression head: squeeze to a scalar float.
            prediction = outputs.logits.squeeze().item()

        print(f"Predicted score: {prediction}")  # Debugging: print prediction

        return jsonify({"predicted_score": prediction})

    except Exception as e:
        # Top-level request boundary: report any unexpected failure as 500.
        return jsonify({"error": str(e)}), 500
# Run the Flask app when executed as a script.
# The original read `if name == "main":` — the dunder underscores were
# stripped by markdown rendering; that comparison raises NameError and
# would never match. The comment line above was also missing its '#'.
if __name__ == "__main__":
    app.run(debug=True)