mreidy3 committed
Commit cc16683 · verified · 1 Parent(s): d28a88e

Update app.py

Files changed (1)
  1. app.py +2 -41
app.py CHANGED
@@ -19,47 +19,8 @@ model.eval() # Set model to evaluation mode
 
 
 
-def verify(psudo_id, username, display_name, tweet_content, is_verified, likes):
-    '''
-    Main Endpoint for URaBOT, a POST request that takes in a tweet's data and returns a "bot" score
-
-    Returns: JSON object {"percent": double}
-
-    payload:
-        "psudo_id": the temporary id of the tweet (as assigned in local HTML from Twitter)
-        "username": the profile's username (@tag)
-        "display_name": the profiles display name
-        "tweet_content": the text content of the tweet
-    '''
-
-    # #========== Error codes ==========#
-
-    # # Confirm that full payload was sent
-    # if 'username' not in request.form:
-    #     return make_response(jsonify({"error": "Invalid request parameters.", "message" : "No username provided"}), 400)
-
-    # if 'display_name' not in request.form:
-    #     return make_response(jsonify({"error": "Invalid request parameters.", "message" : "No display_name provided"}), 400)
-
-    # if 'tweet_content' not in request.form:
-    #     return make_response(jsonify({"error": "Invalid request parameters.", "message" : "No tweet_content provided"}), 400)
-
-    # # Prevent multiple requests for the same tweet
-    # if request.form["psudo_id"] in processed_tweets:
-    #     return make_response(jsonify({"error": "Conflict, tweet is already being/has been processed"}), 409)
-
-
-    # #========== Resolve Multiple Requests ==========#
-
-    # # Add tweet to internal (backend) process list
-    # processed_tweets.append(request.form["psudo_id"])
-
-
-    #========== Return Classification ==========#
-
-    # Process the tweet through the model
-    # input = request.form["tweet_content"] + tokenizer.sep_token + request.form["display_name"] + tokenizer.sep_token + request.form["is_verified"] + tokenizer.sep_token + request.form["likes"]
+def verify(display_name, tweet_content, is_verified, likes):
 
     input = tweet_content + tokenizer.sep_token + display_name + tokenizer.sep_token + is_verified + tokenizer.sep_token + likes
     tokenized_input = tokenizer(input, return_tensors='pt', padding=True, truncation=True).to(device)
     with torch.no_grad():
@@ -90,7 +51,7 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 # Set up the Gradio Interface
 iface = gr.Interface(
     fn=verify, # Function to process input
-    inputs=[gr.Textbox(label= "Text 1"), gr.Textbox(label= "Text 2"), gr.Textbox(label= "Text"), gr.Textbox(label= "Text 4"), gr.Textbox(label= "Text 5"), gr.Textbox(label= "Text 6")], # Input type (Textbox for text)
+    inputs=[gr.Textbox(label= "Display Name"), gr.Textbox(label= "Tweet Content"), gr.Textbox(label= "IsVerified"), gr.Textbox(label= "Number of Likes")], # Input type (Textbox for text)
     outputs=gr.Textbox(), # Output type (Textbox for generated text)
     live=True # Optional: To update the result as you type
 )
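
For context, here is a minimal sketch of what the trimmed-down verify() plausibly looks like end to end after this commit. Everything after the torch.no_grad() line is an assumption, since the hunk above cuts off there: the forward pass, softmax, class index, and string return value are guesses at how the "bot" score is produced, and tokenizer, model, and device come from earlier in app.py.

```python
# Sketch only: the body below torch.no_grad() is assumed, not shown in the diff.
import torch

def verify(display_name, tweet_content, is_verified, likes):
    # Join the tweet fields into one sep-delimited string for the classifier
    input = tweet_content + tokenizer.sep_token + display_name + tokenizer.sep_token + is_verified + tokenizer.sep_token + likes
    tokenized_input = tokenizer(input, return_tensors='pt', padding=True, truncation=True).to(device)
    with torch.no_grad():
        output = model(**tokenized_input)             # assumed: sequence-classification forward pass
        probs = torch.softmax(output.logits, dim=-1)  # assumed: logits -> class probabilities
    # assumed: return the "bot" class probability as text so gr.Textbox can display it
    return str(probs[0][1].item())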
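```

And the relabeled Gradio wiring in full, for reference. The input labels and options mirror the second hunk above; the launch() call at the bottom is an assumption, as it falls outside the hunks shown.

```python
import gradio as gr

# Set up the Gradio Interface with the four tweet fields verify() now expects
iface = gr.Interface(
    fn=verify,                        # Function to process input
    inputs=[
        gr.Textbox(label="Display Name"),
        gr.Textbox(label="Tweet Content"),
        gr.Textbox(label="IsVerified"),
        gr.Textbox(label="Number of Likes"),
    ],
    outputs=gr.Textbox(),             # Generated "bot" score as text
    live=True,                        # Update the result as you type
)

if __name__ == "__main__":
    iface.launch()                    # assumed: not shown in this diff
```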