sashtech committed
Commit f3e42ce · verified · 1 Parent(s): 031a20c

Update app.py

Files changed (1)
  1. app.py +5 -7
app.py CHANGED
@@ -24,19 +24,17 @@ word_vectors = api.load("glove-wiki-gigaword-50")
 # Check for GPU and set the device accordingly
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-# Load GPT-3.5-turbo model and tokenizer from Hugging Face
-tokenizer_ai = AutoTokenizer.from_pretrained("Xenova/gpt-3.5-turbo")
-model_ai = AutoModel.from_pretrained("Xenova/gpt-3.5-turbo").to(device)
+# Load GPT-J-6B model and tokenizer from Hugging Face
+tokenizer_ai = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
+model_ai = AutoModel.from_pretrained("EleutherAI/gpt-j-6B").to(device)
 
-# AI detection function using GPT-3.5-turbo-based model
+# AI detection function using GPT-J-6B-based model
 def detect_ai_generated(text):
     inputs = tokenizer_ai(text, return_tensors="pt", truncation=True, max_length=512).to(device)
     with torch.no_grad():
         outputs = model_ai(**inputs)
-    # Since this model does not directly output classification logits, you'll need to process the hidden states
-    # For simplicity, let's just use the first hidden state for now (you may need to adjust based on your use case)
+    # Process the hidden states to generate a score (example logic, adjust as needed)
     hidden_state = outputs.last_hidden_state[:, 0, :]  # Use the first token's representation
-    # Example: calculate some kind of score based on the hidden state
     score = torch.mean(hidden_state).item()
     return f"AI-Generated Content Score: {score:.2f}"
 
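
For reference, below is a minimal, self-contained sketch of the post-commit code path as it would run outside the Space. The imports and the example call at the bottom are assumptions about the rest of app.py, not part of this commit; note also that EleutherAI/gpt-j-6B is a 6-billion-parameter checkpoint, so loading it in float32 takes on the order of 24 GB of memory and will be slow or infeasible on CPU-only hardware.

# Minimal sketch of the updated snippet (imports and the example call are assumptions).
import torch
from transformers import AutoTokenizer, AutoModel

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Same model loading as in the diff above.
tokenizer_ai = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model_ai = AutoModel.from_pretrained("EleutherAI/gpt-j-6B").to(device)

def detect_ai_generated(text):
    # Tokenize and run a single forward pass without gradients.
    inputs = tokenizer_ai(text, return_tensors="pt", truncation=True, max_length=512).to(device)
    with torch.no_grad():
        outputs = model_ai(**inputs)
    # Heuristic score: mean of the first token's hidden state (no classification head involved).
    hidden_state = outputs.last_hidden_state[:, 0, :]
    score = torch.mean(hidden_state).item()
    return f"AI-Generated Content Score: {score:.2f}"

if __name__ == "__main__":
    # Hypothetical usage: score one piece of text.
    print(detect_ai_generated("The quick brown fox jumps over the lazy dog."))

As the comment added in the commit itself notes, this is example logic: GPT-J-6B is a causal language model without a classification head, so the mean of a hidden state is a placeholder score rather than a calibrated measure of whether text is AI-generated.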