eljanmahammadli committed on
Commit
3ff1b7e
·
1 Parent(s): a54c1ef

deployed new XL model

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. humanize.py +2 -3
app.py CHANGED
@@ -627,7 +627,7 @@ def create_interface():
627
  )
628
  gr.Markdown("# Search Options", elem_classes="text-center text-3xl mb-6")
629
  with gr.Row():
630
- google_search_check = gr.Checkbox(label="Enable Google Search For Recent Sources", value=True)
631
  with gr.Group(visible=True) as search_options:
632
  with gr.Row():
633
  include_sites = gr.Textbox(
 
627
  )
628
  gr.Markdown("# Search Options", elem_classes="text-center text-3xl mb-6")
629
  with gr.Row():
630
+ google_search_check = gr.Checkbox(label="Enable Google Search For Recent Sources", value=False)
631
  with gr.Group(visible=True) as search_options:
632
  with gr.Row():
633
  include_sites = gr.Textbox(
humanize.py CHANGED
@@ -26,7 +26,7 @@ batch_size = 64
26
  model_config = {
27
  "Base Model": "polygraf-ai/poly-humanizer-base",
28
  "Large Model": "polygraf-ai/poly-humanizer-large",
29
- "XL Model": "polygraf-ai/poly-humanizer-XL-adapter",
30
  }
31
 
32
  # cache the base models, tokenizers, and adapters
@@ -76,7 +76,6 @@ def paraphrase_text(
76
  Paragraphs are stored as a number of sentences per paragraph.
77
  """
78
  progress(0, desc="Starting to Humanize")
79
- progress(0.05)
80
  # Select the model, tokenizer, and adapter
81
  tokenizer = tokenizers[model_name]
82
  model = models[model_name].to(device)
@@ -93,7 +92,7 @@ def paraphrase_text(
93
 
94
  # Process all sentences in batches
95
  paraphrased_sentences = []
96
- for i in range(0, len(all_sentences), batch_size):
97
  batch_sentences = all_sentences[i : i + batch_size]
98
  paraphrased_batch = paraphrase_sentences(
99
  model, tokenizer, batch_sentences, temperature, repetition_penalty, top_k, length_penalty
 
26
  model_config = {
27
  "Base Model": "polygraf-ai/poly-humanizer-base",
28
  "Large Model": "polygraf-ai/poly-humanizer-large",
29
+ "XL Model": "polygraf-ai/poly-humanizer-XL-adapter-v2",
30
  }
31
 
32
  # cache the base models, tokenizers, and adapters
 
76
  Paragraphs are stored as a number of sentences per paragraph.
77
  """
78
  progress(0, desc="Starting to Humanize")
 
79
  # Select the model, tokenizer, and adapter
80
  tokenizer = tokenizers[model_name]
81
  model = models[model_name].to(device)
 
92
 
93
  # Process all sentences in batches
94
  paraphrased_sentences = []
95
+ for i in progress.tqdm(range(0, len(all_sentences), batch_size)):
96
  batch_sentences = all_sentences[i : i + batch_size]
97
  paraphrased_batch = paraphrase_sentences(
98
  model, tokenizer, batch_sentences, temperature, repetition_penalty, top_k, length_penalty