Text Generation · Transformers · Safetensors · qwen2 · reranker · conversational · text-generation-inference
ptrdvn committed (verified)
Commit 08d0055 · 1 Parent(s): e86e62e

Update README.md

Files changed (1): README.md (+4 -4)
README.md CHANGED
@@ -173,7 +173,7 @@ import numpy as np
 def make_reranker_input(t, q):
     return f"<<<Query>>>\n{q}\n\n<<<Context>>>\n{t}"
 
-def make_reranker_training_datum(context, question):
+def make_reranker_inference_conversation(context, question):
     system_message = "Given a query and a piece of text, output a score of 1-7 based on how related the query is to the text. 1 means least related and 7 is most related."
 
     return [
@@ -195,7 +195,7 @@ query_texts = [
     ("What is the square root of 999?", "An apple is a round, edible fruit produced by an apple tree (Malus spp., among them the domestic or orchard apple; Malus domestica)."),
 ]
 
-chats = [make_reranker_training_datum(c, q) for q, c in query_texts]
+chats = [make_reranker_inference_conversation(c, q) for q, c in query_texts]
 responses = llm.chat(chats, sampling_params)
 probs = np.array([[get_prob(r.outputs[0].logprobs[0], y) for y in idx_tokens] for r in responses])
@@ -223,7 +223,7 @@ import numpy as np
 def make_reranker_input(t, q):
     return f"<<<Query>>>\n{q}\n\n<<<Context>>>\n{t}"
 
-def make_reranker_training_datum(context, question):
+def make_reranker_inference_conversation(context, question):
     system_message = "Given a query and a piece of text, output a score of 1-7 based on how related the query is to the text. 1 means least related and 7 is most related."
 
     return [
@@ -250,7 +250,7 @@ query_texts = [
     ("What is the square root of 999?", "An apple is a round, edible fruit produced by an apple tree (Malus spp., among them the domestic or orchard apple; Malus domestica)."),
 ]
 
-chats = [make_reranker_training_datum(c, q) for q, c in query_texts]
+chats = [make_reranker_inference_conversation(c, q) for q, c in query_texts]
 responses = pipe(
     chats,
     gen_config=GenerationConfig(temperature=1.0, logprobs=14, max_new_tokens=1, do_sample=True)
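
For context, here is a minimal, self-contained sketch of how the renamed helper is used end to end in the vLLM example touched by the first two hunks. The `get_prob` helper, the `idx_tokens` lookup, the message roles returned by the helper, the `SamplingParams` values, and the model id are assumptions reconstructed for illustration; they are not copied from the README's full listing.

```python
# Sketch only: assumed details are marked in comments below.
import math
import numpy as np
from vllm import LLM, SamplingParams

MODEL_ID = "<this-reranker-model>"  # placeholder: substitute this repo's model id

def make_reranker_input(t, q):
    return f"<<<Query>>>\n{q}\n\n<<<Context>>>\n{t}"

def make_reranker_inference_conversation(context, question):
    system_message = "Given a query and a piece of text, output a score of 1-7 based on how related the query is to the text. 1 means least related and 7 is most related."
    # Assumed message layout: system instruction followed by the query/context prompt.
    return [
        {"role": "system", "content": system_message},
        {"role": "user", "content": make_reranker_input(context, question)},
    ]

def get_prob(logprob_dict, token_id):
    # Assumed helper: probability of a token id from a vLLM logprobs dict,
    # 0.0 if it was not among the returned top logprobs.
    return math.exp(logprob_dict[token_id].logprob) if token_id in logprob_dict else 0.0

llm = LLM(model=MODEL_ID)
# Assumed sampling settings: one generated token, top-14 logprobs returned.
sampling_params = SamplingParams(temperature=0.0, logprobs=14, max_tokens=1)

tok = llm.get_tokenizer()
# Token ids for the score labels "1".."7" (assumes each encodes to a single token).
idx_tokens = [tok.encode(str(i), add_special_tokens=False)[0] for i in range(1, 8)]

query_texts = [
    # First pair is illustrative; second pair is taken from the diff context.
    ("What is the capital of France?", "Paris is the capital and largest city of France."),
    ("What is the square root of 999?", "An apple is a round, edible fruit produced by an apple tree (Malus spp., among them the domestic or orchard apple; Malus domestica)."),
]

chats = [make_reranker_inference_conversation(c, q) for q, c in query_texts]
responses = llm.chat(chats, sampling_params)

# Probability mass over the seven score tokens for each query/context pair,
# then an expected relatedness score in [1, 7].
probs = np.array([[get_prob(r.outputs[0].logprobs[0], y) for y in idx_tokens] for r in responses])
expected_scores = (probs * np.arange(1, 8)).sum(axis=-1) / probs.sum(axis=-1)
print(expected_scores)
```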
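
The last two hunks apply the same rename to the LMDeploy `pipeline` example. A matching sketch follows, again under assumptions: the `Response.logprobs` layout (a list of `{token_id: logprob}` dicts for the generated tokens), the tokenizer-based `idx_tokens` lookup, and the placeholder model id are not taken from the README.

```python
# Sketch only: LMDeploy variant of the same scoring flow.
import math
import numpy as np
from lmdeploy import pipeline, GenerationConfig
from transformers import AutoTokenizer

MODEL_ID = "<this-reranker-model>"  # placeholder: substitute this repo's model id

def make_reranker_input(t, q):
    return f"<<<Query>>>\n{q}\n\n<<<Context>>>\n{t}"

def make_reranker_inference_conversation(context, question):
    system_message = "Given a query and a piece of text, output a score of 1-7 based on how related the query is to the text. 1 means least related and 7 is most related."
    return [
        {"role": "system", "content": system_message},
        {"role": "user", "content": make_reranker_input(context, question)},
    ]

tok = AutoTokenizer.from_pretrained(MODEL_ID)
idx_tokens = [tok.encode(str(i), add_special_tokens=False)[0] for i in range(1, 8)]

query_texts = [
    ("What is the square root of 999?", "An apple is a round, edible fruit produced by an apple tree (Malus spp., among them the domestic or orchard apple; Malus domestica)."),
]

pipe = pipeline(MODEL_ID)
chats = [make_reranker_inference_conversation(c, q) for q, c in query_texts]
responses = pipe(
    chats,
    gen_config=GenerationConfig(temperature=1.0, logprobs=14, max_new_tokens=1, do_sample=True)
)

def get_prob(logprob_dict, token_id):
    # Assumed: LMDeploy returns raw log-probabilities as floats, so exponentiate directly.
    return math.exp(logprob_dict[token_id]) if token_id in logprob_dict else 0.0

# First generated token's logprobs per response -> expected score in [1, 7].
probs = np.array([[get_prob(r.logprobs[0], y) for y in idx_tokens] for r in responses])
expected_scores = (probs * np.arange(1, 8)).sum(axis=-1) / probs.sum(axis=-1)
print(expected_scores)
```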