asataura committed on
Commit
12f69dd
·
1 Parent(s): 3a53910

Integrating the DRL and LLM

Browse files
Files changed (1) hide show
  1. app.py +32 -8
app.py CHANGED
@@ -10,6 +10,30 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
10
  import torch
11
 
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  def perform_training(jammer_type, channel_switching_cost):
14
  agent = train(jammer_type, channel_switching_cost)
15
  return agent
@@ -19,13 +43,13 @@ def perform_testing(agent, jammer_type, channel_switching_cost):
19
  test(agent, jammer_type, channel_switching_cost)
20
 
21
 
22
- model_name = "tiiuae/falcon-7b-instruct"
23
- model = AutoModelForCausalLM.from_pretrained(model_name)
24
- tokenizer = AutoTokenizer.from_pretrained(model_name)
25
- pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
26
- temperature=0.7)
27
 
28
- st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
29
 
30
  st.sidebar.header("Make Your Environment Configuration")
31
  mode = st.sidebar.radio("Choose Mode", ["Auto", "Manual"])
@@ -46,6 +70,6 @@ start_button = st.sidebar.button('Start')
46
  if start_button:
47
  agent = perform_training(jammer_type, channel_switching_cost)
48
  st.subheader("Generating Insights of the DRL-Training")
49
- text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
50
- st.write(text)
51
  test(agent, jammer_type, channel_switching_cost)
 
10
  import torch
11
 
12
 
13
+ model = "tiiuae/falcon-7b-instruct"
14
+
15
+ tokenizer = AutoTokenizer.from_pretrained(model)
16
+ pipeline = transformers.pipeline(
17
+ "text-generation",
18
+ model=model,
19
+ tokenizer=tokenizer,
20
+ torch_dtype=torch.bfloat16,
21
+ trust_remote_code=True,
22
+ device_map="auto",
23
+ )
24
+ sequences = pipeline(
25
+ "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
26
+ max_length=200,
27
+ do_sample=True,
28
+ top_k=10,
29
+ num_return_sequences=1,
30
+ eos_token_id=tokenizer.eos_token_id,
31
+ )
32
+ st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
33
+ for seq in sequences:
34
+ st.write(f"Result: {seq['generated_text']}")
35
+
36
+
37
  def perform_training(jammer_type, channel_switching_cost):
38
  agent = train(jammer_type, channel_switching_cost)
39
  return agent
 
43
  test(agent, jammer_type, channel_switching_cost)
44
 
45
 
46
+ # model_name = "tiiuae/falcon-7b-instruct"
47
+ # model = AutoModelForCausalLM.from_pretrained(model_name)
48
+ # tokenizer = AutoTokenizer.from_pretrained(model_name)
49
+ # pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=100,
50
+ # temperature=0.7)
51
 
52
+ # st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
53
 
54
  st.sidebar.header("Make Your Environment Configuration")
55
  mode = st.sidebar.radio("Choose Mode", ["Auto", "Manual"])
 
70
  if start_button:
71
  agent = perform_training(jammer_type, channel_switching_cost)
72
  st.subheader("Generating Insights of the DRL-Training")
73
+ # text = pipeline("Discuss this topic: Integrating LLMs to DRL-based anti-jamming.")
74
+ # st.write(text)
75
  test(agent, jammer_type, channel_switching_cost)