asataura committed on
Commit
5bbb9d5
·
1 Parent(s): 7ebe03e

Commenting out the LLM part

Browse files
Files changed (2) hide show
  1. app.py +23 -23
  2. requirements.txt +1 -1
app.py CHANGED
@@ -9,29 +9,29 @@ import transformers
9
  from transformers import AutoModelForCausalLM, AutoTokenizer
10
  import torch
11
 
12
-
13
- model = "tiiuae/falcon-7b-instruct"
14
-
15
- tokenizer = AutoTokenizer.from_pretrained(model)
16
- pipeline = transformers.pipeline(
17
- "text-generation",
18
- model=model,
19
- tokenizer=tokenizer,
20
- torch_dtype=torch.bfloat16,
21
- trust_remote_code=True,
22
- device_map="auto",
23
- )
24
- sequences = pipeline(
25
- "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
26
- max_length=200,
27
- do_sample=True,
28
- top_k=10,
29
- num_return_sequences=1,
30
- eos_token_id=tokenizer.eos_token_id,
31
- )
32
- st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
33
- for seq in sequences:
34
- st.write(f"Result: {seq['generated_text']}")
35
 
36
 
37
  def perform_training(jammer_type, channel_switching_cost):
 
9
  from transformers import AutoModelForCausalLM, AutoTokenizer
10
  import torch
11
 
12
+ #
13
+ # model = "tiiuae/falcon-7b-instruct"
14
+ #
15
+ # tokenizer = AutoTokenizer.from_pretrained(model)
16
+ # pipeline = transformers.pipeline(
17
+ # "text-generation",
18
+ # model=model,
19
+ # tokenizer=tokenizer,
20
+ # torch_dtype=torch.bfloat16,
21
+ # trust_remote_code=True,
22
+ # device_map="auto",
23
+ # )
24
+ # sequences = pipeline(
25
+ # "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
26
+ # max_length=200,
27
+ # do_sample=True,
28
+ # top_k=10,
29
+ # num_return_sequences=1,
30
+ # eos_token_id=tokenizer.eos_token_id,
31
+ # )
32
+ # st.title("Beyond the Anti-Jam: Integration of DRL with LLM")
33
+ # for seq in sequences:
34
+ # st.write(f"Result: {seq['generated_text']}")
35
 
36
 
37
  def perform_training(jammer_type, channel_switching_cost):
requirements.txt CHANGED
@@ -3,7 +3,7 @@ tensorflow
3
  matplotlib
4
  gym
5
  streamlit
6
- transformers==4.6.0
7
  torch
8
  einops
9
  accelerate
 
3
  matplotlib
4
  gym
5
  streamlit
6
+ transformers
7
  torch
8
  einops
9
  accelerate