asataura committed
Commit 76efa5e · 1 Parent(s): a86d2bc

Adding the description of the app

Files changed (1)
trainer.py +10 -1
trainer.py CHANGED
@@ -24,6 +24,15 @@ llm_chain = LLMChain(prompt=prompt, verbose=True, llm=llm)
 
 
 def train(jammer_type, channel_switching_cost):
+    st.markdown("""
+    In this demonstration, we address the challenge of mitigating jamming attacks using Deep Reinforcement Learning (DRL).
+    The process comprises three main steps:
+
+    1. **DRL Training**: An agent is trained using DRL to tackle jamming attacks.
+    2. **Training Performance Visualization**: Post-training, the performance metrics (rewards, exploration rate, etc.) are visualized to assess the agent's proficiency.
+    3. **Insights Generation with Falcon 7B LLM**: Leveraging the Falcon 7B LLM, we generate insights from the training graphs, elucidating the agent's behavior and achievements.
+
+    """, unsafe_allow_html=True)
     st.subheader("DRL Training Progress")
     progress_bar = st.progress(0)
     status_text = st.empty()
@@ -35,7 +44,7 @@ def train(jammer_type, channel_switching_cost):
     s_size = ob_space.shape[0]
     a_size = ac_space.n
     max_env_steps = 100
-    TRAIN_Episodes = 5
+    TRAIN_Episodes = 50
     env._max_episode_steps = max_env_steps
 
     epsilon = 1.0
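For readers skimming the diff: the widgets and hyperparameters touched here (`progress_bar`, `status_text`, `TRAIN_Episodes`, `epsilon`) only pay off inside the episode loop later in the full file, which these hunks don't show. Below is a minimal, hypothetical sketch of such a loop. `StubJammingEnv`, `train_sketch`, and the `epsilon_min`/`epsilon_decay` values are illustrative assumptions, not code from this commit; only the `st.progress`/`st.empty` usage mirrors the diff.

```python
# Illustrative sketch only: the stub environment and random policy stand in
# for the repo's real anti-jamming environment and DQN agent (not in the diff).
import random

import numpy as np
import streamlit as st


class StubJammingEnv:
    """Hypothetical gym-style stand-in for the anti-jamming environment."""

    def __init__(self, n_channels=4, max_episode_steps=100):
        self.n_channels = n_channels
        self._max_episode_steps = max_episode_steps
        self._t = 0

    def reset(self):
        self._t = 0
        return np.zeros(self.n_channels, dtype=np.float32)

    def step(self, action):
        self._t += 1
        # Reward +1 when the chosen channel dodges a (random) jammer, else -1.
        reward = 1.0 if action != random.randrange(self.n_channels) else -1.0
        done = self._t >= self._max_episode_steps
        return np.zeros(self.n_channels, dtype=np.float32), reward, done, {}


def train_sketch(train_episodes=50, epsilon=1.0,
                 epsilon_min=0.01, epsilon_decay=0.999):
    env = StubJammingEnv()
    st.subheader("DRL Training Progress")
    progress_bar = st.progress(0)
    status_text = st.empty()
    rewards = []

    for episode in range(train_episodes):
        env.reset()
        total, done = 0.0, False
        while not done:
            # A real agent would act greedily w.r.t. its Q-network with
            # probability 1 - epsilon; the stub always explores.
            action = random.randrange(env.n_channels)
            _, reward, done, _ = env.step(action)
            total += reward
            epsilon = max(epsilon_min, epsilon * epsilon_decay)
        rewards.append(total)
        # Mirror the widgets created in the diff: a fraction in [0.0, 1.0]
        # for st.progress, free-form text for the st.empty placeholder.
        progress_bar.progress((episode + 1) / train_episodes)
        status_text.text(
            f"Episode {episode + 1}/{train_episodes} | "
            f"reward {total:.1f} | epsilon {epsilon:.3f}"
        )
    return rewards
```

Run under `streamlit run`, a loop like this advances the progress bar once per episode, which also suggests why the bump from `TRAIN_Episodes = 5` to `50` matters: with only five episodes, the reward curve and exploration-rate decay that step 2 visualizes (and that the Falcon 7B step is later asked to interpret) would barely show a trend.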