Neurolingua committed on
Commit e5ecf3c
1 Parent(s): 6451a8d

Update other_function.py

Files changed (1)
  1. other_function.py +29 -40
other_function.py CHANGED
@@ -11,12 +11,7 @@ import os
  from pypdf import PdfReader
  from ai71 import AI71
  import os
- from selenium import webdriver
- from selenium.webdriver.chrome.service import Service
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
+
  import pandas as pd

  from inference_sdk import InferenceHTTPClient
@@ -108,46 +103,40 @@ def get_weather(city):

      soup=BeautifulSoup(r.text,'html.parser')
      temperature=soup.find('div',attrs={'class':'BNeawe iBp4i AP7Wnd'}).text
-     degree=temperature[:-2]
-     celcius=str(round((int(degree) - 32)* 5/9,1))+temperature[-2]+'C'
-     return (celcius)
+
+     return (temperature)


+ from zenrows import ZenRowsClient
+ from bs4 import BeautifulSoup
+ Zenrow_api=os.environ.get('Zenrow_api')
+ # Initialize ZenRows client with your API key
+ client = ZenRowsClient(str(Zenrow_api))

+ def get_rates(): # URL to scrape
+     url = "https://www.kisandeals.com/mandiprices/ALL/TAMIL-NADU/ALL"

- from twisted.internet import reactor, defer
- from scrapy.crawler import CrawlerRunner
- from scrapy.utils.project import get_project_settings
- import scrapy
+     # Fetch the webpage content using ZenRows
+     response = client.get(url)

- class RateSpider(scrapy.Spider):
-     name = 'rates'
-     start_urls = ['https://www.kisandeals.com/mandiprices/ALL/TAMIL-NADU/ALL']
+     # Check if the request was successful
+     if response.status_code == 200:
+         # Parse the raw HTML content with BeautifulSoup
+         soup = BeautifulSoup(response.content, 'html.parser')

-     def parse(self, response):
-         rows = response.xpath('//table/tbody/tr')
+         # Find the table rows containing the data
+         rows = soup.select('table tbody tr')
          data = {}
          for row in rows:
-             commodity = row.xpath('td[1]/text()').get()
-             price = row.xpath('td[2]/text()').get()
-             data[commodity] = price
-         yield data
-
- def get_rates():
-     runner = CrawlerRunner(get_project_settings())
-     deferred = runner.crawl(RateSpider)
-
-     @defer.inlineCallbacks
-     def wait_for_results():
-         yield deferred
-         reactor.stop()
-
-     reactor.callWhenRunning(wait_for_results)
-     reactor.run()
-
-     # Since we're using reactor.stop(), Scrapy won't return the result immediately.
-     # You may need to capture results using global variables or another method if needed.
+             # Extract commodity and price using BeautifulSoup
+             columns = row.find_all('td')
+             if len(columns) >= 2:
+                 commodity = columns[0].get_text(strip=True)
+                 price = columns[1].get_text(strip=True)
+                 if '₹' in price:
+                     data[commodity] = price
+     return str(data)+" This are the prices for 1 kg"
+
+
+

-     # Dummy data for demonstration purposes; replace with actual data handling logic.
-     result = 'Dummy data: These prices are for 1 kg'
-     return result
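Note on the get_weather change above: dropping the conversion means the function now returns the raw temperature string scraped from Google's 'BNeawe iBp4i AP7Wnd' div (typically something like '75°F') instead of a Celsius value. If the Celsius reading is ever needed again, a helper along these lines avoids the index-based slicing of the removed lines. This is only a sketch: the '°F' suffix is an assumption about what the scrape returns, and to_celsius is a hypothetical name, not part of the commit.

# Hypothetical helper (not in this commit): convert a scraped temperature
# string such as '75°F' into a Celsius string.
def to_celsius(temperature: str) -> str:
    degrees_f = int(temperature.rstrip('°FC').strip())  # keep only the numeric part
    degrees_c = round((degrees_f - 32) * 5 / 9, 1)
    return f"{degrees_c}°C"

# Example: to_celsius('75°F') -> '23.9°C'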
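For reference, a minimal standalone sketch of the ZenRows + BeautifulSoup flow that replaces the Scrapy/Twisted spider. It assumes the same 'Zenrow_api' environment variable and kisandeals URL as the commit. Unlike the committed version, data is initialised before the status check, so a non-200 response returns an empty result instead of raising a NameError, and the trailing message is reworded slightly.

# Standalone sketch of the new get_rates flow (assumptions noted above).
import os
from zenrows import ZenRowsClient
from bs4 import BeautifulSoup

# ZenRows client keyed from the same environment variable the commit uses
client = ZenRowsClient(str(os.environ.get('Zenrow_api')))

def get_rates():
    url = "https://www.kisandeals.com/mandiprices/ALL/TAMIL-NADU/ALL"
    data = {}  # initialised up front so a failed request still returns cleanly

    # Fetch the page through the ZenRows proxy
    response = client.get(url)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'html.parser')
        # Each table row holds a commodity name and its price
        for row in soup.select('table tbody tr'):
            columns = row.find_all('td')
            if len(columns) >= 2:
                commodity = columns[0].get_text(strip=True)
                price = columns[1].get_text(strip=True)
                if '₹' in price:  # keep only rows that actually carry a price
                    data[commodity] = price

    return str(data) + " These are the prices for 1 kg"

if __name__ == "__main__":
    print(get_rates())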