Neurolingua committed on
Commit
5323742
·
verified ·
1 Parent(s): c74b0db

Update other_function.py

Browse files
Files changed (1) hide show
  1. other_function.py +35 -15
other_function.py CHANGED
@@ -111,26 +111,46 @@ def get_weather(city):
111
  degree=temperature[:-2]
112
  celcius=str(round((int(degree) - 32)* 5/9,1))+temperature[-2]+'C'
113
  return (celcius)
114
-
115
  import scrapy
116
- from scrapy.selector import Selector
117
- from urllib.parse import urljoin
118
  import pandas as pd
119
 
120
class KisanDealsSpider(scrapy.Spider):
    """Scrape mandi (market) prices for Tamil Nadu from kisandeals.com.

    parse() returns a string: the row-indexed dict repr of the scraped
    table followed by a unit note.
    """

    name = 'kisan_deals'
    start_urls = ['https://www.kisandeals.com/mandiprices/ALL/TAMIL-NADU/ALL']

    def parse(self, response):
        """Parse the price table into {row_index: {'crop': ..., 'rate': ...}}."""
        table = response.xpath('//table')
        # BUG FIX: './/tr' — the original '//tr' is document-absolute and
        # ignores the `table` selector entirely.
        rows = table.xpath('.//tr')

        data = []
        for row in rows[1:]:  # Skip the header row
            # BUG FIX: relative './/td[...]' — the original '//td[1]' was
            # document-absolute, so every iteration returned the FIRST
            # table cell of the whole page instead of this row's cells.
            crop = row.xpath('.//td[1]/text()').get()
            rate = row.xpath('.//td[2]/text()').get()
            data.append({'crop': crop, 'rate': rate})

        df = pd.DataFrame(data)
        # BUG FIX: the DataFrame columns are 'crop' and 'rate'; dropping the
        # nonexistent 'Quintal Price' column raised KeyError. errors='ignore'
        # keeps the original intent without crashing.
        df = df.drop(columns=['Quintal Price'], errors='ignore')
        return str(df.to_dict('index')) + ' These prices are for 1 kg'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
  degree=temperature[:-2]
112
  celcius=str(round((int(degree) - 32)* 5/9,1))+temperature[-2]+'C'
113
  return (celcius)
 
114
  import scrapy
115
+ from scrapy.crawler import CrawlerProcess
 
116
  import pandas as pd
117
 
118
class RatesSpider(scrapy.Spider):
    """Scrape commodity mandi prices for Tamil Nadu from kisandeals.com.

    parse() returns a string: the {commodity: price-per-kg} dict repr
    followed by a unit note.
    """

    name = "rates_spider"
    start_urls = ['https://www.kisandeals.com/mandiprices/ALL/TAMIL-NADU/ALL']

    def parse(self, response):
        """Parse the price table into a commodity -> price-per-kg mapping."""
        # Body rows of the price table (tbody selection skips the header).
        table_rows = response.xpath('//table/tbody/tr')

        # Collected (commodity, price) pairs.
        data = []
        for row in table_rows:
            # BUG FIX: .get() returns None for an empty cell, and the
            # original .get().strip() then raised AttributeError; fall back
            # to '' before stripping.
            commodity_name = (row.xpath('td[1]//text()').get() or '').strip()
            price_per_kg = (row.xpath('td[2]//text()').get() or '').strip()
            data.append((commodity_name, price_per_kg))

        # Convert the data to a Pandas DataFrame, then to a dictionary.
        df = pd.DataFrame(data, columns=['Commodity', 'Price per kg'])
        rate_dict = df.set_index('Commodity')['Price per kg'].to_dict()

        # BUG FIX: the original `rate_dict + '...'` is dict + str and raises
        # TypeError at runtime; stringify the dict before concatenating.
        return str(rate_dict) + ' This is prices for 1 kg'
144
+
145
def get_rates():
    """Run RatesSpider synchronously and return its parse() result.

    Returns the string produced by RatesSpider.parse (a commodity->price
    mapping plus a unit note), or None if the crawl scraped nothing.
    Blocks until the crawl completes; can only be called once per process
    (Twisted's reactor is not restartable).
    """
    # Holder for the parse() return value — CrawlerProcess gives us no
    # direct handle on a callback's return value.
    captured = {}

    class _CapturingSpider(RatesSpider):
        # Wrap parse so its result is saved where get_rates can read it.
        def parse(self, response):
            captured['result'] = super().parse(response)
            return None

    process = CrawlerProcess({
        'USER_AGENT': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
        'LOG_LEVEL': 'ERROR',  # Suppress logging to keep the output clean
    })

    # BUG FIX: crawl() must be given a spider *class* — passing an instance
    # raises ValueError in current Scrapy. Also, the original returned
    # spider.parse(None), which crashed on response.xpath with a None
    # response; we capture the real in-crawl parse result instead.
    process.crawl(_CapturingSpider)
    process.start()  # This will block until the spider is done

    return captured.get('result')