index | package | name | docstring | code | signature |
---|---|---|---|---|---|
8,305 | feapder.core.scheduler | __add_task | null | def __add_task(self):
# 判断任务池中是否还有任务,若有接着抓取
todo_task_count = self._collector.get_requests_count()
if todo_task_count:
log.info("检查到有待做任务 %s 条,不重复下发新任务,将接着上回异常终止处继续抓取" % todo_task_count)
else:
for parser in self._parsers:
# 启动parser 的 start_requests
results = parser.start_requests()
# 添加request到请求队列,由请求队列统一入库
if results and not isinstance(results, Iterable):
raise Exception("%s.%s返回值必须可迭代" % (parser.name, "start_requests"))
result_type = 1
for result in results or []:
if isinstance(result, Request):
result.parser_name = result.parser_name or parser.name
self._request_buffer.put_request(result)
result_type = 1
elif isinstance(result, Item):
self._item_buffer.put_item(result)
result_type = 2
elif callable(result): # callable的result可能是更新数据库操作的函数
if result_type == 1:
self._request_buffer.put_request(result)
else:
self._item_buffer.put_item(result)
else:
raise TypeError(
"start_requests yield result type error, expect Request、Item、callback func, but got type: {}".format(
type(result)
)
)
self._request_buffer.flush()
self._item_buffer.flush()
| (self) |
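`__add_task` above routes whatever `start_requests` yields: `Request` objects go to the request buffer, `Item` objects to the item buffer, and callables are attached to whichever buffer the previous yield targeted. Below is a minimal sketch of a parser exercising all three result types; the class name, URL and table name are illustrative assumptions, not taken from the source.

```python
# Illustrative sketch only; spider/table names and URLs are assumptions.
import feapder
from feapder import Item, Request


class DemoParser(feapder.Spider):
    def start_requests(self):
        # 1) a normal crawl request -> request buffer
        yield Request("https://example.com/list", callback=self.parse)

        # 2) an item -> item buffer
        item = Item()
        item.table_name = "demo_table"  # assumed table name
        item.url = "https://example.com/list"
        yield item

        # 3) a callable -> queued on the buffer used by the previous yield
        #    (commonly a deferred database-update function)
        yield lambda: print("deferred operation")

    def parse(self, request, response):
        pass
```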
8,306 | feapder.core.spiders.batch_spider | __init__ |
@summary: 批次爬虫
必要条件
1、需有任务表
任务表中必须有id 及 任务状态字段 如 state。如指定parser_name字段,则任务会自动下发到对应的parser下, 否则会下发到所有的parser下。其他字段可根据爬虫需要的参数自行扩充
参考建表语句如下:
CREATE TABLE `table_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`param` varchar(1000) DEFAULT NULL COMMENT '爬虫需要的抓取数据需要的参数',
`state` int(11) DEFAULT NULL COMMENT '任务状态',
`parser_name` varchar(255) DEFAULT NULL COMMENT '任务解析器的脚本类名',
PRIMARY KEY (`id`),
UNIQUE KEY `nui` (`param`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
2、需有批次记录表 不存在自动创建
---------
@param task_table: mysql中的任务表
@param batch_record_table: mysql 中的批次记录表
@param batch_name: 批次采集程序名称
@param batch_interval: 批次间隔 天为单位。 如想一小时一批次,可写成1/24
@param task_keys: 需要获取的任务字段 列表 [] 如需指定解析的parser,则需将parser_name字段取出来。
@param task_state: mysql中任务表的任务状态字段
@param min_task_count: redis 中最少任务数, 少于这个数量会从mysql的任务表取任务
@param check_task_interval: 检查是否还有任务的时间间隔;
@param task_limit: 从数据库中取任务的数量
@param redis_key: 任务等数据存放在redis中的key前缀
@param thread_count: 线程数,默认为配置文件中的线程数
@param begin_callback: 爬虫开始回调函数
@param end_callback: 爬虫结束回调函数
@param delete_keys: 爬虫启动时删除的key,类型: 元组/bool/string。 支持正则; 常用于清空任务队列,否则重启时会断点续爬
@param keep_alive: 爬虫是否常驻,默认否
@param auto_start_next_batch: 本批次结束后,且下一批次时间已到达时,是否自动启动下一批次,默认是
@param related_redis_key: 有关联的其他爬虫任务表(redis)注意:要避免环路 如 A -> B & B -> A 。
@param related_batch_record: 有关联的其他爬虫批次表(mysql)注意:要避免环路 如 A -> B & B -> A 。
related_redis_key 与 related_batch_record 选其一配置即可;用于相关联的爬虫没结束时,本爬虫也不结束
若相关联的爬虫为批次爬虫,推荐以related_batch_record配置,
若相关联的爬虫为普通爬虫,无批次表,可以以related_redis_key配置
@param task_condition: 任务条件 用于从一个大任务表中挑选出属于自己爬虫的任务,即where后的条件语句
@param task_order_by: 取任务时的排序条件 如 id desc
---------
@result:
| def __init__(
self,
task_table,
batch_record_table,
batch_name,
batch_interval,
task_keys,
task_state="state",
min_task_count=10000,
check_task_interval=5,
task_limit=10000,
related_redis_key=None,
related_batch_record=None,
task_condition="",
task_order_by="",
redis_key=None,
thread_count=None,
begin_callback=None,
end_callback=None,
delete_keys=(),
keep_alive=None,
auto_start_next_batch=True,
**kwargs,
):
"""
@summary: 批次爬虫
必要条件
1、需有任务表
任务表中必须有id 及 任务状态字段 如 state。如指定parser_name字段,则任务会自动下发到对应的parser下, 否则会下发到所有的parser下。其他字段可根据爬虫需要的参数自行扩充
参考建表语句如下:
CREATE TABLE `table_name` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`param` varchar(1000) DEFAULT NULL COMMENT '爬虫需要的抓取数据需要的参数',
`state` int(11) DEFAULT NULL COMMENT '任务状态',
`parser_name` varchar(255) DEFAULT NULL COMMENT '任务解析器的脚本类名',
PRIMARY KEY (`id`),
UNIQUE KEY `nui` (`param`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
2、需有批次记录表 不存在自动创建
---------
@param task_table: mysql中的任务表
@param batch_record_table: mysql 中的批次记录表
@param batch_name: 批次采集程序名称
@param batch_interval: 批次间隔 天为单位。 如想一小时一批次,可写成1/24
@param task_keys: 需要获取的任务字段 列表 [] 如需指定解析的parser,则需将parser_name字段取出来。
@param task_state: mysql中任务表的任务状态字段
@param min_task_count: redis 中最少任务数, 少于这个数量会从mysql的任务表取任务
@param check_task_interval: 检查是否还有任务的时间间隔;
@param task_limit: 从数据库中取任务的数量
@param redis_key: 任务等数据存放在redis中的key前缀
@param thread_count: 线程数,默认为配置文件中的线程数
@param begin_callback: 爬虫开始回调函数
@param end_callback: 爬虫结束回调函数
@param delete_keys: 爬虫启动时删除的key,类型: 元组/bool/string。 支持正则; 常用于清空任务队列,否则重启时会断点续爬
@param keep_alive: 爬虫是否常驻,默认否
@param auto_start_next_batch: 本批次结束后,且下一批次时间已到达时,是否自动启动下一批次,默认是
@param related_redis_key: 有关联的其他爬虫任务表(redis)注意:要避免环路 如 A -> B & B -> A 。
@param related_batch_record: 有关联的其他爬虫批次表(mysql)注意:要避免环路 如 A -> B & B -> A 。
related_redis_key 与 related_batch_record 选其一配置即可;用于相关联的爬虫没结束时,本爬虫也不结束
若相关联的爬虫为批次爬虫,推荐以related_batch_record配置,
若相关联的爬虫为普通爬虫,无批次表,可以以related_redis_key配置
@param task_condition: 任务条件 用于从一个大任务表中挑选出属于自己爬虫的任务,即where后的条件语句
@param task_order_by: 取任务时的排序条件 如 id desc
---------
@result:
"""
Scheduler.__init__(
self,
redis_key=redis_key,
thread_count=thread_count,
begin_callback=begin_callback,
end_callback=end_callback,
delete_keys=delete_keys,
keep_alive=keep_alive,
auto_start_requests=False,
batch_interval=batch_interval,
task_table=task_table,
**kwargs,
)
self._redisdb = RedisDB()
self._mysqldb = MysqlDB()
self._task_table = task_table # mysql中的任务表
self._batch_record_table = batch_record_table # mysql 中的批次记录表
self._batch_name = batch_name # 批次采集程序名称
self._task_keys = task_keys # 需要获取的任务字段
self._task_state = task_state # mysql中任务表的state字段名
self._min_task_count = min_task_count # redis 中最少任务数
self._check_task_interval = check_task_interval
self._task_limit = task_limit # mysql中一次取的任务数量
self._related_task_tables = [
setting.TAB_REQUESTS.format(redis_key=redis_key)
] # 自己的task表也需要检查是否有任务
if related_redis_key:
self._related_task_tables.append(
setting.TAB_REQUESTS.format(redis_key=related_redis_key)
)
self._related_batch_record = related_batch_record
self._task_condition = task_condition
self._task_condition_prefix_and = task_condition and " and {}".format(
task_condition
)
self._task_condition_prefix_where = task_condition and " where {}".format(
task_condition
)
self._task_order_by = task_order_by and " order by {}".format(task_order_by)
self._auto_start_next_batch = auto_start_next_batch
self._batch_date_cache = None
if self._batch_interval >= 1:
self._date_format = "%Y-%m-%d"
elif self._batch_interval < 1 and self._batch_interval >= 1 / 24:
self._date_format = "%Y-%m-%d %H"
else:
self._date_format = "%Y-%m-%d %H:%M"
self._is_more_parsers = True # 多模版类爬虫
# 初始化每个配置的属性
self._spider_last_done_time = None # 爬虫最近已做任务数量时间
self._spider_last_done_count = None # 爬虫最近已做任务数量
self._spider_deal_speed_cached = None
self._batch_timeout = False # 批次是否超时或将要超时
# 重置任务
self.reset_task()
| (self, task_table, batch_record_table, batch_name, batch_interval, task_keys, task_state='state', min_task_count=10000, check_task_interval=5, task_limit=10000, related_redis_key=None, related_batch_record=None, task_condition='', task_order_by='', redis_key=None, thread_count=None, begin_callback=None, end_callback=None, delete_keys=(), keep_alive=None, auto_start_next_batch=True, **kwargs) |
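For context, a minimal sketch of how a `BatchSpider` is typically wired up with the parameters documented above; all table names, the redis key and the batch interval are placeholders, not values from the source.

```python
import feapder


class SinaNewsParser(feapder.BatchParser):
    def start_requests(self, task):
        # task is a PerfectDict built from task_keys, so task.id / task.param work
        yield feapder.Request(task.param, task_id=task.id)


spider = feapder.BatchSpider(
    task_table="sina_news_task",                  # MySQL task table (assumed name)
    batch_record_table="sina_news_batch_record",  # created automatically if missing
    batch_name="sina news batch",
    batch_interval=1 / 24,                        # one batch per hour
    task_keys=["id", "param"],                    # columns pulled from the task table
    redis_key="spider:sina_news",
)
spider.add_parser(SinaNewsParser)

if __name__ == "__main__":
    spider.start_monitor_task()   # master process: dispatches tasks
    # spider.start()              # worker process: consumes tasks
```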
8,315 | feapder.core.scheduler | _start | null | def _start(self):
self.spider_begin()
# 将失败的item入库
if setting.RETRY_FAILED_ITEMS:
handle_failed_items = HandleFailedItems(
redis_key=self._redis_key,
task_table=self._task_table,
item_buffer=self._item_buffer,
)
handle_failed_items.reput_failed_items_to_db()
# 心跳开始
self.heartbeat_start()
# 启动request_buffer
self._request_buffer.start()
# 启动item_buffer
self._item_buffer.start()
# 启动collector
self._collector.start()
# 启动parser control
for i in range(self._thread_count):
parser_control = self._parser_control_obj(
self._collector,
self._redis_key,
self._request_buffer,
self._item_buffer,
)
for parser in self._parsers:
parser_control.add_parser(parser)
parser_control.start()
self._parser_controls.append(parser_control)
# 下发任务 因为时间可能比较长,放到最后面
if setting.RETRY_FAILED_REQUESTS:
# 重设失败的任务, 不用加锁,原子性操作
handle_failed_requests = HandleFailedRequests(self._redis_key)
handle_failed_requests.reput_failed_requests_to_requests()
# 下发新任务
if self._auto_start_requests: # 自动下发
if self.wait_lock:
# 将添加任务处加锁,防止多进程之间添加重复的任务
with RedisLock(key=self._spider_name) as lock:
if lock.locked:
self.__add_task()
else:
self.__add_task()
| (self) |
8,317 | feapder.core.scheduler | _stop_all_thread | null | def _stop_all_thread(self):
self._request_buffer.stop()
self._item_buffer.stop()
# 停止 collector
self._collector.stop()
# 停止 parser_controls
for parser_control in self._parser_controls:
parser_control.stop()
self.heartbeat_stop()
self._started.clear()
| (self) |
8,319 | feapder.core.spiders.batch_spider | add_parser | null | def add_parser(self, parser, **kwargs):
parser = parser(
self._task_table,
self._batch_record_table,
self._task_state,
self._date_format,
self._mysqldb,
**kwargs,
) # parser 实例化
self._parsers.append(parser)
| (self, parser, **kwargs) |
8,321 | feapder.core.scheduler | all_thread_is_done | null | def all_thread_is_done(self):
# 降低偶然性, 因为各个环节不是并发的,很有可能当时状态为假,但检测下一条时该状态为真。一次检测很有可能遇到这种偶然性
for i in range(3):
# 检测 collector 状态
if (
self._collector.is_collector_task()
or self._collector.get_requests_count() > 0
):
return False
# 检测 parser_control 状态
for parser_control in self._parser_controls:
if not parser_control.is_not_task():
return False
# 检测 item_buffer 状态
if (
self._item_buffer.get_items_count() > 0
or self._item_buffer.is_adding_to_db()
):
return False
# 检测 request_buffer 状态
if (
self._request_buffer.get_requests_count() > 0
or self._request_buffer.is_adding_to_db()
):
return False
tools.delay_time(1)
return True
| (self) |
8,322 | feapder.core.spiders.batch_spider | check_batch |
@summary: 检查批次是否完成
---------
@param: is_first_check 是否为首次检查,若首次检查,且检查结果为批次已完成,则不发送批次完成消息。因为之前发送过了
---------
@result: 完成返回True 否则False
| def check_batch(self, is_first_check=False):
"""
@summary: 检查批次是否完成
---------
@param: is_first_check 是否为首次检查,若首次检查,且检查结果为批次已完成,则不发送批次完成消息。因为之前发送过了
---------
@result: 完成返回True 否则False
"""
sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count, is_done from {batch_record_table} order by id desc limit 1'.format(
date_format=self._date_format.replace(":%M", ":%i"),
batch_record_table=self._batch_record_table,
)
batch_info = self._mysqldb.find(sql) # (('批次时间', 总量, 完成量, 批次是否完成),)
if batch_info:
batch_date, total_count, done_count, is_done = batch_info[0]
now_date = datetime.datetime.now()
last_batch_date = datetime.datetime.strptime(batch_date, self._date_format)
time_difference = now_date - last_batch_date
if total_count == done_count and time_difference < datetime.timedelta(
days=self._batch_interval
): # 若在本批次内,再次检查任务表是否有新增任务
# # 改成查询任务表 看是否真的没任务了,因为batch_record表里边的数量可能没来得及更新
task_count = self.__get_task_state_count()
total_count = task_count.get("total_count")
done_count = task_count.get("done_count")
if total_count == done_count:
if not is_done:
# 检查相关联的爬虫是否完成
related_spider_is_done = self.related_spider_is_done()
if related_spider_is_done is False:
msg = "《{}》本批次未完成, 正在等待依赖爬虫 {} 结束. 批次时间 {} 批次进度 {}/{}".format(
self._batch_name,
self._related_batch_record or self._related_task_tables,
batch_date,
done_count,
total_count,
)
log.info(msg)
# 检查是否超时 超时发出报警
if time_difference >= datetime.timedelta(
days=self._batch_interval
): # 已经超时
self.send_msg(
msg,
level="error",
message_prefix="《{}》本批次未完成, 正在等待依赖爬虫 {} 结束".format(
self._batch_name,
self._related_batch_record
or self._related_task_tables,
),
)
self._batch_timeout = True
return False
else:
self.update_is_done()
msg = "《{}》本批次完成 批次时间 {} 共处理 {} 条任务".format(
self._batch_name, batch_date, done_count
)
log.info(msg)
if not is_first_check:
if self._batch_timeout: # 之前报警过已超时,现在已完成,发出恢复消息
self._batch_timeout = False
self.send_msg(msg, level="error")
else:
self.send_msg(msg)
# 判断下一批次是否到
if time_difference >= datetime.timedelta(days=self._batch_interval):
if not is_first_check and not self._auto_start_next_batch:
return True # 下一批次不开始。因为设置了不自动开始下一批次
msg = "《{}》下一批次开始".format(self._batch_name)
log.info(msg)
self.send_msg(msg)
# 初始化任务表状态
if self.init_task() != False: # 更新失败返回False 其他返回True/None
# 初始化属性
self.init_batch_property()
is_success = (
self.record_batch()
) # 有可能插入不成功,但是任务表已经重置了,不过由于当前时间为下一批次的时间,检查批次是否结束时不会检查任务表,所以下次执行时仍然会重置
if is_success:
# 看是否有等待任务的worker,若有则需要等会再下发任务,防止work批次时间没来得及更新
if self.have_alive_spider():
log.info(
f"插入新批次记录成功,检测到有爬虫进程在等待任务,本批任务1分钟后开始下发, 防止爬虫端缓存的批次时间没来得及更新"
)
tools.delay_time(60)
else:
log.info("插入新批次记录成功")
return False # 下一批次开始
else:
return True # 下一批次不开始。先不派发任务,因为批次表新批次插入失败了,需要插入成功后再派发任务
else:
log.info("《{}》下次批次时间未到".format(self._batch_name))
if not is_first_check:
self.send_msg("《{}》下次批次时间未到".format(self._batch_name))
return True
else:
if time_difference >= datetime.timedelta(
days=self._batch_interval
): # 已经超时
time_out = time_difference - datetime.timedelta(
days=self._batch_interval
)
time_out_pretty = tools.format_seconds(time_out.total_seconds())
msg = "《{}》本批次已超时{} 批次时间 {}, 批次进度 {}/{}".format(
self._batch_name,
time_out_pretty,
batch_date,
done_count,
total_count,
)
if self._batch_interval >= 1:
msg += ", 期望时间{}天".format(self._batch_interval)
else:
msg += ", 期望时间{}小时".format(self._batch_interval * 24)
result = self.get_deal_speed(
total_count=total_count,
done_count=done_count,
last_batch_date=last_batch_date,
)
if result:
(
deal_speed,
need_time,
overflow_time,
calculate_speed_time,
) = result
msg += ", 任务处理速度于{}统计, 约 {}条/小时, 预计还需 {}".format(
calculate_speed_time,
deal_speed,
tools.format_seconds(need_time),
)
if overflow_time > 0:
msg += ", 该批次预计总超时 {}, 请及时处理".format(
tools.format_seconds(overflow_time)
)
log.info(msg)
self.send_msg(
msg,
level="error",
message_prefix="《{}》批次超时".format(self._batch_name),
)
self._batch_timeout = True
else: # 未超时
remaining_time = (
datetime.timedelta(days=self._batch_interval) - time_difference
)
remaining_time_pretty = tools.format_seconds(
remaining_time.total_seconds()
)
if self._batch_interval >= 1:
msg = "《{}》本批次正在进行, 批次时间 {}, 批次进度 {}/{}, 期望时间{}天, 剩余{}".format(
self._batch_name,
batch_date,
done_count,
total_count,
self._batch_interval,
remaining_time_pretty,
)
else:
msg = "《{}》本批次正在进行, 批次时间 {}, 批次进度 {}/{}, 期望时间{}小时, 剩余{}".format(
self._batch_name,
batch_date,
done_count,
total_count,
self._batch_interval * 24,
remaining_time_pretty,
)
result = self.get_deal_speed(
total_count=total_count,
done_count=done_count,
last_batch_date=last_batch_date,
)
if result:
(
deal_speed,
need_time,
overflow_time,
calculate_speed_time,
) = result
msg += ", 任务处理速度于{}统计, 约 {}条/小时, 预计还需 {}".format(
calculate_speed_time,
deal_speed,
tools.format_seconds(need_time),
)
if overflow_time > 0:
msg += ", 该批次可能会超时 {}, 请及时处理".format(
tools.format_seconds(overflow_time)
)
# 发送警报
self.send_msg(
msg,
level="error",
message_prefix="《{}》批次可能超时".format(self._batch_name),
)
self._batch_timeout = True
elif overflow_time < 0:
msg += ", 该批次预计提前 {} 完成".format(
tools.format_seconds(-overflow_time)
)
log.info(msg)
else:
# 插入batch_date
self.record_batch()
# 初始化任务表状态 可能有产生任务的代码
self.init_task()
return False
| (self, is_first_check=False) |
8,323 | feapder.core.scheduler | check_task_status |
检查任务状态 预警
| def __init__(
self,
redis_key=None,
thread_count=None,
begin_callback=None,
end_callback=None,
delete_keys=(),
keep_alive=None,
auto_start_requests=None,
batch_interval=0,
wait_lock=True,
task_table=None,
**kwargs,
):
"""
@summary: 调度器
---------
@param redis_key: 爬虫request及item存放redis中的文件夹
@param thread_count: 线程数,默认为配置文件中的线程数
@param begin_callback: 爬虫开始回调函数
@param end_callback: 爬虫结束回调函数
@param delete_keys: 爬虫启动时删除的key,类型: 元组/bool/string。 支持正则
@param keep_alive: 爬虫是否常驻,默认否
@param auto_start_requests: 爬虫是否自动添加任务
@param batch_interval: 抓取时间间隔 默认为0 天为单位 多次启动时,只有当前时间与第一次抓取结束的时间间隔大于指定的时间间隔时,爬虫才启动
@param wait_lock: 下发任务时否等待锁,若不等待锁,可能会存在多进程同时在下发一样的任务,因此分布式环境下请将该值设置True
@param task_table: 任务表, 批次爬虫传递
---------
@result:
"""
super(Scheduler, self).__init__()
for key, value in self.__class__.__custom_setting__.items():
if key == "AUTO_STOP_WHEN_SPIDER_DONE": # 兼容老版本的配置
setattr(setting, "KEEP_ALIVE", not value)
else:
setattr(setting, key, value)
self._redis_key = redis_key or setting.REDIS_KEY
if not self._redis_key:
raise Exception(
"""
redis_key 为redis中存放request与item的目录。不能为空,
可在setting中配置,如 REDIS_KEY = 'test'
或spider初始化时传参, 如 TestSpider(redis_key='test')
"""
)
self._request_buffer = RequestBuffer(redis_key)
self._item_buffer = ItemBuffer(redis_key, task_table)
self._collector = Collector(redis_key)
self._parsers = []
self._parser_controls = []
self._parser_control_obj = ParserControl
# 兼容老版本的参数
if "auto_stop_when_spider_done" in kwargs:
self._keep_alive = not kwargs.get("auto_stop_when_spider_done")
else:
self._keep_alive = (
keep_alive if keep_alive is not None else setting.KEEP_ALIVE
)
self._auto_start_requests = (
auto_start_requests
if auto_start_requests is not None
else setting.SPIDER_AUTO_START_REQUESTS
)
self._batch_interval = batch_interval
self._begin_callback = (
begin_callback
if begin_callback
else lambda: log.info("\n********** feapder begin **********")
)
self._end_callback = (
end_callback
if end_callback
else lambda: log.info("\n********** feapder end **********")
)
if thread_count:
setattr(setting, "SPIDER_THREAD_COUNT", thread_count)
self._thread_count = setting.SPIDER_THREAD_COUNT
self._spider_name = self.name
self._task_table = task_table
self._tab_spider_status = setting.TAB_SPIDER_STATUS.format(redis_key=redis_key)
self._tab_requests = setting.TAB_REQUESTS.format(redis_key=redis_key)
self._tab_failed_requests = setting.TAB_FAILED_REQUESTS.format(
redis_key=redis_key
)
self._is_notify_end = False # 是否已经通知结束
self._last_task_count = 0 # 最近一次任务数量
self._last_check_task_count_time = 0
self._stop_heartbeat = False # 是否停止心跳
self._redisdb = RedisDB()
# Request 缓存设置
Request.cached_redis_key = redis_key
Request.cached_expire_time = setting.RESPONSE_CACHED_EXPIRE_TIME
delete_keys = delete_keys or setting.DELETE_KEYS
if delete_keys:
self.delete_tables(delete_keys)
self._last_check_task_status_time = 0
self.wait_lock = wait_lock
self.init_metrics()
# 重置丢失的任务
self.reset_task()
self._stop_spider = False
| (self) |
8,325 | feapder.core.spiders.batch_spider | create_batch_record_table | null | def create_batch_record_table(self):
sql = (
"select table_name from information_schema.tables where table_name like '%s'"
% self._batch_record_table
)
tables_name = self._mysqldb.find(sql)
if not tables_name:
sql = """
CREATE TABLE `{table_name}` (
`id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT,
`batch_date` {batch_date} DEFAULT NULL COMMENT '批次时间',
`total_count` int(11) DEFAULT NULL COMMENT '任务总数',
`done_count` int(11) DEFAULT NULL COMMENT '完成数 (1,-1)',
`fail_count` int(11) DEFAULT NULL COMMENT '失败任务数 (-1)',
`interval` float(11) DEFAULT NULL COMMENT '批次间隔',
`interval_unit` varchar(20) DEFAULT NULL COMMENT '批次间隔单位 day, hour',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '批次开始时间',
`update_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '本条记录更新时间',
`is_done` int(11) DEFAULT '0' COMMENT '批次是否完成 0 未完成 1 完成',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
""".format(
table_name=self._batch_record_table,
batch_date="datetime",
)
self._mysqldb.execute(sql)
| (self) |
8,326 | feapder.core.scheduler | delete_tables | null | def delete_tables(self, delete_keys):
if delete_keys == True:
delete_keys = [self._redis_key + "*"]
elif not isinstance(delete_keys, (list, tuple)):
delete_keys = [delete_keys]
for delete_key in delete_keys:
if not delete_key.startswith(self._redis_key):
delete_key = self._redis_key + delete_key
keys = self._redisdb.getkeys(delete_key)
for key in keys:
log.debug("正在删除key %s" % key)
self._redisdb.clear(key)
| (self, delete_keys) |
8,327 | feapder.core.spiders.batch_spider | distribute_task |
@summary: 分发任务
---------
@param tasks:
---------
@result:
| def distribute_task(self, tasks):
"""
@summary: 分发任务
---------
@param tasks:
---------
@result:
"""
if self._is_more_parsers: # 为多模版类爬虫,需要下发指定的parser
for task in tasks:
for parser in self._parsers: # 寻找task对应的parser
if parser.name in task:
task = PerfectDict(
_dict=dict(zip(self._task_keys, task)), _values=list(task)
)
requests = parser.start_requests(task)
if requests and not isinstance(requests, Iterable):
raise Exception(
"%s.%s返回值必须可迭代" % (parser.name, "start_requests")
)
result_type = 1
for request in requests or []:
if isinstance(request, Request):
request.parser_name = request.parser_name or parser.name
self._request_buffer.put_request(request)
result_type = 1
elif isinstance(request, Item):
self._item_buffer.put_item(request)
result_type = 2
if (
self._item_buffer.get_items_count()
>= setting.ITEM_MAX_CACHED_COUNT
):
self._item_buffer.flush()
elif callable(request): # callable的request可能是更新数据库操作的函数
if result_type == 1:
self._request_buffer.put_request(request)
else:
self._item_buffer.put_item(request)
if (
self._item_buffer.get_items_count()
>= setting.ITEM_MAX_CACHED_COUNT
):
self._item_buffer.flush()
else:
raise TypeError(
"start_requests yield result type error, expect Request、Item、callback func, but got type: {}".format(
type(request)
)
)
break
else: # task没对应的parser 则将task下发到所有的parser
for task in tasks:
for parser in self._parsers:
task = PerfectDict(
_dict=dict(zip(self._task_keys, task)), _values=list(task)
)
requests = parser.start_requests(task)
if requests and not isinstance(requests, Iterable):
raise Exception(
"%s.%s返回值必须可迭代" % (parser.name, "start_requests")
)
result_type = 1
for request in requests or []:
if isinstance(request, Request):
request.parser_name = request.parser_name or parser.name
self._request_buffer.put_request(request)
result_type = 1
elif isinstance(request, Item):
self._item_buffer.put_item(request)
result_type = 2
if (
self._item_buffer.get_items_count()
>= setting.ITEM_MAX_CACHED_COUNT
):
self._item_buffer.flush()
elif callable(request): # callable的request可能是更新数据库操作的函数
if result_type == 1:
self._request_buffer.put_request(request)
else:
self._item_buffer.put_item(request)
if (
self._item_buffer.get_items_count()
>= setting.ITEM_MAX_CACHED_COUNT
):
self._item_buffer.flush()
self._request_buffer.flush()
self._item_buffer.flush()
| (self, tasks) |
8,333 | feapder.core.spiders.batch_spider | get_deal_speed |
获取处理速度
@param total_count: 总数量
@param done_count: 做完数量
@param last_batch_date: 批次时间 datetime
@return:
deal_speed (条/小时), need_time (秒), overflow_time(秒) ( overflow_time < 0 时表示提前多少秒完成 )
或
None
| def get_deal_speed(self, total_count, done_count, last_batch_date):
"""
获取处理速度
@param total_count: 总数量
@param done_count: 做完数量
@param last_batch_date: 批次时间 datetime
@return:
deal_speed (条/小时), need_time (秒), overflow_time(秒) ( overflow_time < 0 时表示提前多少秒完成 )
或
None
"""
now_date = datetime.datetime.now()
if self._spider_last_done_count is None:
self._spider_last_done_count = done_count
self._spider_last_done_time = now_date
elif done_count > self._spider_last_done_count:
time_interval = (now_date - self._spider_last_done_time).total_seconds()
deal_speed = (
done_count - self._spider_last_done_count
) / time_interval # 条/秒
need_time = (total_count - done_count) / deal_speed # 单位秒
overflow_time = (
(now_date - last_batch_date).total_seconds()
+ need_time
- datetime.timedelta(days=self._batch_interval).total_seconds()
) # 溢出时间 秒
calculate_speed_time = now_date.strftime("%Y-%m-%d %H:%M:%S") # 统计速度时间
deal_speed = int(deal_speed * 3600) # 条/小时
# 更新最近已做任务数及时间
self._spider_last_done_count = done_count
self._spider_last_done_time = now_date
self._spider_deal_speed_cached = (
deal_speed,
need_time,
overflow_time,
calculate_speed_time,
)
return self._spider_deal_speed_cached
| (self, total_count, done_count, last_batch_date) |
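The arithmetic in `get_deal_speed` can be hard to follow inline; here is a standalone rework with concrete numbers, all of them invented for illustration.

```python
# Standalone rework of the get_deal_speed arithmetic, for illustration only.
import datetime

total_count, done_count = 10_000, 4_000
last_done_count, elapsed_seconds = 3_400, 1_800      # 600 tasks done in the last 30 min
batch_interval_days = 1
seconds_since_batch_start = 6 * 3600                 # batch started 6 hours ago

deal_speed = (done_count - last_done_count) / elapsed_seconds   # tasks per second
need_time = (total_count - done_count) / deal_speed             # seconds still needed
overflow_time = (
    seconds_since_batch_start
    + need_time
    - datetime.timedelta(days=batch_interval_days).total_seconds()
)

print(int(deal_speed * 3600), "tasks/hour")   # 1200
print(need_time, "s remaining")               # 18000.0
print(overflow_time, "s overflow")            # negative -> finishing ahead of schedule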
8,334 | feapder.core.spiders.batch_spider | get_doing_task_from_mysql |
@summary: 取正在做的任务
---------
---------
@result:
| def get_doing_task_from_mysql(self):
"""
@summary: 取正在做的任务
---------
---------
@result:
"""
# 查询任务
task_keys = ", ".join([f"`{key}`" for key in self._task_keys])
sql = "select %s from %s where %s = 2%s%s limit %s" % (
task_keys,
self._task_table,
self._task_state,
self._task_condition_prefix_and,
self._task_order_by,
self._task_limit,
)
tasks = self._mysqldb.find(sql)
return tasks
| (self) |
8,335 | feapder.core.spiders.batch_spider | get_lose_task_count | null | def get_lose_task_count(self):
sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count from {batch_record_table} order by id desc limit 1'.format(
date_format=self._date_format.replace(":%M", ":%i"),
batch_record_table=self._batch_record_table,
)
batch_info = self._mysqldb.find(sql) # (('2018-08-19', 49686, 0),)
batch_date, total_count, done_count = batch_info[0]
return total_count - done_count
| (self) |
8,336 | feapder.core.spiders.batch_spider | get_todo_task_from_mysql |
@summary: 取待做的任务
---------
---------
@result:
| def get_todo_task_from_mysql(self):
"""
@summary: 取待做的任务
---------
---------
@result:
"""
# TODO 分批取数据 每批最大取 1000000个,防止内存占用过大
# 查询任务
task_keys = ", ".join([f"`{key}`" for key in self._task_keys])
sql = "select %s from %s where %s = 0%s%s limit %s" % (
task_keys,
self._task_table,
self._task_state,
self._task_condition_prefix_and,
self._task_order_by,
self._task_limit,
)
tasks = self._mysqldb.find(sql)
if tasks:
# 更新任务状态
for i in range(0, len(tasks), 10000): # 10000 一批量更新
task_ids = str(
tuple([task[0] for task in tasks[i : i + 10000]])
).replace(",)", ")")
sql = "update %s set %s = 2 where id in %s" % (
self._task_table,
self._task_state,
task_ids,
)
self._mysqldb.update(sql)
return tasks
| (self) |
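To make the string splicing in `get_todo_task_from_mysql` concrete, the snippet below reproduces the format expression with a hypothetical configuration; the table and column names are assumptions.

```python
# Illustration (not from the source): the SQL built for an assumed configuration,
# showing how task_condition and task_order_by are spliced in.
task_keys = ["id", "param"]
task_table = "sina_news_task"
task_state = "state"
task_condition_prefix_and = " and platform = 'sina'"   # derived from task_condition
task_order_by = " order by id desc"
task_limit = 10000

sql = "select %s from %s where %s = 0%s%s limit %s" % (
    ", ".join(f"`{key}`" for key in task_keys),
    task_table,
    task_state,
    task_condition_prefix_and,
    task_order_by,
    task_limit,
)
print(sql)
# select `id`, `param` from sina_news_task
# where state = 0 and platform = 'sina' order by id desc limit 10000
```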
8,337 | feapder.core.scheduler | have_alive_spider | null | def have_alive_spider(self, heartbeat_interval=10):
heartbeat_time = self._redisdb.hget(self._tab_spider_status, HEARTBEAT_TIME_KEY)
if heartbeat_time:
heartbeat_time = int(heartbeat_time)
current_timestamp = tools.get_current_timestamp()
if current_timestamp - heartbeat_time < heartbeat_interval:
return True
return False
| (self, heartbeat_interval=10) |
8,338 | feapder.core.scheduler | heartbeat | null | def heartbeat(self):
while not self._stop_heartbeat:
try:
self._redisdb.hset(
self._tab_spider_status,
HEARTBEAT_TIME_KEY,
tools.get_current_timestamp(),
)
except Exception as e:
log.error("心跳异常: {}".format(e))
time.sleep(5)
| (self) |
8,339 | feapder.core.scheduler | heartbeat_start | null | def heartbeat_start(self):
threading.Thread(target=self.heartbeat).start()
| (self) |
8,340 | feapder.core.scheduler | heartbeat_stop | null | def heartbeat_stop(self):
self._stop_heartbeat = True
| (self) |
8,341 | feapder.core.spiders.batch_spider | init_batch_property |
每个批次开始时需要重置的属性
@return:
| def init_batch_property(self):
"""
每个批次开始时需要重置的属性
@return:
"""
self._spider_deal_speed_cached = None
self._spider_last_done_time = None
self._spider_last_done_count = None # 爬虫刚开始启动时已做任务数量
self._batch_timeout = False
| (self) |
8,342 | feapder.core.scheduler | init_metrics |
初始化打点系统
| def init_metrics(self):
"""
初始化打点系统
"""
metrics.init(**setting.METRICS_OTHER_ARGS)
| (self) |
8,343 | feapder.core.spiders.batch_spider | init_task |
@summary: 初始化任务表中的任务, 新一个批次开始时调用。 可能会重写
---------
---------
@result:
| def init_task(self):
"""
@summary: 初始化任务表中的任务, 新一个批次开始时调用。 可能会重写
---------
---------
@result:
"""
sql = "update {task_table} set {state} = 0 where {state} != -1{task_condition}".format(
task_table=self._task_table,
state=self._task_state,
task_condition=self._task_condition_prefix_and,
)
return self._mysqldb.update(sql)
| (self) |
8,346 | feapder.core.scheduler | is_reach_next_spider_time | null | def is_reach_next_spider_time(self):
if not self._batch_interval:
return True
last_spider_end_time = self._redisdb.hget(
self._tab_spider_status, SPIDER_END_TIME_KEY
)
if last_spider_end_time:
last_spider_end_time = int(last_spider_end_time)
current_timestamp = tools.get_current_timestamp()
time_interval = current_timestamp - last_spider_end_time
if time_interval < self._batch_interval * 86400:
log.info(
"上次运行结束时间为 {} 与当前时间间隔 为 {}, 小于规定的抓取时间间隔 {}。爬虫不执行,退出~".format(
tools.timestamp_to_date(last_spider_end_time),
tools.format_seconds(time_interval),
tools.format_seconds(self._batch_interval * 86400),
)
)
return False
return True
| (self) |
8,349 | feapder.core.spiders.batch_spider | record_batch |
@summary: 记录批次信息(初始化)
---------
---------
@result:
| def record_batch(self):
"""
@summary: 记录批次信息(初始化)
---------
---------
@result:
"""
# 查询总任务数
sql = "select count(1) from %s%s" % (
self._task_table,
self._task_condition_prefix_where,
)
total_task_count = self._mysqldb.find(sql)[0][0]
batch_date = tools.get_current_date(self._date_format)
sql = "insert into %s (batch_date, done_count, total_count, `interval`, interval_unit, create_time) values ('%s', %s, %s, %s, '%s', CURRENT_TIME)" % (
self._batch_record_table,
batch_date,
0,
total_task_count,
self._batch_interval
if self._batch_interval >= 1
else self._batch_interval * 24,
"day" if self._batch_interval >= 1 else "hour",
)
affect_count = self._mysqldb.add(sql) # None / 0 / 1 (1 为成功)
if affect_count:
# 重置批次日期
self._batch_date_cache = batch_date
# 重新刷下self.batch_date 中的 os.environ.get('batch_date') 否则日期还停留在上一个批次
os.environ["batch_date"] = self._batch_date_cache
# 爬虫开始
self.spider_begin()
else:
log.error("插入新批次失败")
return affect_count
| (self) |
8,350 | feapder.core.scheduler | record_end_time | null | def record_end_time(self):
# 记录结束时间
if self._batch_interval:
current_timestamp = tools.get_current_timestamp()
self._redisdb.hset(
self._tab_spider_status, SPIDER_END_TIME_KEY, current_timestamp
)
| (self) |
8,351 | feapder.core.spiders.batch_spider | related_spider_is_done |
相关联的爬虫是否跑完
@return: True / False / None 表示无相关的爬虫 可由自身的total_count 和 done_count 来判断
| def related_spider_is_done(self):
"""
相关联的爬虫是否跑完
@return: True / False / None 表示无相关的爬虫 可由自身的total_count 和 done_count 来判断
"""
for related_redis_task_table in self._related_task_tables:
if self._redisdb.exists_key(related_redis_task_table):
return False
if self._related_batch_record:
sql = "select is_done from {} order by id desc limit 1".format(
self._related_batch_record
)
is_done = self._mysqldb.find(sql)
is_done = is_done[0][0] if is_done else None
if is_done is None:
log.warning("相关联的批次表不存在或无批次信息")
return True
if not is_done:
return False
return True
| (self) |
8,352 | feapder.core.spiders.batch_spider | reset_lose_task_from_mysql |
@summary: 重置丢失任务为待做
---------
---------
@result:
| def reset_lose_task_from_mysql(self):
"""
@summary: 重置丢失任务为待做
---------
---------
@result:
"""
sql = "update {table} set {state} = 0 where {state} = 2{task_condition}".format(
table=self._task_table,
state=self._task_state,
task_condition=self._task_condition_prefix_and,
)
return self._mysqldb.update(sql)
| (self) |
8,353 | feapder.core.scheduler | reset_task |
重置丢失的任务
Returns:
| def reset_task(self, heartbeat_interval=10):
"""
重置丢失的任务
Returns:
"""
if self.have_alive_spider(heartbeat_interval=heartbeat_interval):
current_timestamp = tools.get_current_timestamp()
datas = self._redisdb.zrangebyscore_set_score(
self._tab_requests,
priority_min=current_timestamp,
priority_max=current_timestamp + setting.REQUEST_LOST_TIMEOUT,
score=300,
count=None,
)
lose_count = len(datas)
if lose_count:
log.info("重置丢失任务完毕,共{}条".format(len(datas)))
| (self, heartbeat_interval=10) |
8,354 | feapder.core.spiders.batch_spider | run |
@summary: 重写run方法 检查mysql中的任务是否做完, 做完停止
---------
---------
@result:
| def run(self):
"""
@summary: 重写run方法 检查mysql中的任务是否做完, 做完停止
---------
---------
@result:
"""
try:
self.create_batch_record_table()
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
self._start()
while True:
try:
if self._stop_spider or (
self.task_is_done() and self.all_thread_is_done()
): # redis全部的任务已经做完 并且mysql中的任务已经做完(检查各个线程all_thread_is_done,防止任务没做完,就更新任务状态,导致程序结束的情况)
if not self._is_notify_end:
self.spider_end()
self._is_notify_end = True
if not self._keep_alive:
self._stop_all_thread()
break
else:
self._is_notify_end = False
self.check_task_status()
except Exception as e:
log.exception(e)
tools.delay_time(10) # 10秒钟检查一次爬虫状态
except Exception as e:
msg = "《%s》主线程异常 爬虫结束 exception: %s" % (self._batch_name, e)
log.error(msg)
self.send_msg(
msg, level="error", message_prefix="《%s》爬虫异常结束" % self._batch_name
)
os._exit(137) # 使退出码为35072 方便爬虫管理器重启
| (self) |
8,355 | feapder.core.scheduler | send_msg | null | def send_msg(self, msg, level="debug", message_prefix=""):
# log.debug("发送报警 level:{} msg{}".format(level, msg))
tools.send_msg(msg=msg, level=level, message_prefix=message_prefix)
| (self, msg, level='debug', message_prefix='') |
8,358 | feapder.core.scheduler | spider_begin |
@summary: start_monitor_task 方式启动,此函数与spider_end不在同一进程内,变量不可共享
---------
---------
@result:
| def spider_begin(self):
"""
@summary: start_monitor_task 方式启动,此函数与spider_end不在同一进程内,变量不可共享
---------
---------
@result:
"""
if self._begin_callback:
self._begin_callback()
for parser in self._parsers:
parser.start_callback()
# 记录开始时间
if not self._redisdb.hexists(self._tab_spider_status, SPIDER_START_TIME_KEY):
current_timestamp = tools.get_current_timestamp()
self._redisdb.hset(
self._tab_spider_status, SPIDER_START_TIME_KEY, current_timestamp
)
# 发送消息
self.send_msg("《%s》爬虫开始" % self._spider_name)
| (self) |
8,359 | feapder.core.scheduler | spider_end | null | def spider_end(self):
self.record_end_time()
if self._end_callback:
self._end_callback()
for parser in self._parsers:
if not self._keep_alive:
parser.close()
parser.end_callback()
if not self._keep_alive:
# 关闭webdirver
Request.render_downloader and Request.render_downloader.close_all()
# 关闭打点
metrics.close()
else:
metrics.flush()
# 计算抓取时长
data = self._redisdb.hget(
self._tab_spider_status, SPIDER_START_TIME_KEY, is_pop=True
)
if data:
begin_timestamp = int(data)
spand_time = tools.get_current_timestamp() - begin_timestamp
msg = "《%s》爬虫%s,采集耗时 %s" % (
self._spider_name,
"被终止" if self._stop_spider else "结束",
tools.format_seconds(spand_time),
)
log.info(msg)
self.send_msg(msg)
if self._keep_alive:
log.info("爬虫不自动结束, 等待下一轮任务...")
else:
self.delete_tables(self._tab_spider_status)
| (self) |
8,362 | feapder.core.spiders.batch_spider | start_monitor_task |
@summary: 监控任务状态
---------
---------
@result:
| def start_monitor_task(self):
"""
@summary: 监控任务状态
---------
---------
@result:
"""
if not self._parsers: # 不是多模版模式, 将自己注入到parsers,自己为模版
self._is_more_parsers = False
self._parsers.append(self)
elif len(self._parsers) <= 1:
self._is_more_parsers = False
self.create_batch_record_table()
# 添加任务
for parser in self._parsers:
parser.add_task()
is_first_check = True
while True:
try:
if self.check_batch(is_first_check): # 该批次已经做完
if self._keep_alive:
is_first_check = True
log.info("爬虫所有任务已做完,不自动结束,等待新任务...")
time.sleep(self._check_task_interval)
continue
else:
break
is_first_check = False
# 检查redis中是否有任务 任务小于_min_task_count 则从mysql中取
tab_requests = setting.TAB_REQUESTS.format(redis_key=self._redis_key)
todo_task_count = self._redisdb.zget_count(tab_requests)
tasks = []
if todo_task_count < self._min_task_count: # 从mysql中取任务
# 更新batch表的任务状态数量
self.update_task_done_count()
log.info("redis 中剩余任务%s 数量过小 从mysql中取任务追加" % todo_task_count)
tasks = self.get_todo_task_from_mysql()
if not tasks: # 状态为0的任务已经做完,需要检查状态为2的任务是否丢失
if (
todo_task_count == 0
): # redis 中无待做任务,此时mysql中状态为2的任务为丢失任务。需重新做
lose_task_count = self.get_lose_task_count()
if not lose_task_count:
time.sleep(self._check_task_interval)
continue
elif (
lose_task_count > self._task_limit * 5
): # 丢失任务太多,直接重置,否则每次等redis任务消耗完再取下一批丢失任务,速度过慢
log.info("正在重置丢失任务为待做 共 {} 条".format(lose_task_count))
# 重置正在做的任务为待做
if self.reset_lose_task_from_mysql():
log.info("重置丢失任务成功")
else:
log.info("重置丢失任务失败")
continue
else: # 丢失任务少,直接取
log.info(
"正在取丢失任务 共 {} 条, 取 {} 条".format(
lose_task_count,
self._task_limit
if self._task_limit <= lose_task_count
else lose_task_count,
)
)
tasks = self.get_doing_task_from_mysql()
else:
log.info("mysql 中取到待做任务 %s 条" % len(tasks))
else:
log.info("redis 中尚有%s条积压任务,暂时不派发新任务" % todo_task_count)
if not tasks:
if todo_task_count >= self._min_task_count:
# log.info('任务正在进行 redis中剩余任务 %s' % todo_task_count)
pass
else:
log.info("mysql 中无待做任务 redis中剩余任务 %s" % todo_task_count)
else:
# make start requests
self.distribute_task(tasks)
log.info("添加任务到redis成功")
except Exception as e:
log.exception(e)
time.sleep(self._check_task_interval)
| (self) |
8,365 | feapder.core.spiders.batch_spider | task_is_done |
@summary: 检查任务状态 是否做完 同时更新批次时间 (不能挂 挂了批次时间就不更新了)
---------
---------
@result: True / False (做完 / 未做完)
| def task_is_done(self):
"""
@summary: 检查任务状态 是否做完 同时更新批次时间 (不能挂 挂了批次时间就不更新了)
---------
---------
@result: True / False (做完 / 未做完)
"""
is_done = False
# 查看批次记录表任务状态
sql = 'select date_format(batch_date, "{date_format}"), total_count, done_count, is_done from {batch_record_table} order by id desc limit 1'.format(
date_format=self._date_format.replace(":%M", ":%i"),
batch_record_table=self._batch_record_table,
)
batch_info = self._mysqldb.find(sql)
if batch_info is None:
raise Exception("查询批次信息失败")
if batch_info:
self._batch_date_cache, total_count, done_count, is_done = batch_info[
0
] # 更新self._batch_date_cache, 防止新批次已经开始了,但self._batch_date_cache还是原来的批次时间
log.info(
"《%s》 批次时间%s 批次进度 %s/%s 完成状态 %d"
% (
self._batch_name,
self._batch_date_cache,
done_count,
total_count,
is_done,
)
)
os.environ["batch_date"] = self._batch_date_cache # 更新BatchParser里边的批次时间
if is_done: # 检查任务表中是否有没做的任务 若有则is_done 为 False
# 比较耗时 加锁防止多进程同时查询
with RedisLock(key=self._spider_name) as lock:
if lock.locked:
log.info("批次表标记已完成,正在检查任务表是否有未完成的任务")
sql = "select 1 from %s where (%s = 0 or %s=2)%s limit 1" % (
self._task_table,
self._task_state,
self._task_state,
self._task_condition_prefix_and,
)
tasks = self._mysqldb.find(sql) # [(1,)] / []
if tasks:
log.info("检测到任务表中有未完成任务,等待任务下发")
is_done = False
# 更新batch_record 表的is_done 状态,减少查询任务表的次数
sql = 'update {batch_record_table} set is_done = 0 where batch_date = "{batch_date}"'.format(
batch_record_table=self._batch_record_table,
batch_date=self._batch_date_cache,
)
self._mysqldb.update(sql)
else:
log.info("任务表中任务均已完成,爬虫结束")
else:
log.info("批次表标记已完成,其他爬虫进程正在检查任务表是否有未完成的任务,本进程跳过检查,继续等待")
is_done = False
return is_done
| (self) |
8,366 | feapder.core.spiders.batch_spider | update_is_done | null | def update_is_done(self):
sql = "update {} set is_done = 1, update_time = CURRENT_TIME where batch_date = '{}' and is_done = 0".format(
self._batch_record_table, self.batch_date
)
self._mysqldb.update(sql)
| (self) |
8,369 | feapder.core.spiders.batch_spider | update_task_done_count |
@summary: 更新批次表中的任务状态
---------
---------
@result:
| def update_task_done_count(self):
"""
@summary: 更新批次表中的任务状态
---------
---------
@result:
"""
task_count = self.__get_task_state_count()
# log.info('《%s》 批次进度 %s/%s' % (self._batch_name, done_task_count, total_task_count))
# 更新批次表
sql = "update {} set done_count = {}, total_count = {}, fail_count = {}, update_time = CURRENT_TIME, is_done=0, `interval` = {}, interval_unit = '{}' where batch_date = '{}'".format(
self._batch_record_table,
task_count.get("done_count"),
task_count.get("total_count"),
task_count.get("failed_count"),
self._batch_interval
if self._batch_interval >= 1
else self._batch_interval * 24,
"day" if self._batch_interval >= 1 else "hour",
self.batch_date,
)
self._mysqldb.update(sql)
| (self) |
8,372 | feapder.network.item | Item | null | class Item(metaclass=ItemMetaclass):
__unique_key__ = []
def __init__(self, **kwargs):
self.__dict__ = kwargs
def __repr__(self):
return "<{}: {}>".format(self.item_name, tools.dumps_json(self.to_dict))
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def update(self, *args, **kwargs):
"""
更新字段,与字典使用方法一致
"""
self.__dict__.update(*args, **kwargs)
def update_strict(self, *args, **kwargs):
"""
严格更新,只更新item中有的字段
"""
for key, value in dict(*args, **kwargs).items():
if key in self.__dict__:
self.__dict__[key] = value
def pre_to_db(self):
"""
入库前的处理
"""
pass
@property
def to_dict(self):
propertys = {}
for key, value in self.__dict__.items():
if key not in (
"__name__",
"__table_name__",
"__name_underline__",
"__update_key__",
"__unique_key__",
):
if key.startswith(f"_{self.__class__.__name__}"):
key = key.replace(f"_{self.__class__.__name__}", "")
propertys[key] = value
return propertys
def to_sql(self, auto_update=False, update_columns=()):
return tools.make_insert_sql(
self.table_name, self.to_dict, auto_update, update_columns
)
@property
def item_name(self):
return self.__name__ or self.__class__.__name__
@item_name.setter
def item_name(self, name):
self.__name__ = name
self.__table_name__ = re.sub("_item$", "", self.name_underline)
@property
def table_name(self):
if not self.__table_name__:
self.__table_name__ = re.sub("_item$", "", self.name_underline)
return self.__table_name__
@table_name.setter
def table_name(self, name):
self.__table_name__ = name
self.__name__ = tools.key2hump(name) + "Item"
@property
def name_underline(self):
if not self.__name_underline__:
self.__name_underline__ = tools.key2underline(self.item_name)
return self.__name_underline__
@name_underline.setter
def name_underline(self, name):
self.__name_underline__ = name
@property
def unique_key(self):
return self.__unique_key__ or self.__class__.__unique_key__
@unique_key.setter
def unique_key(self, keys):
if isinstance(keys, (tuple, list)):
self.__unique_key__ = keys
else:
self.__unique_key__ = (keys,)
@property
def fingerprint(self):
args = []
for key, value in self.to_dict.items():
if value:
if (self.unique_key and key in self.unique_key) or not self.unique_key:
args.append(str(value))
if args:
args = sorted(args)
return tools.get_md5(*args)
else:
return None
def to_UpdateItem(self):
update_item = UpdateItem(**self.__dict__)
update_item.item_name = self.item_name
return update_item
| (**kwargs) |
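A short usage sketch for `Item`: subclass it, assign fields, and the table name falls out of the class name via `name_underline`. The class name, field values and unique key below are made up for illustration.

```python
from feapder import Item


class SpiderDataItem(Item):
    __unique_key__ = ["url"]    # fields used for the fingerprint / dedup

    def __init__(self, *args, **kwargs):
        super().__init__(**kwargs)
        self.url = None
        self.title = None


item = SpiderDataItem()
item.url = "https://example.com/a"
item.title = "demo"
print(item.table_name)    # spider_data (derived from the class name)
print(item.to_dict)       # {'url': ..., 'title': ...}
print(item.fingerprint)   # md5 over the unique-key values
```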
8,373 | feapder.network.item | __getitem__ | null | def __getitem__(self, key):
return self.__dict__[key]
| (self, key) |
8,374 | feapder.network.item | __init__ | null | def __init__(self, **kwargs):
self.__dict__ = kwargs
| (self, **kwargs) |
8,375 | feapder.network.item | __repr__ | null | def __repr__(self):
return "<{}: {}>".format(self.item_name, tools.dumps_json(self.to_dict))
| (self) |
8,376 | feapder.network.item | __setitem__ | null | def __setitem__(self, key, value):
self.__dict__[key] = value
| (self, key, value) |
8,377 | feapder.network.item | pre_to_db |
入库前的处理
| def pre_to_db(self):
"""
入库前的处理
"""
pass
| (self) |
8,378 | feapder.network.item | to_UpdateItem | null | def to_UpdateItem(self):
update_item = UpdateItem(**self.__dict__)
update_item.item_name = self.item_name
return update_item
| (self) |
8,379 | feapder.network.item | to_sql | null | def to_sql(self, auto_update=False, update_columns=()):
return tools.make_insert_sql(
self.table_name, self.to_dict, auto_update, update_columns
)
| (self, auto_update=False, update_columns=()) |
8,380 | feapder.network.item | update |
更新字段,与字典使用方法一致
| def update(self, *args, **kwargs):
"""
更新字段,与字典使用方法一致
"""
self.__dict__.update(*args, **kwargs)
| (self, *args, **kwargs) |
8,381 | feapder.network.item | update_strict |
严格更新,只更新item中有的字段
| def update_strict(self, *args, **kwargs):
"""
严格更新,只更新item中有的字段
"""
for key, value in dict(*args, **kwargs).items():
if key in self.__dict__:
self.__dict__[key] = value
| (self, *args, **kwargs) |
8,382 | feapder.network.request | Request | null | class Request:
user_agent_pool = user_agent
proxies_pool: BaseProxyPool = None
cache_db = None # redis / pika
cached_redis_key = None # 缓存response的文件文件夹 response_cached:cached_redis_key:md5
cached_expire_time = 1200 # 缓存过期时间
# 下载器
downloader: Downloader = None
session_downloader: Downloader = None
render_downloader: RenderDownloader = None
__REQUEST_ATTRS__ = {
# "method",
# "url",
"params",
"data",
"headers",
"cookies",
"files",
"auth",
"timeout",
"allow_redirects",
"proxies",
"hooks",
"stream",
"verify",
"cert",
"json",
}
_DEFAULT_KEY_VALUE_ = dict(
url="",
method=None,
retry_times=0,
priority=300,
parser_name=None,
callback=None,
filter_repeat=True,
auto_request=True,
request_sync=False,
use_session=None,
random_user_agent=True,
download_midware=None,
is_abandoned=False,
render=False,
render_time=0,
make_absolute_links=None,
)
_CUSTOM_PROPERTIES_ = {
"requests_kwargs",
"custom_ua",
"custom_proxies",
}
def __init__(
self,
url="",
retry_times=0,
priority=300,
parser_name=None,
callback=None,
filter_repeat=True,
auto_request=True,
request_sync=False,
use_session=None,
random_user_agent=True,
download_midware=None,
is_abandoned=False,
render=False,
render_time=0,
make_absolute_links=None,
**kwargs,
):
"""
@summary: Request参数
---------
框架参数
@param url: 待抓取url
@param retry_times: 当前重试次数
@param priority: 优先级 越小越优先 默认300
@param parser_name: 回调函数所在的类名 默认为当前类
@param callback: 回调函数 可以是函数 也可是函数名(如想跨类回调时,parser_name指定那个类名,callback指定那个类想回调的方法名即可)
@param filter_repeat: 是否需要去重 (True/False) 当setting中的REQUEST_FILTER_ENABLE设置为True时该参数生效 默认True
@param auto_request: 是否需要自动请求下载网页 默认是。设置为False时返回的response为空,需要自己去请求网页
@param request_sync: 是否同步请求下载网页,默认异步。如果该请求url过期时间快,可设置为True,相当于yield的request会立即响应,而不是去排队
@param use_session: 是否使用session方式
@param random_user_agent: 是否随机User-Agent (True/False) 当setting中的RANDOM_HEADERS设置为True时该参数生效 默认True
@param download_midware: 下载中间件。默认为parser中的download_midware
@param is_abandoned: 当发生异常时是否放弃重试 True/False. 默认False
@param render: 是否用浏览器渲染
@param render_time: 渲染时长,即打开网页等待指定时间后再获取源码
@param make_absolute_links: 是否转成绝对链接,默认是
--
以下参数与requests参数使用方式一致
@param method: 请求方式,如POST或GET,默认根据data值是否为空来判断
@param params: 请求参数
@param data: 请求body
@param json: 请求json字符串,同 json.dumps(data)
@param headers:
@param cookies: 字典 或 CookieJar 对象
@param files:
@param auth:
@param timeout: (浮点或元组)等待服务器数据的超时限制,是一个浮点数,或是一个(connect timeout, read timeout) 元组
@param allow_redirects : Boolean. True 表示允许跟踪 POST/PUT/DELETE 方法的重定向
@param proxies: 代理 {"http":"http://xxx", "https":"https://xxx"}
@param verify: 为 True 时将会验证 SSL 证书
@param stream: 如果为 False,将会立即下载响应内容
@param cert:
--
@param **kwargs: 其他值: 如 Request(item=item) 则item可直接用 request.item 取出
---------
@result:
"""
self.url = url
self.method = None
self.retry_times = retry_times
self.priority = priority
self.parser_name = parser_name
self.callback = callback
self.filter_repeat = filter_repeat
self.auto_request = auto_request
self.request_sync = request_sync
self.use_session = use_session
self.random_user_agent = random_user_agent
self.download_midware = download_midware
self.is_abandoned = is_abandoned
self.render = render
self.render_time = render_time
self.make_absolute_links = (
make_absolute_links
if make_absolute_links is not None
else setting.MAKE_ABSOLUTE_LINKS
)
# 自定义属性,不参与序列化
self.requests_kwargs = {}
for key, value in kwargs.items():
if key in self.__class__.__REQUEST_ATTRS__: # 取requests参数
self.requests_kwargs[key] = value
self.__dict__[key] = value
self.custom_ua = False
self.custom_proxies = False
def __repr__(self):
try:
return "<Request {}>".format(self.url)
except:
return "<Request {}>".format(str(self.to_dict)[:40])
def __setattr__(self, key, value):
"""
针对 request.xxx = xxx 的形式,更新request及内部参数值
@param key:
@param value:
@return:
"""
self.__dict__[key] = value
if key in self.__class__.__REQUEST_ATTRS__:
self.requests_kwargs[key] = value
# def __getattr__(self, item):
# try:
# return self.__dict__[item]
# except:
# raise AttributeError("Request has no attribute %s" % item)
def __lt__(self, other):
return self.priority < other.priority
@property
def _proxies_pool(self):
if not self.__class__.proxies_pool:
self.__class__.proxies_pool = tools.import_cls(setting.PROXY_POOL)()
return self.__class__.proxies_pool
@property
def _downloader(self):
if not self.__class__.downloader:
self.__class__.downloader = tools.import_cls(setting.DOWNLOADER)()
return self.__class__.downloader
@property
def _session_downloader(self):
if not self.__class__.session_downloader:
self.__class__.session_downloader = tools.import_cls(
setting.SESSION_DOWNLOADER
)()
return self.__class__.session_downloader
@property
def _render_downloader(self):
if not self.__class__.render_downloader:
try:
self.__class__.render_downloader = tools.import_cls(
setting.RENDER_DOWNLOADER
)()
except AttributeError:
log.error('当前是渲染模式,请安装 pip install "feapder[render]"')
os._exit(0)
return self.__class__.render_downloader
@property
def to_dict(self):
request_dict = {}
self.callback = (
getattr(self.callback, "__name__")
if callable(self.callback)
else self.callback
)
if isinstance(self.download_midware, (tuple, list)):
self.download_midware = [
getattr(download_midware, "__name__")
if callable(download_midware)
and download_midware.__class__.__name__ == "method"
else download_midware
for download_midware in self.download_midware
]
else:
self.download_midware = (
getattr(self.download_midware, "__name__")
if callable(self.download_midware)
and self.download_midware.__class__.__name__ == "method"
else self.download_midware
)
for key, value in self.__dict__.items():
if (
key in self.__class__._DEFAULT_KEY_VALUE_
and self.__class__._DEFAULT_KEY_VALUE_.get(key) == value
or key in self.__class__._CUSTOM_PROPERTIES_
):
continue
if value is not None:
if key in self.__class__.__REQUEST_ATTRS__:
if not isinstance(
value, (bool, float, int, str, tuple, list, dict)
):
value = tools.dumps_obj(value)
else:
if not isinstance(value, (bool, float, int, str)):
value = tools.dumps_obj(value)
request_dict[key] = value
return request_dict
@property
def callback_name(self):
return (
getattr(self.callback, "__name__")
if callable(self.callback)
else self.callback
)
def make_requests_kwargs(self):
"""
处理参数
"""
# 设置超时默认时间
self.requests_kwargs.setdefault(
"timeout", setting.REQUEST_TIMEOUT
) # connect=22 read=22
# 设置stream
# 默认情况下,当你进行网络请求后,响应体会立即被下载。
# stream=True是,调用Response.content 才会下载响应体,默认只返回header。
# 缺点: stream 设为 True,Requests 无法将连接释放回连接池,除非消耗了所有的数据,或者调用了 Response.close。 这样会带来连接效率低下的问题。
self.requests_kwargs.setdefault("stream", True)
# 关闭证书验证
self.requests_kwargs.setdefault("verify", False)
# 设置请求方法
method = self.__dict__.get("method")
if not method:
if "data" in self.requests_kwargs or "json" in self.requests_kwargs:
method = "POST"
else:
method = "GET"
self.method = method
# 设置user—agent
headers = self.requests_kwargs.get("headers", {})
if "user-agent" not in headers and "User-Agent" not in headers:
if self.random_user_agent and setting.RANDOM_HEADERS:
# 随机user—agent
ua = self.__class__.user_agent_pool.get(setting.USER_AGENT_TYPE)
headers.update({"User-Agent": ua})
self.requests_kwargs.update(headers=headers)
else:
# 使用默认的user—agent
self.requests_kwargs.setdefault(
"headers", {"User-Agent": setting.DEFAULT_USERAGENT}
)
else:
self.custom_ua = True
# 代理
proxies = self.requests_kwargs.get("proxies", -1)
if proxies == -1 and setting.PROXY_ENABLE and setting.PROXY_EXTRACT_API:
while True:
proxies = self._proxies_pool.get_proxy()
if proxies:
self.requests_kwargs.update(proxies=proxies)
break
else:
log.debug("暂无可用代理 ...")
else:
self.custom_proxies = True
def get_response(self, save_cached=False):
"""
获取带有selector功能的response
@param save_cached: 保存缓存 方便调试时不用每次都重新下载
@return:
"""
self.make_requests_kwargs()
log.debug(
"""
-------------- %srequest for ----------------
url = %s
method = %s
args = %s
"""
% (
""
if not self.parser_name
else "%s.%s "
% (
self.parser_name,
(
self.callback
and callable(self.callback)
and getattr(self.callback, "__name__")
or self.callback
)
or "parse",
),
self.url,
self.method,
self.requests_kwargs,
)
)
# def hooks(response, *args, **kwargs):
# print(response.url)
#
# self.requests_kwargs.update(hooks={'response': hooks})
# self.use_session 优先级高
use_session = (
setting.USE_SESSION if self.use_session is None else self.use_session
)
if self.render:
response = self._render_downloader.download(self)
elif use_session:
response = self._session_downloader.download(self)
else:
response = self._downloader.download(self)
response.make_absolute_links = self.make_absolute_links
if save_cached:
self.save_cached(response, expire_time=self.__class__.cached_expire_time)
return response
def get_params(self):
return self.requests_kwargs.get("params")
def get_proxies(self) -> dict:
"""
Returns: {"https": "https://ip:port", "http": "http://ip:port"}
"""
return self.requests_kwargs.get("proxies")
def get_proxy(self) -> str:
"""
Returns: ip:port
"""
proxies = self.get_proxies()
if proxies:
return re.sub(
"http.*?//", "", proxies.get("http", "") or proxies.get("https", "")
)
def del_proxy(self):
proxy = self.get_proxy()
if proxy:
self._proxies_pool.del_proxy(proxy)
del self.requests_kwargs["proxies"]
def get_headers(self) -> dict:
return self.requests_kwargs.get("headers", {})
def get_user_agent(self) -> str:
return self.get_headers().get("user_agent") or self.get_headers().get(
"User-Agent"
)
def get_cookies(self) -> dict:
cookies = self.requests_kwargs.get("cookies")
if cookies and isinstance(cookies, RequestsCookieJar):
cookies = cookies.get_dict()
if not cookies:
cookie_str = self.get_headers().get("Cookie") or self.get_headers().get(
"cookie"
)
if cookie_str:
cookies = tools.get_cookies_from_str(cookie_str)
return cookies
@property
def fingerprint(self):
"""
request唯一表识
@return:
"""
url = self.__dict__.get("url", "")
# url 归一化
url = tools.canonicalize_url(url)
args = [url]
for arg in ["params", "data", "files", "auth", "cert", "json"]:
if self.requests_kwargs.get(arg):
args.append(self.requests_kwargs.get(arg))
return tools.get_md5(*args)
@property
def _cache_db(self):
if not self.__class__.cache_db:
self.__class__.cache_db = RedisDB() # .from_url(setting.pika_spider_1_uri)
return self.__class__.cache_db
@property
def _cached_redis_key(self):
if self.__class__.cached_redis_key:
return (
f"response_cached:{self.__class__.cached_redis_key}:{self.fingerprint}"
)
else:
return f"response_cached:test:{self.fingerprint}"
def save_cached(self, response, expire_time=1200):
"""
使用redis保存response 用于调试 不用每回都下载
@param response:
@param expire_time: 过期时间
@return:
"""
self._cache_db.strset(self._cached_redis_key, response.to_dict, ex=expire_time)
def get_response_from_cached(self, save_cached=True):
"""
从缓存中获取response
注意:
属性值为空:
-raw : urllib3.response.HTTPResponse
-connection:requests.adapters.HTTPAdapter
-history
属性含义改变:
- request 由requests 改为Request
@param: save_cached 当无缓存 直接下载 下载完是否保存缓存
@return:
"""
response_dict = self._cache_db.strget(self._cached_redis_key)
if not response_dict:
log.info("无response缓存 重新下载")
response_obj = self.get_response(save_cached=save_cached)
else:
response_dict = eval(response_dict)
response_obj = Response.from_dict(response_dict)
return response_obj
def del_response_cached(self):
self._cache_db.clear(self._cached_redis_key)
@classmethod
def from_dict(cls, request_dict):
for key, value in request_dict.items():
if isinstance(value, bytes): # 反序列化 如item
request_dict[key] = tools.loads_obj(value)
return cls(**request_dict)
def copy(self):
return self.__class__.from_dict(copy.deepcopy(self.to_dict))
| (url='', retry_times=0, priority=300, parser_name=None, callback=None, filter_repeat=True, auto_request=True, request_sync=False, use_session=None, random_user_agent=True, download_midware=None, is_abandoned=False, render=False, render_time=0, make_absolute_links=None, **kwargs) |
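A brief sketch of how `Request` is commonly constructed: requests-style kwargs are captured in `requests_kwargs`, while any extra kwarg (such as `item=` here, an assumption for illustration) becomes a plain attribute that round-trips through `to_dict` / `from_dict`. The URL and payloads are placeholders.

```python
from feapder import Request

req = Request(
    "https://example.com/detail?id=1",
    callback="parse_detail",     # may also be a bound method
    priority=100,                # smaller runs earlier
    data={"page": 1},            # requests kwarg -> requests_kwargs
    item={"id": 1},              # custom payload, later read as request.item
)

print(req.requests_kwargs)       # {'data': {'page': 1}}
print(req.item)                  # {'id': 1}
print(req.fingerprint)           # md5 of the canonical url + params/data/...

copied = Request.from_dict(req.to_dict)
print(copied.url == req.url)     # True
```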
8,383 | feapder.network.request | __init__ |
@summary: Request参数
---------
框架参数
@param url: 待抓取url
@param retry_times: 当前重试次数
@param priority: 优先级 越小越优先 默认300
@param parser_name: 回调函数所在的类名 默认为当前类
@param callback: 回调函数 可以是函数 也可是函数名(如想跨类回调时,parser_name指定那个类名,callback指定那个类想回调的方法名即可)
@param filter_repeat: 是否需要去重 (True/False) 当setting中的REQUEST_FILTER_ENABLE设置为True时该参数生效 默认True
@param auto_request: 是否需要自动请求下载网页 默认是。设置为False时返回的response为空,需要自己去请求网页
@param request_sync: 是否同步请求下载网页,默认异步。如果该请求url过期时间快,可设置为True,相当于yield的request会立即响应,而不是去排队
@param use_session: 是否使用session方式
@param random_user_agent: 是否随机User-Agent (True/False) 当setting中的RANDOM_HEADERS设置为True时该参数生效 默认True
@param download_midware: 下载中间件。默认为parser中的download_midware
@param is_abandoned: 当发生异常时是否放弃重试 True/False. 默认False
@param render: 是否用浏览器渲染
@param render_time: 渲染时长,即打开网页等待指定时间后再获取源码
@param make_absolute_links: 是否转成绝对链接,默认是
--
以下参数与requests参数使用方式一致
@param method: 请求方式,如POST或GET,默认根据data值是否为空来判断
@param params: 请求参数
@param data: 请求body
@param json: 请求json字符串,同 json.dumps(data)
@param headers:
@param cookies: 字典 或 CookieJar 对象
@param files:
@param auth:
@param timeout: (浮点或元组)等待服务器数据的超时限制,是一个浮点数,或是一个(connect timeout, read timeout) 元组
@param allow_redirects : Boolean. True 表示允许跟踪 POST/PUT/DELETE 方法的重定向
@param proxies: 代理 {"http":"http://xxx", "https":"https://xxx"}
@param verify: 为 True 时将会验证 SSL 证书
@param stream: 如果为 False,将会立即下载响应内容
@param cert:
--
@param **kwargs: 其他值: 如 Request(item=item) 则item可直接用 request.item 取出
---------
@result:
| def __init__(
self,
url="",
retry_times=0,
priority=300,
parser_name=None,
callback=None,
filter_repeat=True,
auto_request=True,
request_sync=False,
use_session=None,
random_user_agent=True,
download_midware=None,
is_abandoned=False,
render=False,
render_time=0,
make_absolute_links=None,
**kwargs,
):
"""
@summary: Request参数
---------
框架参数
@param url: 待抓取url
@param retry_times: 当前重试次数
@param priority: 优先级 越小越优先 默认300
@param parser_name: 回调函数所在的类名 默认为当前类
@param callback: 回调函数 可以是函数 也可是函数名(如想跨类回调时,parser_name指定那个类名,callback指定那个类想回调的方法名即可)
@param filter_repeat: 是否需要去重 (True/False) 当setting中的REQUEST_FILTER_ENABLE设置为True时该参数生效 默认True
@param auto_request: 是否需要自动请求下载网页 默认是。设置为False时返回的response为空,需要自己去请求网页
@param request_sync: 是否同步请求下载网页,默认异步。如果该请求url过期时间快,可设置为True,相当于yield的reqeust会立即响应,而不是去排队
@param use_session: 是否使用session方式
@param random_user_agent: 是否随机User-Agent (True/False) 当setting中的RANDOM_HEADERS设置为True时该参数生效 默认True
@param download_midware: 下载中间件。默认为parser中的download_midware
@param is_abandoned: 当发生异常时是否放弃重试 True/False. 默认False
@param render: 是否用浏览器渲染
@param render_time: 渲染时长,即打开网页等待指定时间后再获取源码
@param make_absolute_links: 是否转成绝对连接,默认是
--
以下参数与requests参数使用方式一致
@param method: 请求方式,如POST或GET,默认根据data值是否为空来判断
@param params: 请求参数
@param data: 请求body
@param json: 请求json字符串,同 json.dumps(data)
@param headers:
@param cookies: 字典 或 CookieJar 对象
@param files:
@param auth:
@param timeout: (浮点或元组)等待服务器数据的超时限制,是一个浮点数,或是一个(connect timeout, read timeout) 元组
@param allow_redirects : Boolean. True 表示允许跟踪 POST/PUT/DELETE 方法的重定向
@param proxies: 代理 {"http":"http://xxx", "https":"https://xxx"}
@param verify: 为 True 时将会验证 SSL 证书
@param stream: 如果为 False,将会立即下载响应内容
@param cert:
--
@param **kwargs: 其他值: 如 Request(item=item) 则item可直接用 request.item 取出
---------
@result:
"""
self.url = url
self.method = None
self.retry_times = retry_times
self.priority = priority
self.parser_name = parser_name
self.callback = callback
self.filter_repeat = filter_repeat
self.auto_request = auto_request
self.request_sync = request_sync
self.use_session = use_session
self.random_user_agent = random_user_agent
self.download_midware = download_midware
self.is_abandoned = is_abandoned
self.render = render
self.render_time = render_time
self.make_absolute_links = (
make_absolute_links
if make_absolute_links is not None
else setting.MAKE_ABSOLUTE_LINKS
)
# 自定义属性,不参与序列化
self.requests_kwargs = {}
for key, value in kwargs.items():
if key in self.__class__.__REQUEST_ATTRS__: # 取requests参数
self.requests_kwargs[key] = value
self.__dict__[key] = value
self.custom_ua = False
self.custom_proxies = False
| (self, url='', retry_times=0, priority=300, parser_name=None, callback=None, filter_repeat=True, auto_request=True, request_sync=False, use_session=None, random_user_agent=True, download_midware=None, is_abandoned=False, render=False, render_time=0, make_absolute_links=None, **kwargs) |
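A short construction sketch for the signature above, mixing framework arguments, requests-style keyword arguments, and a free-form kwarg. The URL and values are illustrative only.

```python
from feapder import Request

request = Request(
    "https://example.com/detail",
    callback="parse_detail",          # function or method name; resolved via parser_name
    priority=100,                     # smaller value -> scheduled earlier
    headers={"User-Agent": "demo"},   # forwarded to requests via requests_kwargs
    timeout=(22, 22),                 # (connect timeout, read timeout)
    item={"source": "demo"},          # extra kwarg, readable as request.item
)
print(request.item)             # {'source': 'demo'}
print(request.requests_kwargs)  # {'headers': {...}, 'timeout': (22, 22)}
```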
8,384 | feapder.network.request | __lt__ | null | def __lt__(self, other):
return self.priority < other.priority
| (self, other) |
8,385 | feapder.network.request | __repr__ | null | def __repr__(self):
try:
return "<Request {}>".format(self.url)
except:
return "<Request {}>".format(str(self.to_dict)[:40])
| (self) |
8,386 | feapder.network.request | __setattr__ |
For assignments of the form request.xxx = xxx, update the request and its internal requests kwargs
@param key:
@param value:
@return:
| def __setattr__(self, key, value):
"""
针对 request.xxx = xxx 的形式,更新reqeust及内部参数值
@param key:
@param value:
@return:
"""
self.__dict__[key] = value
if key in self.__class__.__REQUEST_ATTRS__:
self.requests_kwargs[key] = value
| (self, key, value) |
8,387 | feapder.network.request | copy | null | def copy(self):
return self.__class__.from_dict(copy.deepcopy(self.to_dict))
| (self) |
8,388 | feapder.network.request | del_proxy | null | def del_proxy(self):
proxy = self.get_proxy()
if proxy:
self._proxies_pool.del_proxy(proxy)
del self.requests_kwargs["proxies"]
| (self) |
8,389 | feapder.network.request | del_response_cached | null | def del_response_cached(self):
self._cache_db.clear(self._cached_redis_key)
| (self) |
8,390 | feapder.network.request | get_cookies | null | def get_cookies(self) -> dict:
cookies = self.requests_kwargs.get("cookies")
if cookies and isinstance(cookies, RequestsCookieJar):
cookies = cookies.get_dict()
if not cookies:
cookie_str = self.get_headers().get("Cookie") or self.get_headers().get(
"cookie"
)
if cookie_str:
cookies = tools.get_cookies_from_str(cookie_str)
return cookies
| (self) -> dict |
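A sketch of the header/cookie accessors above. When no cookies kwarg is given, get_cookies() falls back to parsing the Cookie header; the parsing is delegated to tools.get_cookies_from_str, so the printed dict below is what that helper is expected to return.

```python
from feapder import Request

request = Request(
    "https://example.com",
    headers={"User-Agent": "demo-ua", "Cookie": "sid=abc123; lang=en"},
)

print(request.get_headers())      # {'User-Agent': 'demo-ua', 'Cookie': 'sid=abc123; lang=en'}
print(request.get_cookies())      # expected: {'sid': 'abc123', 'lang': 'en'}
print(request.get_user_agent())   # 'demo-ua'
```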
8,391 | feapder.network.request | get_headers | null | def get_headers(self) -> dict:
return self.requests_kwargs.get("headers", {})
| (self) -> dict |
8,392 | feapder.network.request | get_params | null | def get_params(self):
return self.requests_kwargs.get("params")
| (self) |
8,393 | feapder.network.request | get_proxies |
Returns: {"https": "https://ip:port", "http": "http://ip:port"}
| def get_proxies(self) -> dict:
"""
Returns: {"https": "https://ip:port", "http": "http://ip:port"}
"""
return self.requests_kwargs.get("proxies")
| (self) -> dict |
8,394 | feapder.network.request | get_proxy |
Returns: ip:port
| def get_proxy(self) -> str:
"""
Returns: ip:port
"""
proxies = self.get_proxies()
if proxies:
return re.sub(
"http.*?//", "", proxies.get("http", "") or proxies.get("https", "")
)
| (self) -> str |
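A sketch of get_proxies / get_proxy with an explicitly supplied proxy; the IP and port are placeholders.

```python
from feapder import Request

request = Request(
    "https://example.com",
    proxies={"http": "http://1.2.3.4:8888", "https": "https://1.2.3.4:8888"},
)

print(request.get_proxies())  # {'http': 'http://1.2.3.4:8888', 'https': 'https://1.2.3.4:8888'}
print(request.get_proxy())    # '1.2.3.4:8888' -- scheme prefix stripped by the regex above
```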
8,395 | feapder.network.request | get_response |
Get a response with selector support
@param save_cached: cache the response so debugging does not re-download it every time
@return:
| def get_response(self, save_cached=False):
"""
获取带有selector功能的response
@param save_cached: 保存缓存 方便调试时不用每次都重新下载
@return:
"""
self.make_requests_kwargs()
log.debug(
"""
-------------- %srequest for ----------------
url = %s
method = %s
args = %s
"""
% (
""
if not self.parser_name
else "%s.%s "
% (
self.parser_name,
(
self.callback
and callable(self.callback)
and getattr(self.callback, "__name__")
or self.callback
)
or "parse",
),
self.url,
self.method,
self.requests_kwargs,
)
)
# def hooks(response, *args, **kwargs):
# print(response.url)
#
# self.requests_kwargs.update(hooks={'response': hooks})
# self.use_session 优先级高
use_session = (
setting.USE_SESSION if self.use_session is None else self.use_session
)
if self.render:
response = self._render_downloader.download(self)
elif use_session:
response = self._session_downloader.download(self)
else:
response = self._downloader.download(self)
response.make_absolute_links = self.make_absolute_links
if save_cached:
self.save_cached(response, expire_time=self.__class__.cached_expire_time)
return response
| (self, save_cached=False) |
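A one-off download sketch using get_response() directly, outside the scheduler; it assumes network access and the framework's default settings, and the URL is only an example.

```python
from feapder import Request

response = Request("https://www.baidu.com").get_response()
print(response.status_code)
print(response.xpath("//title/text()").extract_first())
```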
8,396 | feapder.network.request | get_response_from_cached |
Get the response from the cache
Note:
Attributes that are empty:
-raw : urllib3.response.HTTPResponse
-connection:requests.adapters.HTTPAdapter
-history
Attributes whose meaning changes:
- request: changed from the requests object to feapder's Request
@param: save_cached when no cache exists the page is downloaded directly; whether to cache it after downloading
@return:
| def get_response_from_cached(self, save_cached=True):
"""
从缓存中获取response
注意:
属性值为空:
-raw : urllib3.response.HTTPResponse
-connection:requests.adapters.HTTPAdapter
-history
属性含义改变:
- request 由requests 改为Request
@param: save_cached 当无缓存 直接下载 下载完是否保存缓存
@return:
"""
response_dict = self._cache_db.strget(self._cached_redis_key)
if not response_dict:
log.info("无response缓存 重新下载")
response_obj = self.get_response(save_cached=save_cached)
else:
response_dict = eval(response_dict)
response_obj = Response.from_dict(response_dict)
return response_obj
| (self, save_cached=True) |
8,397 | feapder.network.request | get_user_agent | null | def get_user_agent(self) -> str:
return self.get_headers().get("user_agent") or self.get_headers().get(
"User-Agent"
)
| (self) -> str |
8,398 | feapder.network.request | make_requests_kwargs |
Prepare the requests keyword arguments
| def make_requests_kwargs(self):
"""
处理参数
"""
# 设置超时默认时间
self.requests_kwargs.setdefault(
"timeout", setting.REQUEST_TIMEOUT
) # connect=22 read=22
# 设置stream
# 默认情况下,当你进行网络请求后,响应体会立即被下载。
# stream=True是,调用Response.content 才会下载响应体,默认只返回header。
# 缺点: stream 设为 True,Requests 无法将连接释放回连接池,除非消耗了所有的数据,或者调用了 Response.close。 这样会带来连接效率低下的问题。
self.requests_kwargs.setdefault("stream", True)
# 关闭证书验证
self.requests_kwargs.setdefault("verify", False)
# 设置请求方法
method = self.__dict__.get("method")
if not method:
if "data" in self.requests_kwargs or "json" in self.requests_kwargs:
method = "POST"
else:
method = "GET"
self.method = method
# 设置user—agent
headers = self.requests_kwargs.get("headers", {})
if "user-agent" not in headers and "User-Agent" not in headers:
if self.random_user_agent and setting.RANDOM_HEADERS:
# 随机user—agent
ua = self.__class__.user_agent_pool.get(setting.USER_AGENT_TYPE)
headers.update({"User-Agent": ua})
self.requests_kwargs.update(headers=headers)
else:
# 使用默认的user—agent
self.requests_kwargs.setdefault(
"headers", {"User-Agent": setting.DEFAULT_USERAGENT}
)
else:
self.custom_ua = True
# 代理
proxies = self.requests_kwargs.get("proxies", -1)
if proxies == -1 and setting.PROXY_ENABLE and setting.PROXY_EXTRACT_API:
while True:
proxies = self._proxies_pool.get_proxy()
if proxies:
self.requests_kwargs.update(proxies=proxies)
break
else:
log.debug("暂无可用代理 ...")
else:
self.custom_proxies = True
| (self) |
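make_requests_kwargs() is normally invoked by get_response(), but calling it directly shows which defaults get filled in. The exact values depend on the project's setting.py; the expected output below follows the code shown above, and the URL is only a placeholder (no request is actually sent).

```python
from feapder import Request

request = Request("https://httpbin.org/post", data={"q": "feapder"})
request.make_requests_kwargs()

print(request.method)                      # 'POST' -- inferred because `data` is present
print(request.requests_kwargs["stream"])   # True
print(request.requests_kwargs["verify"])   # False
print(request.requests_kwargs["timeout"])  # setting.REQUEST_TIMEOUT
```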
8,399 | feapder.network.request | save_cached |
Cache the response in redis for debugging, so it does not have to be re-downloaded every time
@param response:
@param expire_time: expiry time in seconds
@return:
| def save_cached(self, response, expire_time=1200):
"""
使用redis保存response 用于调试 不用每回都下载
@param response:
@param expire_time: 过期时间
@return:
"""
self._cache_db.strset(self._cached_redis_key, response.to_dict, ex=expire_time)
| (self, response, expire_time=1200) |
8,400 | feapder.network.response | Response | null | class Response(res):
def __init__(self, response, make_absolute_links=None):
"""
Args:
response: requests请求返回的response
make_absolute_links: 是否自动补全url
"""
super(Response, self).__init__()
self.__dict__.update(response.__dict__)
self.make_absolute_links = (
make_absolute_links
if make_absolute_links is not None
else setting.MAKE_ABSOLUTE_LINKS
)
self._cached_selector = None
self._cached_text = None
self._cached_json = None
self._encoding = None
self.encoding_errors = "strict" # strict / replace / ignore
self.browser = self.driver = None
@classmethod
def from_text(
cls,
text: str,
url: str = "",
cookies: dict = None,
headers: dict = None,
encoding="utf-8",
):
response_dict = {
"_content": text.encode(encoding=encoding),
"cookies": cookies or {},
"encoding": encoding,
"headers": headers or {},
"status_code": 200,
"elapsed": 0,
"url": url,
}
return cls.from_dict(response_dict)
@classmethod
def from_dict(cls, response_dict):
"""
利用字典获取Response对象
@param response_dict: 原生的response.__dict__
@return:
"""
cookie_jar = RequestsCookieJar()
cookie_jar.update(other=response_dict["cookies"])
response_dict["cookies"] = cookie_jar
response_dict["elapsed"] = datetime.timedelta(
0, 0, response_dict["elapsed"]
) # 耗时
response_dict["connection"] = None
response_dict["_content_consumed"] = True
response = res()
response.__dict__.update(response_dict)
return cls(response)
@property
def to_dict(self):
response_dict = {
"_content": self.content,
"cookies": self.cookies.get_dict(),
"encoding": self.encoding,
"headers": self.headers,
"status_code": self.status_code,
"elapsed": self.elapsed.microseconds, # 耗时
"url": self.url,
}
return response_dict
def __clear_cache(self):
self.__dict__["_cached_selector"] = None
self.__dict__["_cached_text"] = None
self.__dict__["_cached_json"] = None
@property
def encoding(self):
"""
编码优先级:自定义编码 > header中编码 > 页面编码 > 根据content猜测的编码
"""
self._encoding = (
self._encoding
or self._headers_encoding()
or self._body_declared_encoding()
or self.apparent_encoding
)
return self._encoding
@encoding.setter
def encoding(self, val):
self.__clear_cache()
self._encoding = val
code = encoding
def _headers_encoding(self):
"""
从headers获取头部charset编码
"""
content_type = self.headers.get("Content-Type") or self.headers.get(
"content-type"
)
if content_type:
return (
http_content_type_encoding(content_type) or "utf-8"
if "application/json" in content_type
else None
)
def _body_declared_encoding(self):
"""
从html xml等获取<meta charset="编码">
"""
return html_body_declared_encoding(self.content)
def _get_unicode_html(self, html):
if not html or not isinstance(html, bytes):
return html
converted = UnicodeDammit(html, is_html=True)
if not converted.unicode_markup:
raise Exception(
"Failed to detect encoding of article HTML, tried: %s"
% ", ".join(converted.tried_encodings)
)
html = converted.unicode_markup
return html
def _make_absolute(self, link):
"""Makes a given link absolute."""
try:
link = link.strip()
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed["netloc"]:
return urljoin(self.url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed["scheme"]:
parsed["scheme"] = urlparse(self.url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
except Exception as e:
log.error(
"Invalid URL <{}> can't make absolute_link. exception: {}".format(
link, e
)
)
# Link is absolute and complete with scheme; nothing to be done here.
return link
def _absolute_links(self, text):
regexs = [
r'(<a.*?href\s*?=\s*?["\'])(.+?)(["\'])', # a
r'(<img.*?src\s*?=\s*?["\'])(.+?)(["\'])', # img
r'(<link.*?href\s*?=\s*?["\'])(.+?)(["\'])', # css
r'(<script.*?src\s*?=\s*?["\'])(.+?)(["\'])', # js
]
for regex in regexs:
def replace_href(text):
# html = text.group(0)
link = text.group(2)
absolute_link = self._make_absolute(link)
# return re.sub(regex, r'\1{}\3'.format(absolute_link), html) # 使用正则替换,个别字符不支持。如该网址源代码http://permit.mep.gov.cn/permitExt/syssb/xxgk/xxgk!showImage.action?dataid=0b092f8115ff45c5a50947cdea537726
return text.group(1) + absolute_link + text.group(3)
text = re.sub(regex, replace_href, text, flags=re.S | re.I)
return text
def _del_special_character(self, text):
"""
删除特殊字符
"""
for special_character_pattern in SPECIAL_CHARACTER_PATTERNS:
text = special_character_pattern.sub("", text)
return text
@property
def __text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
if not self.content:
return ""
# Decode unicode from given encoding.
try:
content = str(self.content, self.encoding, errors=self.encoding_errors)
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors=self.encoding_errors)
return content
@property
def text(self):
if self._cached_text is None:
if self.encoding and self.encoding.upper() != FAIL_ENCODING:
try:
self._cached_text = self.__text
except UnicodeDecodeError:
self._cached_text = self._get_unicode_html(self.content)
else:
self._cached_text = self._get_unicode_html(self.content)
if self._cached_text:
if self.make_absolute_links:
self._cached_text = self._absolute_links(self._cached_text)
self._cached_text = self._del_special_character(self._cached_text)
return self._cached_text
@text.setter
def text(self, html):
self._cached_text = html
if self.make_absolute_links:
self._cached_text = self._absolute_links(self._cached_text)
self._cached_text = self._del_special_character(self._cached_text)
self._cached_selector = Selector(self.text)
@property
def json(self, **kwargs):
if self._cached_json is None:
self.encoding = self.encoding or "utf-8"
self._cached_json = super(Response, self).json(**kwargs)
return self._cached_json
@property
def content(self):
content = super(Response, self).content
return content
@property
def is_html(self):
content_type = self.headers.get("Content-Type", "")
if "text/html" in content_type:
return True
else:
return False
@property
def selector(self):
if self._cached_selector is None:
self._cached_selector = Selector(self.text)
return self._cached_selector
def bs4(self, features="html.parser"):
soup = BeautifulSoup(self.text, features)
return soup
def extract(self):
return self.selector.get()
def xpath(self, query, **kwargs):
return self.selector.xpath(query, **kwargs)
def css(self, query):
return self.selector.css(query)
def re(self, regex, replace_entities=False):
"""
@summary: 正则匹配
注意:网页源码<a class='page-numbers'... 会被处理成<a class="page-numbers" ; 写正则时要写<a class="(.*?)"。 但不会改非html的文本引号格式
为了使用方便,正则单双引号自动处理为不敏感
---------
@param regex: 正则或者re.compile
@param replace_entities: 为True时 去掉 等字符, 转义"为 " 等, 会使网页结构发生变化。如在网页源码中提取json, 建议设置成False
---------
@result: 列表
"""
# 将单双引号设置为不敏感
if isinstance(regex, str):
regex = re.sub("['\"]", "['\"]", regex)
return self.selector.re(regex, replace_entities)
def re_first(self, regex, default=None, replace_entities=False):
"""
@summary: 正则匹配
注意:网页源码<a class='page-numbers'... 会被处理成<a class="page-numbers" ; 写正则时要写<a class="(.*?)"。 但不会改非html的文本引号格式
为了使用方便,正则单双引号自动处理为不敏感
---------
@param regex: 正则或者re.compile
@param default: 未匹配到, 默认值
@param replace_entities: 为True时 去掉 等字符, 转义"为 " 等, 会使网页结构发生变化。如在网页源码中提取json, 建议设置成False
---------
@result: 第一个值或默认值
"""
# 将单双引号设置为不敏感
if isinstance(regex, str):
regex = re.sub("['\"]", "['\"]", regex)
return self.selector.re_first(regex, default, replace_entities)
def close_browser(self, request):
if self.browser:
request.render_downloader.close(self.browser)
def __del__(self):
self.close()
def open(self):
body = self.content
if b"<base" not in body:
# <head> 标签后插入一个<base href="url">标签
repl = fr'\1<base href="{self.url}">'
body = re.sub(rb"(<head(?:>|\s.*?>))", repl.encode("utf-8"), body)
fd, fname = tempfile.mkstemp(".html")
os.write(fd, body)
os.close(fd)
return webbrowser.open(f"file://{fname}")
| (response, make_absolute_links=None) |
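An offline sketch of the Response helpers above, building a Response with from_text instead of a network call. It assumes the framework default MAKE_ABSOLUTE_LINKS=True, which is why the relative href comes back absolutized; the HTML and URL are illustrative.

```python
from feapder.network.response import Response

html = (
    "<html><head><title>Demo</title></head>"
    "<body><a class='item' href='/detail?id=1'>first</a></body></html>"
)
response = Response.from_text(html, url="https://example.com/list")

print(response.xpath("//title/text()").extract_first())  # 'Demo'
print(response.css("a.item::text").extract_first())       # 'first'
# The selector serializes attributes with double quotes and re() treats quote
# styles as interchangeable, so this pattern matches the single-quoted source.
print(response.re_first(r'href="(.*?)"'))                  # 'https://example.com/detail?id=1'
```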
8,401 | feapder.network.response | __clear_cache | null | def __clear_cache(self):
self.__dict__["_cached_selector"] = None
self.__dict__["_cached_text"] = None
self.__dict__["_cached_json"] = None
| (self) |
8,402 | requests.models | __bool__ | Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
| def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
| (self) |
8,403 | feapder.network.response | __del__ | null | def __del__(self):
self.close()
| (self) |
8,405 | requests.models | __exit__ | null | def __exit__(self, *args):
self.close()
| (self, *args) |
8,406 | requests.models | __getstate__ | null | def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
| (self) |
8,407 | feapder.network.response | __init__ |
Args:
response: the response returned by a requests call
make_absolute_links: whether to automatically convert links to absolute URLs
| def __init__(self, response, make_absolute_links=None):
"""
Args:
response: requests请求返回的response
make_absolute_links: 是否自动补全url
"""
super(Response, self).__init__()
self.__dict__.update(response.__dict__)
self.make_absolute_links = (
make_absolute_links
if make_absolute_links is not None
else setting.MAKE_ABSOLUTE_LINKS
)
self._cached_selector = None
self._cached_text = None
self._cached_json = None
self._encoding = None
self.encoding_errors = "strict" # strict / replace / ignore
self.browser = self.driver = None
| (self, response, make_absolute_links=None) |
8,408 | requests.models | __iter__ | Allows you to use a response as an iterator. | def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
| (self) |
8,409 | requests.models | __nonzero__ | Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
| def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
| (self) |
8,410 | requests.models | __repr__ | null | def __repr__(self):
return f"<Response [{self.status_code}]>"
| (self) |
8,411 | requests.models | __setstate__ | null | def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, "_content_consumed", True)
setattr(self, "raw", None)
| (self, state) |
8,412 | feapder.network.response | _absolute_links | null | def _absolute_links(self, text):
regexs = [
r'(<a.*?href\s*?=\s*?["\'])(.+?)(["\'])', # a
r'(<img.*?src\s*?=\s*?["\'])(.+?)(["\'])', # img
r'(<link.*?href\s*?=\s*?["\'])(.+?)(["\'])', # css
r'(<script.*?src\s*?=\s*?["\'])(.+?)(["\'])', # js
]
for regex in regexs:
def replace_href(text):
# html = text.group(0)
link = text.group(2)
absolute_link = self._make_absolute(link)
# return re.sub(regex, r'\1{}\3'.format(absolute_link), html) # 使用正则替换,个别字符不支持。如该网址源代码http://permit.mep.gov.cn/permitExt/syssb/xxgk/xxgk!showImage.action?dataid=0b092f8115ff45c5a50947cdea537726
return text.group(1) + absolute_link + text.group(3)
text = re.sub(regex, replace_href, text, flags=re.S | re.I)
return text
| (self, text) |
8,413 | feapder.network.response | _body_declared_encoding |
Get the <meta charset="..."> declaration from html, xml, etc.
| def _body_declared_encoding(self):
"""
从html xml等获取<meta charset="编码">
"""
return html_body_declared_encoding(self.content)
| (self) |
8,414 | feapder.network.response | _del_special_character |
Remove special characters
| def _del_special_character(self, text):
"""
删除特殊字符
"""
for special_character_pattern in SPECIAL_CHARACTER_PATTERNS:
text = special_character_pattern.sub("", text)
return text
| (self, text) |
8,415 | feapder.network.response | _get_unicode_html | null | def _get_unicode_html(self, html):
if not html or not isinstance(html, bytes):
return html
converted = UnicodeDammit(html, is_html=True)
if not converted.unicode_markup:
raise Exception(
"Failed to detect encoding of article HTML, tried: %s"
% ", ".join(converted.tried_encodings)
)
html = converted.unicode_markup
return html
| (self, html) |
8,416 | feapder.network.response | _headers_encoding |
Get the charset encoding from the Content-Type header
| def _headers_encoding(self):
"""
从headers获取头部charset编码
"""
content_type = self.headers.get("Content-Type") or self.headers.get(
"content-type"
)
if content_type:
return (
http_content_type_encoding(content_type) or "utf-8"
if "application/json" in content_type
else None
)
| (self) |
8,417 | feapder.network.response | _make_absolute | Makes a given link absolute. | def _make_absolute(self, link):
"""Makes a given link absolute."""
try:
link = link.strip()
# Parse the link with stdlib.
parsed = urlparse(link)._asdict()
# If link is relative, then join it with base_url.
if not parsed["netloc"]:
return urljoin(self.url, link)
# Link is absolute; if it lacks a scheme, add one from base_url.
if not parsed["scheme"]:
parsed["scheme"] = urlparse(self.url).scheme
# Reconstruct the URL to incorporate the new scheme.
parsed = (v for v in parsed.values())
return urlunparse(parsed)
except Exception as e:
log.error(
"Invalid URL <{}> can't make absolute_link. exception: {}".format(
link, e
)
)
# Link is absolute and complete with scheme; nothing to be done here.
return link
| (self, link) |
8,418 | feapder.network.response | bs4 | null | def bs4(self, features="html.parser"):
soup = BeautifulSoup(self.text, features)
return soup
| (self, features='html.parser') |
8,419 | requests.models | close | Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
| def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, "release_conn", None)
if release_conn is not None:
release_conn()
| (self) |
8,420 | feapder.network.response | close_browser | null | def close_browser(self, request):
if self.browser:
request.render_downloader.close(self.browser)
| (self, request) |
8,421 | feapder.network.response | css | null | def css(self, query):
return self.selector.css(query)
| (self, query) |
8,422 | feapder.network.response | extract | null | def extract(self):
return self.selector.get()
| (self) |
8,423 | requests.models | iter_content | Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
| def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, "stream"):
try:
yield from self.raw.stream(chunk_size, decode_content=True)
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except SSLError as e:
raise RequestsSSLError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError(
f"chunk_size must be an int, it is instead a {type(chunk_size)}."
)
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
| (self, chunk_size=1, decode_unicode=False) |
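Plain requests usage of iter_content, which feapder's Response inherits unchanged; it pairs with the stream=True default the framework sets. The URL and file name are placeholders.

```python
import requests

with requests.get("https://httpbin.org/bytes/1024", stream=True) as resp:
    resp.raise_for_status()
    with open("payload.bin", "wb") as fh:
        for chunk in resp.iter_content(chunk_size=256):
            fh.write(chunk)
```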
8,424 | requests.models | iter_lines | Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
| def iter_lines(
self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(
chunk_size=chunk_size, decode_unicode=decode_unicode
):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
yield from lines
if pending is not None:
yield pending
| (self, chunk_size=512, decode_unicode=False, delimiter=None) |
8,425 | feapder.network.response | open | null | def open(self):
body = self.content
if b"<base" not in body:
# <head> 标签后插入一个<base href="url">标签
repl = fr'\1<base href="{self.url}">'
body = re.sub(rb"(<head(?:>|\s.*?>))", repl.encode("utf-8"), body)
fd, fname = tempfile.mkstemp(".html")
os.write(fd, body)
os.close(fd)
return webbrowser.open(f"file://{fname}")
| (self) |
8,426 | requests.models | raise_for_status | Raises :class:`HTTPError`, if one occurred. | def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ""
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode("utf-8")
except UnicodeDecodeError:
reason = self.reason.decode("iso-8859-1")
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = (
f"{self.status_code} Client Error: {reason} for url: {self.url}"
)
elif 500 <= self.status_code < 600:
http_error_msg = (
f"{self.status_code} Server Error: {reason} for url: {self.url}"
)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
| (self) |
8,427 | feapder.network.response | re |
@summary: regular-expression matching
Note: page source such as <a class='page-numbers'... is normalized to <a class="page-numbers"; write patterns as <a class="(.*?)". Quote styles in non-HTML text are left unchanged
For convenience, single and double quotes in the pattern are treated as interchangeable
---------
@param regex: a pattern string or an re.compile object
@param replace_entities: when True, entities such as &nbsp; are stripped and escaped quotes are unescaped, which changes the page structure; set to False when extracting JSON from the page source
---------
@result: list of matches
| def re(self, regex, replace_entities=False):
"""
@summary: 正则匹配
注意:网页源码<a class='page-numbers'... 会被处理成<a class="page-numbers" ; 写正则时要写<a class="(.*?)"。 但不会改非html的文本引号格式
为了使用方便,正则单双引号自动处理为不敏感
---------
@param regex: 正则或者re.compile
@param replace_entities: 为True时 去掉 等字符, 转义"为 " 等, 会使网页结构发生变化。如在网页源码中提取json, 建议设置成False
---------
@result: 列表
"""
# 将单双引号设置为不敏感
if isinstance(regex, str):
regex = re.sub("['\"]", "['\"]", regex)
return self.selector.re(regex, replace_entities)
| (self, regex, replace_entities=False) |
8,428 | feapder.network.response | re_first |
@summary: regular-expression matching
Note: page source such as <a class='page-numbers'... is normalized to <a class="page-numbers"; write patterns as <a class="(.*?)". Quote styles in non-HTML text are left unchanged
For convenience, single and double quotes in the pattern are treated as interchangeable
---------
@param regex: a pattern string or an re.compile object
@param default: default value returned when nothing matches
@param replace_entities: when True, entities such as &nbsp; are stripped and escaped quotes are unescaped, which changes the page structure; set to False when extracting JSON from the page source
---------
@result: the first match or the default value
| def re_first(self, regex, default=None, replace_entities=False):
"""
@summary: 正则匹配
注意:网页源码<a class='page-numbers'... 会被处理成<a class="page-numbers" ; 写正则时要写<a class="(.*?)"。 但不会改非html的文本引号格式
为了使用方便,正则单双引号自动处理为不敏感
---------
@param regex: 正则或者re.compile
@param default: 未匹配到, 默认值
@param replace_entities: 为True时 去掉 等字符, 转义"为 " 等, 会使网页结构发生变化。如在网页源码中提取json, 建议设置成False
---------
@result: 第一个值或默认值
"""
# 将单双引号设置为不敏感
if isinstance(regex, str):
regex = re.sub("['\"]", "['\"]", regex)
return self.selector.re_first(regex, default, replace_entities)
| (self, regex, default=None, replace_entities=False) |
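A sketch of pulling embedded JSON out of a page with re_first(); replace_entities is left at its default of False here so the JSON text is not rewritten. Built offline via from_text so it runs without network access; the HTML is illustrative.

```python
import json

from feapder.network.response import Response

html = '<script>window.__DATA__ = {"page": 1, "total": 30};</script>'
response = Response.from_text(html, url="https://example.com")

raw = response.re_first(r"window\.__DATA__ = (\{.*?\});", default="{}")
print(json.loads(raw))   # {'page': 1, 'total': 30}
```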
8,429 | feapder.network.response | xpath | null | def xpath(self, query, **kwargs):
return self.selector.xpath(query, **kwargs)
| (self, query, **kwargs) |
8,430 | feapder.core.spiders.spider | Spider |
@summary: A spider class that simplifies building crawlers
---------
| class Spider(
BaseParser, Scheduler
): # threading 中有name函数, 必须先继承BaseParser 否则其内部的name会被Schedule的基类threading.Thread的name覆盖
"""
@summary: 为了简化搭建爬虫
---------
"""
def __init__(
self,
redis_key=None,
min_task_count=1,
check_task_interval=5,
thread_count=None,
begin_callback=None,
end_callback=None,
delete_keys=(),
keep_alive=None,
auto_start_requests=None,
batch_interval=0,
wait_lock=True,
**kwargs
):
"""
@summary: 爬虫
---------
@param redis_key: 任务等数据存放在redis中的key前缀
@param min_task_count: 任务队列中最少任务数, 少于这个数量才会添加任务,默认1。start_monitor_task 模式下生效
@param check_task_interval: 检查是否还有任务的时间间隔;默认5秒
@param thread_count: 线程数,默认为配置文件中的线程数
@param begin_callback: 爬虫开始回调函数
@param end_callback: 爬虫结束回调函数
@param delete_keys: 爬虫启动时删除的key,类型: 元组/bool/string。 支持正则; 常用于清空任务队列,否则重启时会断点续爬
@param keep_alive: 爬虫是否常驻
@param auto_start_requests: 爬虫是否自动添加任务
@param batch_interval: 抓取时间间隔 默认为0 天为单位 多次启动时,只有当前时间与第一次抓取结束的时间间隔大于指定的时间间隔时,爬虫才启动
@param wait_lock: 下发任务时否等待锁,若不等待锁,可能会存在多进程同时在下发一样的任务,因此分布式环境下请将该值设置True
---------
@result:
"""
super(Spider, self).__init__(
redis_key=redis_key,
thread_count=thread_count,
begin_callback=begin_callback,
end_callback=end_callback,
delete_keys=delete_keys,
keep_alive=keep_alive,
auto_start_requests=auto_start_requests,
batch_interval=batch_interval,
wait_lock=wait_lock,
**kwargs
)
self._min_task_count = min_task_count
self._check_task_interval = check_task_interval
self._is_distributed_task = False
self._is_show_not_task = False
def start_monitor_task(self, *args, **kws):
if not self.is_reach_next_spider_time():
return
self._auto_start_requests = False
redisdb = RedisDB()
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
while True:
try:
# 检查redis中是否有任务
tab_requests = setting.TAB_REQUESTS.format(redis_key=self._redis_key)
todo_task_count = redisdb.zget_count(tab_requests)
if todo_task_count < self._min_task_count: # 添加任务
# make start requests
self.distribute_task(*args, **kws)
else:
log.info("redis 中尚有%s条积压任务,暂时不派发新任务" % todo_task_count)
except Exception as e:
log.exception(e)
if not self._keep_alive:
break
time.sleep(self._check_task_interval)
def distribute_task(self, *args, **kws):
"""
@summary: 分发任务 并将返回的request入库
---------
@param tasks:
---------
@result:
"""
self._is_distributed_task = False
for parser in self._parsers:
requests = parser.start_requests(*args, **kws)
if requests and not isinstance(requests, Iterable):
raise Exception("%s.%s返回值必须可迭代" % (parser.name, "start_requests"))
result_type = 1
for request in requests or []:
if isinstance(request, Request):
request.parser_name = request.parser_name or parser.name
self._request_buffer.put_request(request)
self._is_distributed_task = True
result_type = 1
elif isinstance(request, Item):
self._item_buffer.put_item(request)
result_type = 2
elif callable(request): # callbale的request可能是更新数据库操作的函数
if result_type == 1:
self._request_buffer.put_request(request)
else:
self._item_buffer.put_item(request)
else:
raise TypeError(
"start_requests yield result type error, expect Request、Item、callback func, bug get type: {}".format(
type(request)
)
)
self._request_buffer.flush()
self._item_buffer.flush()
if self._is_distributed_task: # 有任务时才提示启动爬虫
# begin
self.spider_begin()
# 重置已经提示无任务状态为False
self._is_show_not_task = False
elif not self._is_show_not_task: # 无任务,且没推送过无任务信息
# 发送无任务消息
msg = "《%s》start_requests无任务添加" % (self._spider_name)
log.info(msg)
# self.send_msg(msg)
self._is_show_not_task = True
def run(self):
if not self.is_reach_next_spider_time():
return
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
self._start()
while True:
try:
if self._stop_spider or self.all_thread_is_done():
if not self._is_notify_end:
self.spider_end() # 跑完一轮
self._is_notify_end = True
if not self._keep_alive:
self._stop_all_thread()
break
else:
self._is_notify_end = False
self.check_task_status()
except Exception as e:
log.exception(e)
tools.delay_time(1) # 1秒钟检查一次爬虫状态
@classmethod
def to_DebugSpider(cls, *args, **kwargs):
# DebugSpider 继承 cls
DebugSpider.__bases__ = (cls,)
DebugSpider.__name__ = cls.__name__
return DebugSpider(*args, **kwargs)
| (redis_key=None, min_task_count=1, check_task_interval=5, thread_count=None, begin_callback=None, end_callback=None, delete_keys=(), keep_alive=None, auto_start_requests=None, batch_interval=0, wait_lock=True, **kwargs) |
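A minimal Spider subclass sketch, assuming feapder is installed and the Redis connection is configured in setting.py; the class name, redis_key and URL are illustrative. start() launches the scheduler thread, which drives the run() loop above.

```python
import feapder


class DemoSpider(feapder.Spider):
    def start_requests(self):
        # May yield Request objects, Items, or callables, as distribute_task shows.
        yield feapder.Request("https://www.baidu.com")

    def parse(self, request, response):
        print(response.xpath("//title/text()").extract_first())


if __name__ == "__main__":
    DemoSpider(redis_key="demo:spider").start()
```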
8,432 | feapder.core.spiders.spider | __init__ |
@summary: spider
---------
@param redis_key: key prefix under which tasks and other data are stored in redis
@param min_task_count: minimum number of tasks in the task queue; new tasks are only added below this number, default 1. Only effective in start_monitor_task mode
@param check_task_interval: interval for checking whether tasks remain; default 5 seconds
@param thread_count: number of threads, defaults to the value in the settings file
@param begin_callback: callback invoked when the spider starts
@param end_callback: callback invoked when the spider ends
@param delete_keys: keys deleted when the spider starts; tuple/bool/string, regex supported. Commonly used to clear the task queue, otherwise a restart resumes from the breakpoint
@param keep_alive: whether the spider keeps running as a long-lived process
@param auto_start_requests: whether the spider adds tasks automatically
@param batch_interval: crawl interval in days, default 0. On repeated launches the spider only starts when the time since the end of the first crawl exceeds this interval
@param wait_lock: whether to wait for the lock when distributing tasks; without the lock multiple processes may distribute identical tasks, so set it to True in a distributed environment
---------
@result:
| def __init__(
self,
redis_key=None,
min_task_count=1,
check_task_interval=5,
thread_count=None,
begin_callback=None,
end_callback=None,
delete_keys=(),
keep_alive=None,
auto_start_requests=None,
batch_interval=0,
wait_lock=True,
**kwargs
):
"""
@summary: 爬虫
---------
@param redis_key: 任务等数据存放在redis中的key前缀
@param min_task_count: 任务队列中最少任务数, 少于这个数量才会添加任务,默认1。start_monitor_task 模式下生效
@param check_task_interval: 检查是否还有任务的时间间隔;默认5秒
@param thread_count: 线程数,默认为配置文件中的线程数
@param begin_callback: 爬虫开始回调函数
@param end_callback: 爬虫结束回调函数
@param delete_keys: 爬虫启动时删除的key,类型: 元组/bool/string。 支持正则; 常用于清空任务队列,否则重启时会断点续爬
@param keep_alive: 爬虫是否常驻
@param auto_start_requests: 爬虫是否自动添加任务
@param batch_interval: 抓取时间间隔 默认为0 天为单位 多次启动时,只有当前时间与第一次抓取结束的时间间隔大于指定的时间间隔时,爬虫才启动
@param wait_lock: 下发任务时否等待锁,若不等待锁,可能会存在多进程同时在下发一样的任务,因此分布式环境下请将该值设置True
---------
@result:
"""
super(Spider, self).__init__(
redis_key=redis_key,
thread_count=thread_count,
begin_callback=begin_callback,
end_callback=end_callback,
delete_keys=delete_keys,
keep_alive=keep_alive,
auto_start_requests=auto_start_requests,
batch_interval=batch_interval,
wait_lock=wait_lock,
**kwargs
)
self._min_task_count = min_task_count
self._check_task_interval = check_task_interval
self._is_distributed_task = False
self._is_show_not_task = False
| (self, redis_key=None, min_task_count=1, check_task_interval=5, thread_count=None, begin_callback=None, end_callback=None, delete_keys=(), keep_alive=None, auto_start_requests=None, batch_interval=0, wait_lock=True, **kwargs) |
8,445 | feapder.core.scheduler | add_parser | null | def add_parser(self, parser, **kwargs):
parser = parser(**kwargs) # parser 实例化
if isinstance(parser, BaseParser):
self._parsers.append(parser)
else:
raise ValueError("类型错误,爬虫需继承feapder.BaseParser或feapder.BatchParser")
| (self, parser, **kwargs) |
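A sketch of the add_parser mode, where one Spider instance schedules several BaseParser subclasses; class names and URLs are illustrative.

```python
import feapder


class NewsParser(feapder.BaseParser):
    def start_requests(self):
        yield feapder.Request("https://example.com/news")


class BlogParser(feapder.BaseParser):
    def start_requests(self):
        yield feapder.Request("https://example.com/blog")


if __name__ == "__main__":
    spider = feapder.Spider(redis_key="demo:multi_parser")
    spider.add_parser(NewsParser)   # the class is passed in; add_parser instantiates it
    spider.add_parser(BlogParser)
    spider.start()
```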
8,450 | feapder.core.spiders.spider | distribute_task |
@summary: distribute tasks and push the returned requests into the queue
---------
@param tasks:
---------
@result:
| def distribute_task(self, *args, **kws):
"""
@summary: 分发任务 并将返回的request入库
---------
@param tasks:
---------
@result:
"""
self._is_distributed_task = False
for parser in self._parsers:
requests = parser.start_requests(*args, **kws)
if requests and not isinstance(requests, Iterable):
raise Exception("%s.%s返回值必须可迭代" % (parser.name, "start_requests"))
result_type = 1
for request in requests or []:
if isinstance(request, Request):
request.parser_name = request.parser_name or parser.name
self._request_buffer.put_request(request)
self._is_distributed_task = True
result_type = 1
elif isinstance(request, Item):
self._item_buffer.put_item(request)
result_type = 2
elif callable(request): # callbale的request可能是更新数据库操作的函数
if result_type == 1:
self._request_buffer.put_request(request)
else:
self._item_buffer.put_item(request)
else:
raise TypeError(
"start_requests yield result type error, expect Request、Item、callback func, bug get type: {}".format(
type(request)
)
)
self._request_buffer.flush()
self._item_buffer.flush()
if self._is_distributed_task: # 有任务时才提示启动爬虫
# begin
self.spider_begin()
# 重置已经提示无任务状态为False
self._is_show_not_task = False
elif not self._is_show_not_task: # 无任务,且没推送过无任务信息
# 发送无任务消息
msg = "《%s》start_requests无任务添加" % (self._spider_name)
log.info(msg)
# self.send_msg(msg)
self._is_show_not_task = True
| (self, *args, **kws) |
8,468 | feapder.core.spiders.spider | run | null | def run(self):
if not self.is_reach_next_spider_time():
return
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
self._start()
while True:
try:
if self._stop_spider or self.all_thread_is_done():
if not self._is_notify_end:
self.spider_end() # 跑完一轮
self._is_notify_end = True
if not self._keep_alive:
self._stop_all_thread()
break
else:
self._is_notify_end = False
self.check_task_status()
except Exception as e:
log.exception(e)
tools.delay_time(1) # 1秒钟检查一次爬虫状态
| (self) |
8,476 | feapder.core.spiders.spider | start_monitor_task | null | def start_monitor_task(self, *args, **kws):
if not self.is_reach_next_spider_time():
return
self._auto_start_requests = False
redisdb = RedisDB()
if not self._parsers: # 不是add_parser 模式
self._parsers.append(self)
while True:
try:
# 检查redis中是否有任务
tab_requests = setting.TAB_REQUESTS.format(redis_key=self._redis_key)
todo_task_count = redisdb.zget_count(tab_requests)
if todo_task_count < self._min_task_count: # 添加任务
# make start requests
self.distribute_task(*args, **kws)
else:
log.info("redis 中尚有%s条积压任务,暂时不派发新任务" % todo_task_count)
except Exception as e:
log.exception(e)
if not self._keep_alive:
break
time.sleep(self._check_task_interval)
| (self, *args, **kws) |
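A sketch of the master/worker split that start_monitor_task enables: one process only tops up the Redis queue while the others consume it. It reuses the DemoSpider shape from the Spider example above; min_task_count, keep_alive and the command-line role switch are illustrative assumptions.

```python
import sys

import feapder


class DemoSpider(feapder.Spider):
    def start_requests(self):
        yield feapder.Request("https://www.baidu.com")

    def parse(self, request, response):
        print(response.xpath("//title/text()").extract_first())


if __name__ == "__main__":
    spider = DemoSpider(redis_key="demo:spider", min_task_count=100, keep_alive=True)
    if len(sys.argv) > 1 and sys.argv[1] == "master":
        spider.start_monitor_task()   # only distributes tasks into Redis
    else:
        spider.start()                # consumes requests from Redis and parses them
```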
8,480 | feapder.core.base_parser | TaskParser | null | class TaskParser(BaseParser):
def __init__(self, task_table, task_state, mysqldb=None):
self._mysqldb = mysqldb or MysqlDB() # mysqldb
self._task_state = task_state # mysql中任务表的state字段名
self._task_table = task_table # mysql中的任务表
def add_task(self):
"""
@summary: 添加任务, 每次启动start_monitor 都会调用,且在init_task之前调用
---------
---------
@result:
"""
def start_requests(self, task: PerfectDict):
"""
@summary:
---------
@param task: 任务信息 list
---------
@result:
"""
def update_task_state(self, task_id, state=1, **kwargs):
"""
@summary: 更新任务表中任务状态,做完每个任务时代码逻辑中要主动调用。可能会重写
调用方法为 yield lambda : self.update_task_state(task_id, state)
---------
@param task_id:
@param state:
---------
@result:
"""
kwargs["id"] = task_id
kwargs[self._task_state] = state
sql = tools.make_update_sql(
self._task_table, kwargs, condition="id = {task_id}".format(task_id=task_id)
)
if self._mysqldb.update(sql):
log.debug("置任务%s状态成功" % task_id)
else:
log.error("置任务%s状态失败 sql=%s" % (task_id, sql))
update_task = update_task_state
def update_task_batch(self, task_id, state=1, **kwargs):
"""
批量更新任务 多处调用,更新的字段必须一致
注意:需要 写成 yield update_task_batch(...) 否则不会更新
@param task_id:
@param state:
@param kwargs:
@return:
"""
kwargs["id"] = task_id
kwargs[self._task_state] = state
update_item = UpdateItem(**kwargs)
update_item.table_name = self._task_table
update_item.name_underline = self._task_table + "_item"
return update_item
| (task_table, task_state, mysqldb=None) |
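A sketch of the yield-based calling convention documented in update_task_state / update_task_batch above. It assumes a task-driven spider (e.g. a BatchSpider) whose parser inherits these helpers; the class name, task fields and state value are illustrative.

```python
import feapder


class DemoTaskParser(feapder.BatchParser):  # assumed to inherit the TaskParser helpers above
    def start_requests(self, task):
        # `task` carries the fields listed in task_keys, e.g. id and url.
        task_id, url = task
        yield feapder.Request(url, task_id=task_id)

    def parse(self, request, response):
        # ... extract and yield items here ...
        # Must be yielded as a callable so the request buffer executes it later.
        yield lambda: self.update_task_state(request.task_id, state=1)
```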