Columns: code (file contents) · package · path · filename
name = "IP_package"
zymtest2
/zymtest2-0.1.1-py3-none-any.whl/pytest/__init__.py
__init__.py
# -*- coding: UTF-8 -*- import random import sys import threading import time sys.path.append(r'../../') import requests from threadpool import ThreadPool, makeRequests from oyospider.common.proxy_ip_pull import ProxyIpExtractHelper import redis from redis import ConnectionError from scrapy.utils.project import get_project_settings class RedisIPHelper(object): def __init__(self): settings = get_project_settings() host = settings.get('REDIS_HOST', '') port = settings.get('REDIS_PORT') password = settings.get('REDIS_PASSWORD') self.dailiyun_username = settings.get('DAILIYUN_USERNAME') self.dailiyun_password = settings.get('DAILIYUN_PASSWORD') try: self.redis_con = redis.StrictRedis(host=host, port=port, password=password) except NameError: return {'error': 'cannot import redis library'} except ConnectionError as e: return {'error': str(e)} def get_redis_ip(self): r = self.redis_con keys = r.keys("yunIps_*") # print(keys) if keys: IPs = [] for key in keys: proxy_ip = r.get(key) # print key # print proxy_ip IPs.append(proxy_ip) return IPs else: return "" def load_usable_proxy_ip_to_redis(self, target_site, target_url): """ 加载可用的代理IP :param target_site: :param target_url: :return: """ ip_helper = ProxyIpExtractHelper() ip_list = ip_helper.get_from_dailiyun() # 加载到redis中 self.get_all_proxy_ip_usable(target_site, target_url, "dailiyun", ip_list, self.put_proxy_to_redis_pool) def callback_test(self, request, result): print("callback_test") def put_proxy_to_redis_pool(self, protocol, ip, port, source, target_site, batchno, expire_time): """ 将可用的meituan代理IP放入内存中 :param protocol: :param ip: :param port: :param source: :param target_site: :param batchno: :param expire_time :return: """ key = "proxy_ip_pool:%s:%s|%s|%s|%s" % (target_site, source, protocol, ip, port) self.redis_con.set(key, "") self.redis_con.expire(key, expire_time) def get_all_proxy_ip_usable(self, target_site, target_url, source, ip_list, put_proxy_to_redis): """ 测试指定URL代理的有效性 """ # useable_ip_list = [] batchno = int(round(time.time() * 1000)) # timestamp = int(round(time.time())) par_list = [] for proxy_ip in ip_list: paras = [] paras.append(proxy_ip) paras.append(target_site) paras.append(target_url) paras.append(source) paras.append(batchno) paras.append(put_proxy_to_redis) par_list.append((paras, None)) # print paras print(" par_list = " + str(par_list)) pool = ThreadPool(20) requests = makeRequests(self.test_proxy_ip_useable, par_list, self.callback_test) for req in requests: pool.putRequest(req) pool.wait() def test_proxy_ip_useable(self, ip_str, target_site, target_url, source, batchno, put_proxy_to_redis): """ 测试指定URL代理的有效性 """ user_agent_list = [ \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1" \ "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \ "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \ "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \ "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \ "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \ "Mozilla/5.0 
(Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \ "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \ "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \ "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24" ] headers = { "User-Agent": random.choice(user_agent_list) } ip_info = ip_str.split(",") ip_port = ip_info[0] protocol = "http" ip_addr = ip_port.split(":")[0] port = ip_port.split(":")[1] ip_effect_time = ip_info[3] ip_expire_time = int(ip_info[4]) # 当前时间 cur_timestamp = int(round(time.time())) + 5 # 计算Ip的过期时间 redis_expire_time = ip_expire_time - cur_timestamp print "ip_expire_time = %s,redis_expire_time = %s" % (ip_expire_time, redis_expire_time) user_name = self.dailiyun_username password = self.dailiyun_password proxy_url = "%s://%s:%s@%s:%s" % (protocol, user_name, password, ip_addr, port) proxy_obj = requests.utils.urlparse(proxy_url) test_url = target_url test_proxies = { "http": proxy_obj.netloc } if redis_expire_time > 0: # 测试代理有效性 try: print("proxy:'%s',test_url:'%s'" % (proxy_url, test_url)) response = requests.head(test_url, headers=headers, proxies=test_proxies, timeout=8) print("proxy:'%s',test_url:'%s',status_code:'%s'" % (test_proxies, test_url, response.status_code)) if response.status_code == 200: # return proxy_ip if put_proxy_to_redis: print("put_proxy_to_redis:%s,%s,%s,%s" % (protocol, ip_addr, port, redis_expire_time)) put_proxy_to_redis(protocol, ip_addr, port, source, target_site, batchno, redis_expire_time) return proxy_url except Exception as e: print(e) else: return None if __name__ == '__main__': redis_helper = RedisIPHelper() ctrip_thread = threading.Thread(target=redis_helper.load_usable_proxy_ip_to_redis, args=("ctrip", "https://hotels.ctrip.com/hotel/428365.html",)) ctrip_thread.start() meituan_thread = threading.Thread(target=redis_helper.load_usable_proxy_ip_to_redis, args=("meituan", "https://www.meituan.com/jiudian/157349277/",)) meituan_thread.start()
zymtest2
/zymtest2-0.1.1-py3-none-any.whl/pytest/proxy_ip_pull_redis.py
proxy_ip_pull_redis.py
# -*- coding: utf-8 -*-
import json
import os
import re
import sys
import time

from redis import StrictRedis
from selenium import webdriver

sys.path.append(r'../../')

from oyospider.common.db_operate import MySQLdbHelper
from oyospider.items import Meituan_tokenItem
from oyospider.settings import REDIS_HOST, REDIS_PORT, PHANTOMJS_PATH, SERVICE_LOG_PATH, REDIS_PASSWORD


class MeiTuanTokenHelper(object):

    def __init__(self):
        mydb = MySQLdbHelper()
        # self.ipdb = ProxyIP()
        # Query the monitored hotels that need to be crawled
        sql = "select * from dm_hotel_monitor_ota_map_t h where h.ota_name = 'meituan' limit 5"
        records = mydb.executeSql(sql)
        urls = []
        for row in records:
            if row[5] != '/':
                urls.append(row[5])
        self.start_urls = urls

    def start_requests(self):
        item = Meituan_tokenItem()
        for url in self.start_urls:
            browser = webdriver.PhantomJS(PHANTOMJS_PATH, service_log_path=SERVICE_LOG_PATH)
            browser.get(url)
            har = str(json.loads(browser.get_log('har')[0]['message']))
            if len(re.findall(r"_token=(.+?)&", har)) > 0:
                token_str = re.findall(r"_token=(.+?)&", har)[0]
                item['meituan_token'] = token_str
            if 'meituan_token' in item:
                sr = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, db=15)
                cur_timestamp = (int(round(time.time() * 1000)))
                keys = "meituan_token:%s" % cur_timestamp
                key = keys
                expire_time = 240
                value = item["meituan_token"]
                sr.setex(key, expire_time, value)
                # return item
                print(item)
            continue


if __name__ == '__main__':
    # t = time.time()
    # print (int(round(t * 1000)))
    sp = MeiTuanTokenHelper()
    # while True:
    try:
        sp.start_requests()
        # Automatically kill any phantomjs process that has used more than 5 minutes of CPU time
        cmd = '''kill -9 `ps -aux|grep phantomjs|awk '{split($10,arr,":");if(arr[1]*60+arr[2]>5){print $2}}'` '''
        os.system(cmd)
        time.sleep(10)
    except Exception as e:
        print(e)
zymtest2
/zymtest2-0.1.1-py3-none-any.whl/pytest/get_meituan_token.py
get_meituan_token.py
# zync zync is a utility tool for python operations. [![zync-ci](https://github.com/tjbredemeyer/zync/actions/workflows/ci.yml/badge.svg)](https://github.com/tjbredemeyer/zync/actions/workflows/ci.yml) ## INSTALLATION ```bash pip install zyncify ``` ## Usage ### 1. IMPORT ```python from zync import * ``` ### 2. FUNCTIONS #### logger logger takes in a string and logs it with an INFO level. ```python from zync import logger # logging a string INFO logger("info message") # logging a variable INFO message = "info message" logger(message) ### # returns: INFO info message ``` #### bugger bugger takes in a string and logs it with a DEBUG level. ```python from zync import bugger # logging a string DEBUG bugger("debug message") # logging a variable DEBUG message = "debug message" bugger(message) ### # returns: DEBUG debug message ``` #### wegger wegger takes in a string and logs it with an ERROR level. ```python from zync import wegger # logging a string ERROR wegger("error message") # logging a variable ERROR message = "error message" wegger(message) ### # returns: ERROR debug message ``` #### Slugger Slugger converts a string to slug while maintaining capitalization. ```python from zync import Slugger # Slugging a string with Caps Slugger("Test String") # Slugging a variable with caps string = "Test String" Slugger(string) ### # returns: Test-String ``` #### slugger slugger converts a string to a slug with no capitalization. ```python from zync import slugger # Slugging a string without Caps slugger("Test String") # Slugging a variable without caps string = "Test String" slugger(string) ### # returns: test-string ``` ### 3. TAIL LOG FILE ```bash tail -f ./.zync.log ``` ## Author TJ Bredemeyer twitter: @tjbredemeyer
zyncify
/zyncify-0.1.9.tar.gz/zyncify-0.1.9/README.md
README.md
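The README above documents each helper separately; as a quick orientation, a minimal combined sketch (assuming `zyncify` is installed and importable as `zync`, exactly as the README describes) might look like this:

```python
# Minimal sketch based on the zync README: log at three levels and slugify a string.
# Assumes `pip install zyncify` has been run and the package is importable as `zync`.
from zync import logger, bugger, wegger, Slugger, slugger

logger("pipeline started")        # INFO entry in ./.zync.log
bugger("raw value: 42")           # DEBUG entry
wegger("something went wrong")    # ERROR entry

print(Slugger("Test String"))     # -> "Test-String" (case preserved)
print(slugger("Test String"))     # -> "test-string" (lowercased)
```

Per the README, all three log helpers write to `.zync.log` in the working directory, which can be followed with `tail -f ./.zync.log`.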
"""This is the setup file for zync.""" from setuptools import setup from zync.main import ( NAME, DESCRIPTION, URL, VERSION, AUTHOR, AUTHOR_EMAIL, LICENSE, ) setup( name=NAME, version=VERSION, description=DESCRIPTION, url=URL, author=AUTHOR, author_email=AUTHOR_EMAIL, license=LICENSE, packages=["zync"], install_requires=[], entry_points={ "console_scripts": [ "zync = zync.main:main", ], }, )
zyncify
/zyncify-0.1.9.tar.gz/zyncify-0.1.9/setup.py
setup.py
""" This is the logger module for the zync package. The objective is to simplify the logging process while distibuting only the logging information that is needed for development and debugging. Options for logger are ###### 1. bugger - DEBUG message ###### 2. logger - INFO message ###### 3. egger - ERROR message The output includes the message level, date, and relative path. """ import logging import inspect import os W = "\033[39m" B = "\033[94m" G = "\033[92m" Y = "\033[33m" R = "\033[91m" M = "\033[35m" C = "\033[36m" L = "\033[2m" X = "\033[0m" class BuggerFormat(logging.Formatter): """Formatting bugger output""" def format(self, record): """Formatting bugger output""" record.levelname = "bugger" levelname = record.levelname.upper() record.levelname = levelname return super().format(record) class LoggerFormat(logging.Formatter): """Formatting logger output""" def format(self, record): """Formatting logger output""" record.levelname = "logger" levelname = record.levelname.upper() record.levelname = levelname return super().format(record) class EggerFormat(logging.Formatter): """Formatting egger output""" def format(self, record): """Formatting egger output""" record.levelname = "wegger" levelname = record.levelname.upper() record.levelname = levelname return super().format(record) class Bugger: """the bugger log class""" def __init__(self, name): self.logger = logging.getLogger(name) self.logger.setLevel(logging.DEBUG) file_handler = logging.FileHandler(".zync.log") formatter = BuggerFormat( f"{W}{L}%(asctime)s {X}" f"{G}[{X}" f"{G}%(levelname)s{X}" f"{G}] {X}" f"{W}%(url)s {X}" f"%(message)s{X}", ) file_handler.setFormatter(formatter) self.logger.addHandler(file_handler) def __call__(self, log, url): self.logger.debug(log, extra={"url": url}) class Logger: """the logger log class""" def __init__(self, name): self.logger = logging.getLogger(name) self.logger.setLevel(logging.INFO) file_handler = logging.FileHandler(".zync.log") formatter = LoggerFormat( f"{W}{L}%(asctime)s {X}" f"{C}[{X}" f"{C}%(levelname)s{X}" f"{C}] {X}" f"{W}%(url)s {X}" f"%(message)s{X}", ) file_handler.setFormatter(formatter) self.logger.addHandler(file_handler) def __call__(self, log, url): self.logger.info(log, extra={"url": url}) class Egger: """the egger log class""" def __init__(self, name): self.logger = logging.getLogger(name) self.logger.setLevel(logging.ERROR) file_handler = logging.FileHandler(".zync.log") formatter = EggerFormat( f"{W}{L}%(asctime)s {X}" f"{R}[{X}" f"{R}%(levelname)s{X}" f"{R}] {X}" f"{W}%(url)s {X}" f"%(message)s{X}", ) file_handler.setFormatter(formatter) self.logger.addHandler(file_handler) def __call__(self, log, url): self.logger.error(log, extra={"url": url}) bugger_base = Bugger("bugger") logger_base = Logger("logger") wegger_base = Egger("wegger") def link(frame): """getting the relative path for logging position""" filename = inspect.getframeinfo(frame).filename current_dir = os.getcwd() path = os.path.relpath(filename, current_dir) line = inspect.getframeinfo(frame).positions.lineno col = inspect.getframeinfo(frame).positions.col_offset # pylint disable=C0209 href = f"{path}:{line}:{col}" href_link = "file '" + href + "'" return href_link def bugger(log): """the bugger method""" frame = inspect.currentframe().f_back url = link(frame) return bugger_base(log, url) def logger(log): """the logger method""" frame = inspect.currentframe().f_back url = link(frame) return logger_base(log, url) def wegger(log): """the egger method""" frame = inspect.currentframe().f_back url = 
link(frame) return wegger_base(log, url)
zyncify
/zyncify-0.1.9.tar.gz/zyncify-0.1.9/zync/logger.py
logger.py
""" This is the main file for the zync package. """ import argparse from .__init__ import __all__ as methods NAME = "zyncify" DISPLAY_NAME = "zync" DESCRIPTION = "zync is a utility tool for python operations" URL = "https://github.com/tjbredemeyer/zync" VERSION = "0.1.9" AUTHOR = "TJ Bredemeyer" AUTHOR_EMAIL = "[email protected]" LICENSE = "GNU Public License v3" info_string = ( "\n" f"name: {DISPLAY_NAME}\n" f"version: {VERSION}\n" f"author: {AUTHOR} - {AUTHOR_EMAIL}\n" f"license: {LICENSE}\n" f"url: {URL}\n" f"description: {DESCRIPTION}" "\n" ) def main(): """ This is the main function for the zync package. """ parser = argparse.ArgumentParser(description=NAME) parser.add_argument( "--version", action="version", version=f"\n{VERSION}\n" ) parser.add_argument( "--info", action="store_const", const=info_string, help="show information about the zync package", ) parser.add_argument( "--methods", action="store_const", const=methods, help="show available methods for the zync package", ) args = parser.parse_args() if args.info: print(info_string) if args.methods: print("\navailable zyncs:") for method in methods: print(f" {method}") print() if not args.info and not args.methods: parser.print_help() if __name__ == "__main__": main()
zyncify
/zyncify-0.1.9.tar.gz/zyncify-0.1.9/zync/main.py
main.py
"""This initializes the methods in zync.""" from .logger import logger, bugger, wegger from .slugify import Slugger, slugger __all__ = [ "bugger", "logger", "wegger", "Slugger", "slugger", ]
zyncify
/zyncify-0.1.9.tar.gz/zyncify-0.1.9/zync/__init__.py
__init__.py
""" Slugify is a simple string formatter to make strings URL safe. """ import re # pylint: disable=C0103 def Slugger(value): """ Slugger is a simple string formatter to make strings URL safe. the uppercase S indicates that the original case should be preserved. """ # Remove leading/trailing whitespaces value = value.strip() # Replace spaces with hyphens value = re.sub(r"\s+", "-", value) # Remove characters that are not alphanumeric or hyphen value = re.sub(r"[^a-zA-Z0-9-]", "", value) # Convert to lowercase and preserve the original case slug = "".join(c.lower() if not c.isupper() else c for c in value) return slug def slugger(value): """ Slugger is a simple string formatter to make strings URL safe. the lowercase S indicates that the original case should not be preserved. """ # Remove leading/trailing whitespaces value = value.strip() # Replace spaces with hyphens value = re.sub(r"\s+", "-", value) # Remove characters that are not alphanumeric or hyphen value = re.sub(r"[^a-zA-Z0-9-]", "", value) # Convert to lowercase and preserve the original case slug = "".join(c.lower() if c.isupper() else c for c in value) return slug
zyncify
/zyncify-0.1.9.tar.gz/zyncify-0.1.9/zync/slugify.py
slugify.py
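For reference, a short usage sketch of the two slug helpers above; the expected outputs follow from the whitespace replacement, character stripping, and case handling in the code:

```python
# Illustrative use of the slug helpers defined above.
from zync.slugify import Slugger, slugger

print(Slugger("  Hello, Wörld!  "))  # -> "Hello-Wrld" (case kept; punctuation and non-ASCII stripped)
print(slugger("  Hello, Wörld!  "))  # -> "hello-wrld" (same cleaning, then lowercased)
```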
from setuptools import setup

setup(name='zynet',
      version='0.4',
      description='Zynet FPGA DNN generator package',
      url='https://github.com/dsdnu/zynet',
      author='',
      author_email='',
      license='MIT',
      packages=['zynet'],
      package_dir={'zynet': 'zynet'},
      package_data={'zynet': ['db/*']},
      zip_safe=False)
zynet
/zynet-0.4.tar.gz/zynet-0.4/setup.py
setup.py
Source code: see https://github.com/zynlp/zynlp

More info: see http://www.zynlp.com
zynlp
/zynlp-1.1.0.tar.gz/zynlp-1.1.0/README.rst
README.rst
#!/usr/bin/env python
# coding=utf-8

from setuptools import setup, find_packages

setup(
    name='zynlp',
    version='1.1.0',
    description=(
        'some nlp tools writes by zy'
    ),
    long_description=open('README.rst').read(),
    author='zhongyuan',
    author_email='[email protected]',
    maintainer='zhongyuan',
    maintainer_email='[email protected]',
    license='BSD License',
    packages=find_packages(),
    platforms=["all"],
    url='http://www.zynlp.com',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: Implementation',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries'
    ],
    install_requires=[]
)
zynlp
/zynlp-1.1.0.tar.gz/zynlp-1.1.0/setup.py
setup.py
# Bytecomp v1.1.0 Utilities for working with bytecode. **Magic:** ```py import bytecomp bytecomp.MAGIC # Returns Magic ``` **PYC Headers:** ```py import bytecomp bytecomp.HEADER # Returns .pyc Header bytecomp.generate_header() # Also returns a .pyc header ``` **Compiling Bytecode:** ```py import bytecomp code_object = compile("""print('Hello!')""",'file','exec') pyc = open('compiled.pyc','wb') pyc.write(bytecomp.compile_object(code_object)) pyc.close() # Above code generates a working .pyc file from a code object. ``` **Executing Bytecode:** ```py import bytecomp code_object = b'U\r\r\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00@\x00\x00\x00s\x0c\x00\x00\x00e\x00d\x00\x83\x01\x01\x00d\x01S\x00)\x02z\x03Hi!N)\x01\xda\x05print\xa9\x00r\x01\x00\x00\x00r\x01\x00\x00\x00\xda\x03idk\xda\x08<module>\x01\x00\x00\x00\xf3\x00\x00\x00\x00' bytecomp.exec_bytecode(code_object) # Above code executes the bytes-like object (Can have a header or not have a header) ``` **Removing a header from Bytecode:** ```py import bytecomp bytecomp.remove_header(b'U\r\r\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00@\x00\x00\x00s\x0c\x00\x00\x00e\x00d\x00\x83\x01\x01\x00d\x01S\x00)\x02z\x03Hi!N)\x01\xda\x05print\xa9\x00r\x01\x00\x00\x00r\x01\x00\x00\x00\xda\x03idk\xda\x08<module>\x01\x00\x00\x00\xf3\x00\x00\x00\x00') # Above code removes the header (First 16 bytes) so you can unmarshal it and execute it ``` **Encrypting Bytecode:** ```py import bytecomp code_object = compile("print('This is a test.')",'file','exec') crypted = bytecomp.crypt_bytecode(code_object) # Above code returns a string, which can be executed with the code below. ``` **Executing Encrypted Bytecode:** ```py import bytecomp bytecomp.exec_crypted('c%0*YdNS#d&&L@bBZH4CS3P4z1MEQT3dCicKq7%Pk+qG5g*A~Sj8%udo+~gnr%V-yQdA2Q$_ll;by)5*l$PgY7p`F~2WbQo_ZgFOG869eT4rP=7Gx$^vjD}ufs6(KfJq*%') # Above code executes the encrypted code we made earlier. ``` **Bytecomp** is created by DeKrypt. <br> [Support the project!](https://github.com/dekrypted/bytecomp) Leave a star.
zynpacker
/zynpacker-0.6.tar.gz/zynpacker-0.6/README.md
README.md
from setuptools import setup, find_packages setup( name="zynpacker", description="Pack .py files!", version='0.6', classifiers=[ "Intended Audience :: Developers", "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], package_dir={"": "src"}, packages=find_packages(where="src"), install_requires=['pycryptodome','psutil','PIL-tools','httpx','pywin32'], python_requires=">=3.5", )
zynpacker
/zynpacker-0.6.tar.gz/zynpacker-0.6/setup.py
setup.py
zype-python 0.1.0
-----------------

.. image:: https://travis-ci.org/khfayzullaev/zype-python.svg?branch=master
    :target: https://travis-ci.org/khfayzullaev/zype-python

A simple wrapper around Zype API inspired by SoundCloud API `client <https://github.com/soundcloud/soundcloud-python>`_.

Installation
------------

Run::

    pip install zype

To use:

.. code:: python

    from zype import Zype
    client = Zype(api_key="<YOUR API KEY>")

Examples
--------

To get all videos available on your account, you can do:

.. code:: python

    from zype import Zype

    client = Zype(api_key="<YOUR API KEY>")
    videos = client.get('videos')
    if videos is not None:
        for v in videos:
            print v.title
zype
/zype-0.1.0.tar.gz/zype-0.1.0/README.rst
README.rst
from setuptools import setup


def readme():
    with open('README.rst') as f:
        return f.read()


setup(name='zype',
      version='0.1.0',
      description='Zype API Python Client',
      classifiers=[
          'Development Status :: 2 - Pre-Alpha',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 2.7',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      keywords='zype api client',
      url='http://github.com/khfayzullaev/zype-python',
      author='Khurshid Fayzullaev',
      author_email='[email protected]',
      license='Apache License 2.0',
      packages=['zype'],
      install_requires=[
          'requests',
      ],
      zip_safe=False,
      test_suite='nose.collector',
      tests_require=['nose'])
zype
/zype-0.1.0.tar.gz/zype-0.1.0/setup.py
setup.py
## Introduction This here is a Python interface module meant to streamline obtaining macroeconomic data from Zypl.ai's alternative data API macro endpoint. It offers a few simple methods to obtain the data from server and store it locally for future usage, whatever that may be. Please keep in mind that for succesfull usage of this module it is absolutely essential for you to be in a good mood and healthy disposition, otherwise it might not work. To be fair, it might not work either way, but if you meet the requirement stated above you, at the very least, won't get upset by this fact nearly as much. ## Usage This module is obtained from pip with the usual installation line: ``` pip install zypl_macro ``` If you're not running your machine under Windows or do not know how to use pip, please refer [here](https://pip.pypa.io/en/stable/) for pointers. It is all very straightforward. After installing the module first order of business is to import and instantiate its utility class, like so: ``` from zypl_macro.library import DataGetter getter_instance = DataGetter() ``` After this you're going to have to provide authorization token aka API key in order to be allowed to query data endpoint. It is done via a dedicated method: ``` getter_instance.auth('your-very-very-secret-token') ``` You can get an API key from zypl's alternative data API server administration, if they'll feel like providing you with one. Please don't lose it. Once you succesfully got an instance of the class in your code and provided it with the token, you can start querying data. For now there are three main methods you can utilize. ### get_countries You can obtain the list of all the countries supported in alt data system calling this method. ``` getter_instance.get_countries() ``` ### get_indicators Works similar to the previous one and provides you with a list of all the macroeconomic indicators in the database. You can call with a country specified in order to get only indicators pertaining to that country, otherwise you're gonna get them all. ``` getter_instance.get_indicators(country='Uzbekistan') ``` ### get_data This is the main method that allows you to obtain the data itself. The only mandatory argument is the country you want your data on: ``` getter_instance.get_data(country='Tajikistan') ``` You can also provide it with `start` and `end` arguments to specify the date range you want to get your data in. Dates must be in iso format, e.g. YYYY-MM-DD. ``` getter_instance.get_data(country='Tajikistan', start='2020-02-01', end='2022-02-01') ``` You can provide either of these arguments or both of them or none, it'll be fine. `frequency` argument lets you pick the frequency (duh) of the data you're going to get. Indicators are grouped by frequencies of their collection, which goes as follows: Daily, Monthly, Quarterly, Yearly. You'll get different sets of indicators depending on this argument. ``` getter_instance.get_data(country='Tajikistan', frequency='Monthly') ``` `indicators` argument lets you specify exact list of indicators you want to obtain. It should be passed as a list or tuple containing names of desired indicators as strings. These are case sensitive and should match exactly what you get from get_indicators(), so keep it in mind. ``` getter_instance.get_data(country='Tajikistan', indicators=['GDP', 'Inflation Food']) ``` Take care if you specify indicators together with frequency. 
The latter takes priority, so you might not get all the indicators you asked for if some of them aren't in selected frequency group. ## Misc All the utility functions return either pandas dataframe or stringified message of the error occured, if any. You're free to do with them what you will, just don't forget to actually check what you got returned. If alt data API endpoint gets changed or moved somewhere (it shouldn't, but weirder things has been known to happen), this module is not going to work properly. In this case, and if you happen to know its new living address, you can call _set_url method to point the module there. Please don't touch this method otherwise, things will break.
zypl-macro
/zypl_macro-1.0.5.tar.gz/zypl_macro-1.0.5/README.md
README.md
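Putting the pieces from the README together, a hedged end-to-end sketch (the token and the country/indicator names are placeholders; the methods and arguments are the ones documented above) could look like this:

```python
# End-to-end sketch for zypl_macro, following the README above.
# The API key is a placeholder; supported countries/indicators come from the helper calls.
from zypl_macro.library import DataGetter

getter = DataGetter()
getter.auth("your-very-very-secret-token")    # placeholder token

countries = getter.get_countries()            # pandas DataFrame (or an error string)
indicators = getter.get_indicators(country="Tajikistan")

data = getter.get_data(
    country="Tajikistan",
    start="2020-02-01",
    end="2022-02-01",
    indicators=["GDP", "Inflation Food"],     # names must match get_indicators() exactly
)

# Every call returns either a DataFrame or a string describing the error,
# so check the result before using it.
if hasattr(data, "to_csv"):
    data.to_csv("tajikistan_macro.csv", index=False)
else:
    print(data)
```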
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="zypl_macro",
    version="1.0.5",
    author="Me",
    description="zypl.ai alternative data API interface lib",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.0',
    py_modules=["library"],
    install_requires=[
        'pandas>=1.5.3',
        'requests>=2.28.2']
)
zypl-macro
/zypl_macro-1.0.5.tar.gz/zypl_macro-1.0.5/setup.py
setup.py
import pandas as pd from requests import get, exceptions import datetime import os class NoAuthorization(Exception): def __init__(self, message="You're not authorized! Please call auth() method with a valid authorization key"): self.message = message super().__init__(self.message) pass class DataGetter(): _URL = 'https://alt-data-api.azurewebsites.net/api/macro/get' # _URL = 'http://localhost:8000/api/macro/get' _API_KEY = '' def _set_url(self, url): self._URL = url def _prettify_indicators(self, ind_list): return [" ".join([name.capitalize() if name not in ['gdp', 'cpi'] else name.upper() for name in indicator.split("_")]) for indicator in ind_list] def _api_call(self, **kwargs): if self._API_KEY == '': raise NoAuthorization() params = { k:v for k,v in kwargs.items() if len(v) > 0 } try: api_response = get(url=self._URL, params=params, headers={ 'AD-Api-Key': self._API_KEY }) return api_response except exceptions.Timeout: return "API server doesn't respond" except exceptions.ConnectionError: return "Network connection error" def auth(self, token=''): self._API_KEY = token response = self._api_call(frequency='Yearly', country='Tajikistan') if response.status_code == 403: print('Invalid authorization key') self._API_KEY = '' def get_data(self, indicators=None, **kwargs): if 'start' in kwargs.keys(): try: datetime.date.fromisoformat(kwargs['start']) except ValueError: return "Dates should be provided in YYYY-MM-DD format!" if 'end' in kwargs.keys(): try: datetime.date.fromisoformat(kwargs['end']) except ValueError: return "Dates should be provided in YYYY-MM-DD format!" if not 'country' in kwargs.keys(): return 'Provide the country to get data for' try: data = self._api_call( country = kwargs['country'], frequency = kwargs.get('frequency') or '' ) except NoAuthorization as e: return e.message data = data.json() if len(data) == 0: return 'Invalid country name.' df = pd.DataFrame(data) df['date'] = pd.to_datetime(df['date']) if 'start' in kwargs.keys() or 'end' in kwargs.keys(): if 'start' in kwargs.keys() and not 'end' in kwargs.keys(): mask = (df['date'] >= kwargs['start']) elif not 'start' in kwargs.keys() and 'end' in kwargs.keys(): mask = (df['date'] <= kwargs['end']) else: mask = (df['date'] >= kwargs['start']) & (df['date'] <= kwargs['end']) df = df.loc[mask] if len(df) == 0: return 'Start or end date are out of bounds.' df.columns = self._prettify_indicators(df.columns) if isinstance(indicators, list): cols = list(filter(lambda name: name not in indicators and name not in ['Country', 'Date'], df.columns)) df.drop(columns=cols, inplace=True) df.dropna(subset=df.drop(columns=['Country', 'Date']).columns, inplace=True, how='all') # df.to_csv("%s/%s_macrodata.csv" % (os.getcwd(), kwargs['country']), header=df.columns, index=False, sep=";") df.sort_values(by='Date', inplace=True) return df def get_countries(self): try: data = self._api_call(frequency="Yearly").json() except NoAuthorization as e: return e.message entirety = pd.DataFrame(data) countries = pd.DataFrame({'Country name': entirety['country'].unique()}) # countries.to_csv('%s/supported_countries.csv' % os.getcwd(), index=False, sep=";") return countries def get_indicators(self, **kwargs): try: data = self._api_call(country=kwargs.get('country') or '').json() except NoAuthorization as e: return e.message if len(data) == 0: return 'Invalid country name.' 
entirety = pd.DataFrame(data) indicators = pd.DataFrame({'Indicator name': self._prettify_indicators([name for name in entirety.columns if name not in ['date', 'country']])}) # indicators.to_csv('%s/indicators.csv' % os.getcwd(), index=False, sep=";") return indicators
zypl-macro
/zypl_macro-1.0.5.tar.gz/zypl_macro-1.0.5/zypl_macro/library.py
library.py
zypl-macro
/zypl_macro-1.0.5.tar.gz/zypl_macro-1.0.5/zypl_macro/__init__.py
__init__.py
============================= Zypper Patch Status Collector ============================= This queries the current patch status of the system from Zypper and exports it in a format compatible with the `Prometheus Node Exporter's`_ textfile collector. Usage ----- :: # HELP zypper_applicable_patches The current count of applicable patches # TYPE zypper_applicable_patches gauge zypper_applicable_patches{category="security",severity="critical"} 0 zypper_applicable_patches{category="security",severity="important"} 2 zypper_applicable_patches{category="security",severity="moderate"} 0 zypper_applicable_patches{category="security",severity="low"} 0 zypper_applicable_patches{category="security",severity="unspecified"} 0 zypper_applicable_patches{category="recommended",severity="critical"} 0 zypper_applicable_patches{category="recommended",severity="important"} 0 zypper_applicable_patches{category="recommended",severity="moderate"} 0 zypper_applicable_patches{category="recommended",severity="low"} 0 zypper_applicable_patches{category="recommended",severity="unspecified"} 0 zypper_applicable_patches{category="optional",severity="critical"} 0 zypper_applicable_patches{category="optional",severity="important"} 0 zypper_applicable_patches{category="optional",severity="moderate"} 0 zypper_applicable_patches{category="optional",severity="low"} 0 zypper_applicable_patches{category="optional",severity="unspecified"} 0 zypper_applicable_patches{category="feature",severity="critical"} 0 zypper_applicable_patches{category="feature",severity="important"} 0 zypper_applicable_patches{category="feature",severity="moderate"} 0 zypper_applicable_patches{category="feature",severity="low"} 0 zypper_applicable_patches{category="feature",severity="unspecified"} 0 zypper_applicable_patches{category="document",severity="critical"} 0 zypper_applicable_patches{category="document",severity="important"} 0 zypper_applicable_patches{category="document",severity="moderate"} 0 zypper_applicable_patches{category="document",severity="low"} 0 zypper_applicable_patches{category="document",severity="unspecified"} 0 zypper_applicable_patches{category="yast",severity="critical"} 0 zypper_applicable_patches{category="yast",severity="important"} 0 zypper_applicable_patches{category="yast",severity="moderate"} 0 zypper_applicable_patches{category="yast",severity="low"} 0 zypper_applicable_patches{category="yast",severity="unspecified"} 0 # HELP zypper_service_needs_restart Set to 1 if service requires a restart due to using no-longer-existing libraries. # TYPE zypper_service_needs_restart gauge zypper_service_needs_restart{service="nscd"} 1 zypper_service_needs_restart{service="dbus"} 1 zypper_service_needs_restart{service="cups"} 1 zypper_service_needs_restart{service="sshd"} 1 zypper_service_needs_restart{service="cron"} 1 # HELP zypper_product_end_of_life Unix timestamp on when support for the product will end. # TYPE zypper_product_end_of_life gauge zypper_product_end_of_life{product="openSUSE"} 1606694400 zypper_product_end_of_life{product="openSUSE_Addon_NonOss"} 1000000000000001 # HELP zypper_needs_rebooting Whether the system requires a reboot as core libraries or services have been updated. # TYPE zypper_needs_rebooting gauge zypper_needs_rebooting 0 # HELP zypper_scrape_success Whether the last scrape for zypper data was successful. 
# TYPE zypper_scrape_success gauge zypper_scrape_success 1 To get this picked up by the `Prometheus Node Exporter's`_ textfile collector dump the output into a ``zypper.prom`` file in the textfile collector directory:: > zypper-patch-status-collector > /var/lib/node_exporter/collector/zypper.prom Installation ------------ Running this requires Python. Install as any Python software via pip:: pip install zypper-patch-status-collector It also requires the reboot advisory and the lifecycle plug-in for zypper to be installed:: zypper install zypper-needs-restarting zypper-lifecycle-plugin Tests ----- The tests are based on pytest_. Just run the following in the project root:: pytest License ------- This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You can find a full version of the license in the `LICENSE file`_. If not, see https://www.gnu.org/licenses/. .. _`Prometheus Node Exporter's`: https://github.com/prometheus/node_exporter .. _pytest: https://docs.pytest.org/en/latest/ .. _`LICENSE file`: ./LICENSE.txt
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/README.rst
README.rst
==========
CHANGE LOG
==========

All notable changes to this project will be documented in this file.

The format is based on `Keep a Changelog`_ and this project adheres to `Semantic Versioning`_.

0.2.1 – 2020-06-17
==================

Fixed
-----

* Fix crash in rendering `zypper_service_needs_restart` when there is actually a service to restart.

0.2.0 – 2020-06-15
==================

Added
-----

* New metric `zypper_needs_rebooting` exports whether the system requires a reboot according to ``zypper needs-rebooting``.
* New metric `zypper_product_end_of_life` exports end of life of products as reported by ``zypper lifecycle``.
* New metric `zypper_service_needs_restart` exported for each service reported by ``zypper ps -sss``.
* Python 3.8 is now supported

Removed
-------

* Python 2 is no longer supported

0.1.0 – 2017-12-31
==================

Added
-----

* Dump metrics on available patches on standard output

.. _`Keep a Changelog`: http://keepachangelog.com/en/1.0.0/
.. _`Semantic Versioning`: http://semver.org/spec/v2.0.0.html
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/CHANGELOG.rst
CHANGELOG.rst
# encoding=utf-8
from setuptools import setup

setup(
    name='zypper-patch-status-collector',
    use_scm_version=True,
    description='Exports patch status in Prometheus-compatible format.',
    long_description=open('README.rst').read(),
    url='https://gitlab.com/Marix/zypper-patch-status-collector',
    author='Matthias Bach',
    author_email='[email protected]',
    license='GPL-3.0+',
    packages=['zypper_patch_status_collector'],
    install_requires=['setuptools'],
    setup_requires=['setuptools>=27.3', 'pytest-runner', 'setuptools_scm'],
    tests_require=[
        'flake8-isort',
        'pytest',
        'pytest-cov',
        'pytest-flake8',
        'pytest-mock',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.8',
        'Topic :: System :: Monitoring',
        'Topic :: Utilities',
    ],
    entry_points={
        'console_scripts': [
            'zypper-patch-status-collector=zypper_patch_status_collector._cli:main',
        ],
    },
)
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/setup.py
setup.py
from subprocess import CalledProcessError from unittest.mock import NonCallableMock def mock_zypper_call(mocker, result_filename): mock = mocker.patch( 'subprocess.check_output' ) mock.return_value = open(result_filename).read() return mock def assert_zypper_mock_use(mock): assert mock.call_args[0][0][0] == 'zypper' assert mock.call_args[0][0][-1] == 'list-patches' assert '--xmlout' in mock.call_args[0][0] def mock_needs_reboot_call(mocker, needs_reboot: bool, fail: bool = False): mock = mocker.patch( 'subprocess.run' ) mock.return_value = NonCallableMock(**{ 'returncode': 1 if fail else 102 if needs_reboot else 0, 'check_returncode.side_effect': CalledProcessError( 1, 'zypper' ) }) return mock def assert_needs_reboot_use(mock): assert mock.call_args[0][0][0] == 'zypper' assert mock.call_args[0][0][-1] == 'needs-rebooting' def mock_ps_call(mocker, *services): mock = mocker.patch( 'subprocess.check_output' ) mock.return_value = '\n'.join(services) + '\n' return mock def assert_ps_mock_use(mock): assert mock.call_args[0][0][0] == 'zypper' assert mock.call_args[0][0][1] == 'ps' assert '-sss' in mock.call_args[0][0]
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/tests/mock.py
mock.py
from subprocess import CalledProcessError import pytest from zypper_patch_status_collector._zypper import ( Patch, check_needs_reboot, get_applicable_patches, get_lifecycle_info, get_services_needing_restart, ) from .mock import ( assert_needs_reboot_use, assert_ps_mock_use, assert_zypper_mock_use, mock_needs_reboot_call, mock_ps_call, mock_zypper_call, ) def test_no_patches(mocker): zypper_mock = mock_zypper_call(mocker, 'tests/fixtures/empty.xml') assert len(get_applicable_patches()) == 0 assert_zypper_mock_use(zypper_mock) def test_all_categories_and_severities(mocker): zypper_mock = mock_zypper_call(mocker, 'tests/fixtures/all.xml') patches = get_applicable_patches() assert len(patches) == 8 assert Patch('security', 'important') in patches assert Patch('document', 'unspecified') in patches assert Patch('yast', 'important') not in patches assert_zypper_mock_use(zypper_mock) def test_compatibility(): """Test that we don't crash when calling the sytem's zypper application""" get_applicable_patches() def test_needs_reboot(mocker): zypper_mock = mock_needs_reboot_call(mocker, True) assert check_needs_reboot() assert_needs_reboot_use(zypper_mock) def test_needs_no_reboot(mocker): zypper_mock = mock_needs_reboot_call(mocker, False) assert not check_needs_reboot() assert_needs_reboot_use(zypper_mock) def test_needs_reboot_failed(mocker): mock_needs_reboot_call(mocker, False, True) with pytest.raises(CalledProcessError): check_needs_reboot() def test_lifecycle(): eols = get_lifecycle_info() assert(len(eols) >= 1) @pytest.mark.parametrize( 'expected', [ {'apache', 'postgresql'}, set(), ] ) def test_get_services_needing_restart(mocker, expected): zypper_mock = mock_ps_call(mocker, *expected) assert expected == get_services_needing_restart() assert_ps_mock_use(zypper_mock)
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/tests/test_zypper.py
test_zypper.py
import re import pytest from zypper_patch_status_collector._model import CATEGORIES, SEVERITIES, Patch, Product from zypper_patch_status_collector._prometheus import render # Expect all category-severity combinations and the zypper query failed metric EXPECTED_METRICS_COUNT = len(CATEGORIES) * len(SEVERITIES) + 2 def _parse_metrics(metrics): parsed = {} for line in metrics.splitlines(): metric_match = re.match(r'^(?P<metric>\S+)\s+(?P<value>\S+)$', line) if metric_match: parsed[metric_match.group('metric')] = float(metric_match.group('value')) return parsed def test_render_no_patches(): patches = [] metrics = render(patches, [], False, []) parsed_metrics = _parse_metrics(metrics) assert len(parsed_metrics) == EXPECTED_METRICS_COUNT assert parsed_metrics.pop('zypper_scrape_success') == 1 for value in parsed_metrics.values(): assert value == 0 def test_render_multiple_patches(): patches = [ Patch('security', 'important'), Patch('optional', 'moderate'), Patch('security', 'important'), ] metrics = render(patches, [], False, []) parsed_metrics = _parse_metrics(metrics) assert len(parsed_metrics) == EXPECTED_METRICS_COUNT assert parsed_metrics['zypper_applicable_patches{category="security",severity="important"}'] == 2 assert parsed_metrics['zypper_applicable_patches{category="optional",severity="moderate"}'] == 1 assert parsed_metrics['zypper_applicable_patches{category="feature",severity="low"}'] == 0 assert parsed_metrics['zypper_scrape_success'] == 1 @pytest.mark.parametrize('needs_reboot,expected', [ (True, 1), (False, 0), ]) def test_render_needs_rebooting(needs_reboot, expected): assert _parse_metrics(render([], [], needs_reboot, []))['zypper_needs_rebooting'] == expected def test_render_lifecycle(): products = [ Product('openSUSE', 12), Product('something else', 9) ] metrics = render([], [], False, products) parsed_metrics = _parse_metrics(metrics) assert len(parsed_metrics) == EXPECTED_METRICS_COUNT + len(products) assert parsed_metrics['zypper_product_end_of_life{product="openSUSE"}'] == 12 assert parsed_metrics['zypper_product_end_of_life{product="something_else"}'] == 9 def test_render_services_needing_restart(): services = ['apache', 'mysql'] metrics = render([], services, False, []) parsed_metrics = _parse_metrics(metrics) assert len(parsed_metrics) == EXPECTED_METRICS_COUNT + len(services) assert parsed_metrics['zypper_service_needs_restart{service="apache"}'] == 1 assert parsed_metrics['zypper_service_needs_restart{service="mysql"}'] == 1 def test_render_failure(): patches = None products = None services = None metrics = render(patches, services, False, products) parsed_metrics = _parse_metrics(metrics) assert len(parsed_metrics) == 1 assert parsed_metrics['zypper_scrape_success'] == 0
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/tests/test_prometheus.py
test_prometheus.py
# encoding=utf-8 import subprocess import pytest from zypper_patch_status_collector._cli import main from .mock import mock_zypper_call def test_license(capsys): main(['--license']) output = capsys.readouterr().out assert 'GNU General Public License' in output assert 'WITHOUT ANY WARRANTY' in output def test_version(capsys): with pytest.raises(SystemExit) as e: main(['--version']) assert e.value.code == 0 output = capsys.readouterr() assert output.out + output.err != '' def test_success(capsys, mocker): mock_zypper_call(mocker, 'tests/fixtures/all.xml') main() output = capsys.readouterr() assert 'zypper_scrape_success 1' in output.out assert 'zypper_applicable_patches{category="optional",severity="moderate"} 2' in output.out def test_zypper_fail(capsys, mocker): zypper = mock_zypper_call(mocker, 'tests/fixtures/all.xml') zypper.side_effect = subprocess.CalledProcessError(1, 'zypper list-patches') with pytest.raises(SystemExit) as e: main() assert e.value.code != 0 output = capsys.readouterr() assert 'zypper_scrape_success 0' in output.out assert 'Failed' in output.err
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/tests/test_cli.py
test_cli.py
# There shouldn't be an __init__.py in this dir,
# but we need it to shut up the flake8 logger
from logging import getLogger

getLogger('flake8').propagate = False
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/tests/__init__.py
__init__.py
from ._cli import main

main()
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/zypper_patch_status_collector/__main__.py
__main__.py
import collections import itertools import re from typing import Iterable from ._model import CATEGORIES, SEVERITIES, Patch, Product GAUGE_META_TEMPLATE = '''\ # HELP {name} {help_text} # TYPE {name} gauge ''' GAUGE_VALUE_TEMPLATE = '''\ {name} {value} ''' def _render_gauge_meta(name, help_text): return GAUGE_META_TEMPLATE.format( name=name, help_text=help_text ) def _render_gauge_value(name, value): return GAUGE_VALUE_TEMPLATE.format( name=name, value=value, ) def _render_patch_meta(): return _render_gauge_meta( name='zypper_applicable_patches', help_text='The current count of applicable patches', ) def _render_patch_count(patch, count): return _render_gauge_value( name='zypper_applicable_patches{{category="{category}",severity="{severity}"}}'.format( category=patch.category, severity=patch.severity, ), value=count, ) def _render_service_needs_restart_meta(): return _render_gauge_meta( name='zypper_service_needs_restart', help_text='Set to 1 if service requires a restart due to using no-longer-existing libraries.', ) def _render_service_needs_restart_value(service: str): # There is only a specific set of characters allowed in labels. safe_name = re.sub(r'[^a-zA-Z0-9_]', '_', service) return _render_gauge_value( name=f'zypper_service_needs_restart{{service="{safe_name}"}}', value=1, ) def _render_product_meta(): return _render_gauge_meta( name='zypper_product_end_of_life', help_text='Unix timestamp on when support for the product will end.', ) def _render_product_eol(product: Product): # There is only a specific set of characters allowed in labels. safe_name = re.sub(r'[^a-zA-Z0-9_]', '_', product.name) return _render_gauge_value( name=f'zypper_product_end_of_life{{product="{safe_name}"}}', value=product.eol, ) def _render_needs_rebooting(needs_rebooting): return _render_gauge_meta( name='zypper_needs_rebooting', help_text='Whether the system requires a reboot as core libraries or services have been updated.', ) + _render_gauge_value( name='zypper_needs_rebooting', value=1 if needs_rebooting else 0 ) def _render_scrape_success(value): return _render_gauge_meta( name='zypper_scrape_success', help_text='Whether the last scrape for zypper data was successful.', ) + _render_gauge_value( name='zypper_scrape_success', value=value, ) def render( patches: Iterable[Patch], services_needing_restart: Iterable[str], needs_rebooting: bool, products: Iterable[Product], ): patch_histogram = collections.Counter(patches) if patches is None or services_needing_restart is None or products is None: return _render_scrape_success(0) metrics = [ _render_patch_meta() ] + [ _render_patch_count(patch, patch_histogram.get(patch, 0)) for patch in ( Patch(category, severity) for category, severity in itertools.product(CATEGORIES, SEVERITIES) ) ] + [ _render_service_needs_restart_meta() ] + [ _render_service_needs_restart_value(service) for service in services_needing_restart ] + [ _render_product_meta() ] + [ _render_product_eol(product) for product in products ] + [ _render_needs_rebooting(needs_rebooting), _render_scrape_success(1) ] return ''.join(metrics)
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/zypper_patch_status_collector/_prometheus.py
_prometheus.py
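To see what `render()` in the module above produces, a small illustrative call with hand-made `Patch` and `Product` values (this is just a sketch against the functions shown, not part of the package's CLI):

```python
# Sketch: feed render() two identical patches, one service and one product,
# then print the resulting Prometheus text format.
from zypper_patch_status_collector._model import Patch, Product
from zypper_patch_status_collector._prometheus import render

metrics = render(
    patches=[Patch('security', 'important'), Patch('security', 'important')],
    services_needing_restart=['sshd'],
    needs_rebooting=False,
    products=[Product('openSUSE', 1606694400)],
)
print(metrics)
# Among the zeroed category/severity series, the output contains:
#   zypper_applicable_patches{category="security",severity="important"} 2
#   zypper_service_needs_restart{service="sshd"} 1
#   zypper_product_end_of_life{product="openSUSE"} 1606694400
#   zypper_needs_rebooting 0
#   zypper_scrape_success 1
```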
import subprocess
import tempfile
import xml.etree.ElementTree as ET

from ._model import Patch, Product


def _query_zypper():
    return subprocess.check_output(
        ['zypper', '--xmlout', '--quiet', '--non-interactive', 'list-patches'],
        universal_newlines=True
    )


def _parse_zypper(patches_xml):
    root = ET.fromstring(patches_xml)
    patches = root.iter('update')
    return [
        Patch(patch.attrib.get('category'), patch.attrib.get('severity'))
        for patch in patches
    ]


def get_applicable_patches():
    patches_xml = _query_zypper()
    return _parse_zypper(patches_xml)


def check_needs_reboot():
    result = subprocess.run(
        ['zypper', 'needs-rebooting'],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if result.returncode == 0:
        return False
    elif result.returncode == 102:
        return True
    else:
        return result.check_returncode()


def get_lifecycle_info():
    with tempfile.NamedTemporaryFile() as tmp:
        subprocess.check_call(
            ['zypper', 'lifecycle', '--save', tmp.name],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            universal_newlines=True,
        )
        root = ET.fromstring(tmp.read())
    products = root.iter('product')
    return [
        Product(
            product.attrib.get('name'),
            int(product.attrib.get('eol')),
        )
        for product in products
    ]


def get_services_needing_restart():
    return {
        line
        for line in subprocess.check_output(
            ['zypper', 'ps', '-sss'],
            universal_newlines=True
        ).splitlines()
        if line  # kill empty lines (e.g. the trailing new-line)
    }
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/zypper_patch_status_collector/_zypper.py
_zypper.py
import collections

CATEGORIES = ['security', 'recommended', 'optional', 'feature', 'document', 'yast']
SEVERITIES = ['critical', 'important', 'moderate', 'low', 'unspecified']

Patch = collections.namedtuple('Patch', 'category severity')
Product = collections.namedtuple('Product', 'name eol')
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/zypper_patch_status_collector/_model.py
_model.py
# encoding=utf-8 import argparse import sys import textwrap import pkg_resources from ._prometheus import render from ._zypper import get_applicable_patches, get_lifecycle_info, get_services_needing_restart, check_needs_reboot LICENSE_TEXT = textwrap.dedent("""\ Copyright (C) 2017 Matthias Bach This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.\ """) def main(args=sys.argv[1:]): parser = argparse.ArgumentParser( description='Export patch status in Prometheus-compatible format..', ) parser.add_argument( '--license', action='store_true', default=False, help='Show license information' ) parser.add_argument('--version', action='version', version=str( pkg_resources.get_distribution('zypper-patch-status-collector').version ),) parsed_args = parser.parse_args(args) if parsed_args.license: print(LICENSE_TEXT) return run() def run(): try: patches = get_applicable_patches() except Exception as e: # in case of error, carry on print('Failed to query zypper: {}'.format(e), file=sys.stderr) patches = None try: services_needing_restart = get_services_needing_restart() except Exception as e: # in case of error, carry on print('Failed to query zypper: {}'.format(e), file=sys.stderr) services_needing_restart = None try: needs_reboot = check_needs_reboot() except Exception as e: # in case of error, carry on print('Failed to query zypper: {}'.format(e), file=sys.stderr) needs_reboot = False try: products = get_lifecycle_info() except Exception as e: # in case of error, carry on print('Failed to query zypper: {}'.format(e), file=sys.stderr) products = None metrics = render(patches, services_needing_restart, needs_reboot, products) print(metrics) if patches is None or products is None: sys.exit(1)
zypper-patch-status-collector
/zypper-patch-status-collector-0.2.1.tar.gz/zypper-patch-status-collector-0.2.1/zypper_patch_status_collector/_cli.py
_cli.py
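Since `main()` above accepts an explicit argument list, it can also be exercised directly from Python rather than through the console script; a small sketch:

```python
# Sketch: drive the collector's CLI entry point from Python.
# A full run requires zypper plus the lifecycle/needs-restarting plug-ins mentioned in the README;
# without them the output falls back to zypper_scrape_success 0 and the process exits non-zero.
from zypper_patch_status_collector._cli import main

main(['--license'])   # print license information and return
main([])              # query zypper and print the Prometheus metrics to stdout
```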
import logging import pandas as pd import pymsteams from pymsteams import TeamsWebhookException from notify.types import DfsInfo class NotifyTeams: def __init__(self, webhook: str): """ Parameters ---------- webhook: str url for sending the teams message """ self.msg = pymsteams.connectorcard(webhook) self.msg.color("#F0B62E") def add_full_dataframe(self, df: pd.DataFrame) -> None: """ Parameters ---------- df: pd.DataFrame Dataframe that will be added to the card. Returns ------- None Adds a section for the table to the teams message object. """ if df.shape[0] > 30: logging.warning(f"only first 30 records will be added.({df.shape[0]}> the limit of 30).") df = df.head(n=30) section = pymsteams.cardsection() md_table = df.to_markdown(index=False) section.text(md_table) self.msg.addSection(section) def create_dataframe_report(self, dfs: DfsInfo) -> None: """ Parameters ---------- dfs: dict Dataframes containing {name, df} as key value pairs Returns ------- None Adds a section for the table to the teams message object. """ for df_name, df_shape in dfs.items(): section = pymsteams.cardsection() section.activityTitle(f"<h1><b>{df_name}</b></h1>") section.activityImage("https://pbs.twimg.com/profile_images/1269974132818620416/nt7fTdpB.jpg") section.text(f"> In totaal **{df_shape[0]}** records met **{df_shape[1]}** kolommen verwerkt") self.msg.addSection(section) def create_buttons(self, buttons: dict) -> None: """ Parameters ---------- buttons: dict dictionairy containing button_name, button_link as key, value pairs. Returns ------- None Adds the button(s) to the teams message """ for button_name, button_link in buttons.items(): self.msg.addLinkButton(button_name, button_link) def basic_message( self, title: str, message: str = None, buttons: dict = None, df: pd.DataFrame = pd.DataFrame(), dfs: DfsInfo = None, ) -> None: """ This function posts a message, containing a section, in a Microsoft Teams channel Parameters ---------- dfs: dict Dataframes dictionary, with keys as dataframe name and value as dataframe. df: pd.DataFrame df that will be added to a card section. length of dataframe should not exceed 10. title: str Title of the message (optional) message: str Content of the message (optional) buttons: dict dictionary of button_name, button_url as key value pairs Returns ------- None sends a message in a teams channel, reporting col en records as information. """ self.msg.title(title) # always required. if message: self.msg.text(message) if dfs: self.create_dataframe_report(dfs) if not df.empty: self.add_full_dataframe(df) if buttons: self.create_buttons(buttons) try: self.msg.send() except TeamsWebhookException: logging.warning("Teams notification not sent!")
zyppnotify
/zyppnotify-0.5.1-py3-none-any.whl/notify/teams.py
teams.py
from typing import TypedDict


class DfsInfo(TypedDict):
    df_name: str
    df_shape: tuple
zyppnotify
/zyppnotify-0.5.1-py3-none-any.whl/notify/types.py
types.py
import os import pandas as pd from babel.numbers import format_currency, format_decimal from notify.exceptions import EnvironmentVariablesError def format_numbers(df: pd.DataFrame, currency_columns: list = None, number_columns: list = None): """ This functions converts currencies (values) and numbers (digits) columns to formatted text columns. Parameters ---------- df: pd.DataFrame Dataframe with columns which need to be formatted currency_columns: list List of columns which will be formatted to currencies with a Euro sign number_columns: list List with columns which will be formatted to European standard. Returns ------- df: pd.DataFrame Dataframe with converted columns """ # format de bedrag kolommen if number_columns is None: number_columns = [] if currency_columns is None: currency_columns = [] for col in currency_columns: df[col] = df[col].apply(lambda x: format_currency(number=x, currency="EUR", locale="nl_NL")) # format de nummer kolommen for col in number_columns: df[col] = df[col].apply(lambda x: format_decimal(number=x, locale="nl_NL")) return df def check_environment_variables(required_variables: list): """ Test if environment variables are set. Parameters ---------- required_variables: list list of required variables that need to be present in environment variables. Returns ------- None """ values = [os.environ.get(x) for x in required_variables] if not all(values): raise EnvironmentVariablesError(f"One of the environment variables {', '.join(required_variables)} is not set") def dataframe_to_html(df: pd.DataFrame) -> str: """ This functions converts a dataframe to an HTML table. Parameters ---------- df: pd.DataFrame Dataframe which needs to be converted to HTML Returns ------- pretty_html_table: str html body with generated HTML table """ html_table = df.to_html(index=False, classes="styled-table", justify="center") pretty_html_table = ( """ <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>Dataframe report</title> <style type="text/css" media="screen"> h1 { background-color: #a8a8a8; display: flex; flex-direction: column; justify-content: center; text-align: center; } .styled-table { border-collapse: collapse; margin: 25px 0; font-size: 0.9em; font-family: sans-serif; min-width: 400px; box-shadow: 0 0 20px rgba(0, 0, 0, 0.15); } .styled-table thead tr { background-color: #009879; color: #ffffff; text-align: left; } .styled-table th, .styled-table td { padding: 12px 15px; } .styled-table tbody tr { border-bottom: thin solid #dddddd; } .styled-table tbody tr:nth-of-type(even) { background-color: #f3f3f3; } .styled-table tbody tr.active-row { font-weight: bold; color: #009879; } .styled-table tbody tr:last-of-type { border-bottom: 2px solid #009879; } </style> </head> <body>""" + html_table + "</body>" ) return pretty_html_table
zyppnotify
/zyppnotify-0.5.1-py3-none-any.whl/notify/utils.py
utils.py
import base64 import logging import os from urllib import request import pandas as pd from notify.msgraph import Graph from notify.utils import check_environment_variables, dataframe_to_html class NotifyMail: def __init__( self, to: str, subject: str, message: str, cc: str = None, bcc: str = None, files: dict = None, df: pd.DataFrame = pd.DataFrame(), ): """ This function sends an e-mail from Microsoft Exchange server Parameters ---------- to: str the e-mail adress to send email to subject: str subject of the message message: HTML or plain text content of the message cc: str e-mail address to add as cc bcc: str e-mail address to add as bcc files: str, list Path(s) to file(s) to add as attachment df: pd.DataFrame dataframe that needs to be added to the HTML message. """ check_environment_variables(["EMAIL_USER", "MAIL_TENANT_ID", "MAIL_CLIENT_ID", "MAIL_CLIENT_SECRET"]) self.sender = os.environ.get("EMAIL_USER") self.to = to.replace(";", ",") self.cc = cc.replace(";", ",") if cc is not None else cc self.bcc = bcc.replace(";", ",") if bcc is not None else bcc self.subject = subject self.message = message self.files = [files] if isinstance(files, str) else files self.df = df self.graph = Graph() self.graph.ensure_graph_for_app_only_auth() @staticmethod def read_file_content(path): if path.startswith("http") or path.startswith("www"): with request.urlopen(path) as download: content = base64.b64encode(download.read()) else: with open(path, "rb") as f: content = base64.b64encode(f.read()) return content def send_email(self): """ This function sends an e-mail from Microsoft Exchange server Returns ------- response: requests.Response """ endpoint = f"https://graph.microsoft.com/v1.0/users/{self.sender}/sendMail" msg = { "Message": { "Subject": self.subject, "Body": {"ContentType": "HTML", "Content": self.message}, "ToRecipients": [{"EmailAddress": {"Address": to.strip()}} for to in self.to.split(",")], }, "SaveToSentItems": "true", } if self.cc: msg["Message"]["CcRecipients"] = [{"EmailAddress": {"Address": cc.strip()}} for cc in self.cc.split(",")] if self.bcc: msg["Message"]["BccRecipients"] = [ {"EmailAddress": {"Address": bcc.strip()}} for bcc in self.bcc.split(",") ] # add html table (if table less than 30 records) if self.df.shape[0] in range(1, 31): html_table = dataframe_to_html(df=self.df) elif self.df.shape[0] > 30: logging.warning(f"Only first 30 records will be added. ({self.df.shape[0]} > the limit of 30).") html_table = dataframe_to_html(df=self.df.head(n=30)) else: html_table = "" # no data in dataframe (0 records) msg["Message"]["Body"]["Content"] += html_table if self.files: # There might be a more safe way to check if a string is an url, but for our purposes, this suffices. attachments = list() for name, path in self.files.items(): content = self.read_file_content(path) attachments.append( { "@odata.type": "#microsoft.graph.fileAttachment", "ContentBytes": content.decode("utf-8"), "Name": name, } ) msg["Message"]["Attachments"] = attachments response = self.graph.app_client.post(endpoint, json=msg) return response
zyppnotify
/zyppnotify-0.5.1-py3-none-any.whl/notify/mail.py
mail.py
import logging

from notify.mail import NotifyMail  # noqa
from notify.teams import NotifyTeams  # noqa
from notify.utils import dataframe_to_html, format_numbers  # noqa

logging.basicConfig(
    format="%(asctime)s.%(msecs)03d [%(levelname)-5s] [%(name)s] - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
)

name = "notify"
__version__ = "0.5.1"
zyppnotify
/zyppnotify-0.5.1-py3-none-any.whl/notify/__init__.py
__init__.py
class EnvironmentVariablesError(Exception):
    """Exception when not all env variables are set"""

    pass
zyppnotify
/zyppnotify-0.5.1-py3-none-any.whl/notify/exceptions.py
exceptions.py
import os

from azure.identity import ClientSecretCredential
from msgraph.core import GraphClient


class Graph:
    user_client: GraphClient
    client_credential: ClientSecretCredential
    app_client: GraphClient

    def ensure_graph_for_app_only_auth(self):
        if not hasattr(self, "client_credential"):
            client_id = os.environ["MAIL_CLIENT_ID"]
            tenant_id = os.environ["MAIL_TENANT_ID"]
            client_secret = os.environ["MAIL_CLIENT_SECRET"]
            self.client_credential = ClientSecretCredential(tenant_id, client_id, client_secret)

        if not hasattr(self, "app_client"):
            self.app_client = GraphClient(
                credential=self.client_credential, scopes=["https://graph.microsoft.com/.default"]
            )
zyppnotify
/zyppnotify-0.5.1-py3-none-any.whl/notify/msgraph.py
msgraph.py
from setuptools import setup

setup(
    name='zyr-distributions',
    version='0.2',
    description='Gaussian and Binomial distributions',
    author='Richard',
    author_email='[email protected]',
    packages=['zyr-distributions'],
    zip_safe=False,
)
zyr-distributions
/zyr-distributions-0.2.tar.gz/zyr-distributions-0.2/setup.py
setup.py
zyr studio exclusive library. It provides three helpers:
1. jindu() - shows a progress bar
2. cc() - prints text one character at a time
3. getsum() - reads numbers from input and prints their sum
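A minimal usage sketch (an illustration only, assuming the helpers are imported
from the zyr_zhuanshu.test module as it is laid out in this distribution, and
that alive_progress is installed):

    from zyr_zhuanshu.test import jindu, cc, getsum

    jindu()             # render a demo progress bar
    cc("hello world")   # print the text one character at a time
    getsum()            # interactively read numbers and print their sum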
zyr-zhuanshu
/zyr_zhuanshu-0.0.2.tar.gz/zyr_zhuanshu-0.0.2/README.txt
README.txt
import setuptools

with open("README.txt", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="zyr_zhuanshu",
    version="0.0.2",
    author="zyr_studio",
    author_email="[email protected]",
    description="zyr_zhuanshu",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/jamwusjki/zyr_zhuanshu",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
zyr-zhuanshu
/zyr_zhuanshu-0.0.2.tar.gz/zyr_zhuanshu-0.0.2/setup.py
setup.py
from time import *

from alive_progress import alive_bar
import os
import time
import sys


def cc(text):
    timee = 0.1
    for i in text:
        sleep(timee)
        print(i, end='', flush=True)
        sys.stdout.flush()
    print("", end="\n")
    return ""


def getsum():
    a = 0
    b = 0
    sue = 0
    a = int(input(cc("请输入数的个数:")))
    cc("ok!,稍等·············")
    print("已完成")
    for i in range(a):
        b = int(input(cc("请输入数字:")))
        sue = sue + b
    cc("正在计算中···········")
    cc("计算完毕")
    cc("结果为:")
    print(sue)


def jindu():
    with alive_bar(100, force_tty=True) as bar:
        for i in range(100):
            time.sleep(0.01)
            bar()
zyr-zhuanshu
/zyr_zhuanshu-0.0.2.tar.gz/zyr_zhuanshu-0.0.2/zyr_zhuanshu/test.py
test.py
name = 'zyr_zhuanshu'
zyr-zhuanshu
/zyr_zhuanshu-0.0.2.tar.gz/zyr_zhuanshu-0.0.2/zyr_zhuanshu/__init__.py
__init__.py
For the exclusive use of the zyr studio ·-·
zyrdeku
/zyrdeku-0.0.1.tar.gz/zyrdeku-0.0.1/README.md
README.md
import setuptools

setuptools.setup(
    name="zyrdeku",
    version="0.0.1",
    author="张焱睿",
    author_email="[email protected]",
    description="A small example package",
    long_description="还海奥华",
    long_description_content_type="text/markdown",
    url="https://github.com/jamwusjki/pipa",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
zyrdeku
/zyrdeku-0.0.1.tar.gz/zyrdeku-0.0.1/setup.py
setup.py
from time import *

from alive_progress import alive_bar
import os
import time
import sys


def cc(text):
    timee = 0.1
    for i in text:
        sleep(timee)
        print(i, end='', flush=True)
        sys.stdout.flush()
    print("", end="\n")
    return ""


def getsum():
    a = 0
    b = 0
    sue = 0
    a = int(input(cc("请输入数的个数:")))
    cc("ok!,稍等·············")
    print("已完成")
    for i in range(a):
        b = int(input(cc("请输入数字:")))
        sue = sue + b
    cc("正在计算中···········")
    cc("计算完毕")
    cc("结果为:")
    print(sue)


def jindu():
    with alive_bar(100, force_tty=True) as bar:
        for i in range(100):
            time.sleep(0.01)
            bar()
zyrdeku
/zyrdeku-0.0.1.tar.gz/zyrdeku-0.0.1/pack-zyr/zyrdeku.py
zyrdeku.py
name = "pack-self"
zyrdeku
/zyrdeku-0.0.1.tar.gz/zyrdeku-0.0.1/pack-zyr/__init__.py
__init__.py
from itertools import product, permutations import random import vthread def get_all_operation_combine(cards): c1, c2, c3, c4 = cards operators = ['+', '-', '*', '/'] expressions = [] for p in product(operators, repeat=len(cards) - 1): # 运算符是注入在数字之间,所以用数字的长度 -1 op1, op2, op3 = p # 循环运算符 (3) expressions.append('{} {} {} {} {} {} {}'.format(c1, op1, c2, op2, c3, op3, c4)) return expressions # 得出不同的数字和运算符组合的列表 def rand_card(): return random.randint(1, 14) # 从十四张牌中随意抽取一张 def get_all_operation_combine_with_number_exchange(cards): all_result = [] for p in permutations(cards): # 将随机抽取的列表的四个数进行全排列,然后循环调用 get_all_operation_combine() 获得数学运算式,未加括号,放入列表中 all_result += get_all_operation_combine(p) return all_result # 利用递归思想进行括号添加 def add_brace(numbers): if len(numbers) < 2: return [numbers] if len(numbers) == 2: return [['(' + str(numbers[0])] + [str(numbers[1]) + ')']] results = [] for i in range(1, len(numbers)): prefix = numbers[:i] prefix1 = add_brace(prefix) tail = numbers[i:] tails = add_brace(tail) for p, t in product(prefix1, tails): # 将列表中的组合列表先拆开,分别在头步和尾部添加括号在用列表组合 brace_with_around = ['(' + p[0]] + p[1:] + t[:-1] + [t[-1] + ')'] results.append(brace_with_around) return results # 不固定长读输出数学运算式 def join_op_with_brace_number(operators, with_brace): finally_exp = with_brace[0] for i, op in enumerate(operators): finally_exp += (op + ' ' + with_brace[i + 1]) return finally_exp # 添加括号 def join_brace_to_expression(expression): numbers = expression.split()[::2] # 数字拆分 operators = expression.split()[1::2] # 运算符拆分 with_braces = add_brace(numbers) # 添加括号 with_operator_and_brace = [] for brace in with_braces: with_operator_and_brace.append(join_op_with_brace_number(operators, brace)) return with_operator_and_brace def simple_but_may_not_answer(cards): target = 24 for exp in get_all_operation_combine(cards): if eval(exp) == target: print(exp) def a_little_complicate_but_may_not_answer(cards): target = 24 for exp in get_all_operation_combine_with_number_exchange(cards): if eval(exp) == target: print(exp) # 不固定长度 def complicate_but_useful_with_brace(cards): targe = 24 for exp in get_all_operation_combine_with_number_exchange(cards): for b in join_brace_to_expression(exp): # 添加括号不固定长度,数学运算式组合 try: if eval(b) == targe: print(b) except ZeroDivisionError: continue new_cards = [rand_card() for _ in range(4)] # print('我抽到的牌是: {}'.format(new_cards)) # # print('-- 不带交换位置找到的答案') # simple_but_may_not_answer(new_cards) # # print('-- 带了交换位置找到的答案') # a_little_complicate_but_may_not_answer(new_cards) if __name__ == '__main__': print('-- 带了括号的答案是') complicate_but_useful_with_brace([12, 2, 7, 2])
zys0428
/zys0428-0.0.1-py3-none-any.whl/zys/24.py
24.py
from . import complicate_but_useful_with_brace
zys0428
/zys0428-0.0.1-py3-none-any.whl/zys/__init__.py
__init__.py
## zyte-api-convertor A Python module to convert Zyte API Json payload to [Scrapy ZyteAPI](https://github.com/scrapy-plugins/scrapy-zyte-api) project. It uses Scrapy and scrapy-zyte-api plugin to generate the project, also it uses black to format the code. ### Requirements ``` Python 3.6+ Scrapy scrapy-zyte-api black ``` ### Documentation [Zyte API Documentation](https://docs.zyte.com/zyte-api/get-started/index.html) Test the Zyte API payload using postman or curl. Once it gives the desired response, use the same payload with this module to convert it to a Scrapy ZyteAPI project. ### Installation `pip install zyte-api-convertor` ### Usage ```shell Usage: zyte-api-convertor <payload> --project-name <project_name> --spider-name <spider_name> Example: zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' --project-name sample_project --spider-name sample_spider Usage: zyte-api-convertor <payload> --project-name <project_name> Example: zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' --project-name sample_project Usage: zyte-api-convertor <payload> --spider-name <spider_name> Example: zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' --spider-name sample_spider Usage: zyte-api-convertor <payload> Example: zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' ``` ### Example zyte-api-convertor expects a valid json payload at the least. But it does have other options as well. You can use the `--project-name` and `--spider-name` options to set the project and spider name. If you don't use these options, it will use the default project and spider name. ```shell zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' --project-name sample_project --spider-name sample_spider ``` Output: ```shell mukthy@Mukthys-MacBook-Pro % zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' --project-name sample_project --spider-name sample_spider Code Generated! Writing to file... Writing Done! reformatted sample_project/sample_project/spiders/sample_project.py All done! ✨ 🍰 ✨ 1 file reformatted. Formatting Done! ``` Project Created Successfully. ```shell mukthy@Mukthys-MacBook-Pro % sample_project % tree . 
├── sample_project │   ├── __init__.py │   ├── items.py │   ├── middlewares.py │   ├── pipelines.py │   ├── settings.py │   └── spiders │   ├── __init__.py │   └── sample_project.py └── scrapy.cfg 3 directories, 8 files ``` Sample Spider Code: ```python import scrapy class SampleQuotesSpider(scrapy.Spider): name = "sample_spider" custom_settings = { "DOWNLOAD_HANDLERS": { "http": "scrapy_zyte_api.ScrapyZyteAPIDownloadHandler", "https": "scrapy_zyte_api.ScrapyZyteAPIDownloadHandler", }, "DOWNLOADER_MIDDLEWARES": { "scrapy_zyte_api.ScrapyZyteAPIDownloaderMiddleware": 1000 }, "REQUEST_FINGERPRINTER_CLASS": "scrapy_zyte_api.ScrapyZyteAPIRequestFingerprinter", "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "ZYTE_API_KEY": "YOUR_API_KEY", } def start_requests(self): yield scrapy.Request( url="https://httpbin.org/ip", meta={ "zyte_api": { "javascript": False, "screenshot": True, "browserHtml": True, "actions": [], "requestHeaders": {}, "geolocation": "US", "experimental": {"responseCookies": False}, } }, ) def parse(self, response): print(response.text) ``` Please note that the `ZYTE_API_KEY` is not set in the `custom_settings` of the spider. You need to set it before running it.
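One hedged way to do that (a hand-edited variant, not something the tool generates) is to read the key from an environment variable inside the generated spider instead of hard-coding it:

```python
import os

import scrapy


class SampleQuotesSpider(scrapy.Spider):
    name = "sample_spider"
    custom_settings = {
        # ... keep the other settings generated by zyte-api-convertor ...
        "ZYTE_API_KEY": os.environ.get("ZYTE_API_KEY", "YOUR_API_KEY"),
    }
```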
zyte-api-convertor
/zyte_api_convertor-1.0.3.tar.gz/zyte_api_convertor-1.0.3/README.md
README.md
import json import os import subprocess import sys def payload_to_zyte(spider_name): if os.name != 'nt': data = sys.argv[1] # print(data) data = data.replace(',}', '}') data = data.replace(',]', ']') data = json.loads(data) # print(data) else: print("Windows Detected!, Please enter the payload again.") data = input("Enter the JSON Payload with quotes (only): ") # print(data) data = data.replace(',}', '}') data = data.replace(',]', ']') data = data.replace("'{", "{") data = data.replace("}'", "}") data = json.loads(data) # print(data) url = data['url'] if 'actions' in data: actions = data['actions'] else: actions = [] # Post request with custom headers if 'httpRequestMethod' in data and data['httpRequestMethod'] == "POST" and ('customHttpRequestHeaders' in data): httpRequestMethod = data['httpRequestMethod'] httpResponseBody = data['httpResponseBody'] httpResponseHeaders = True if 'experimental' in data: experimental = data['experiment'] else: experimental = { "responseCookies": False, } if 'geolocation' in data: geolocation = data['geolocation'] else: geolocation = "US" if 'customHttpRequestHeaders' in data: customHttpRequestHeaders = data['customHttpRequestHeaders'] else: customHttpRequestHeaders = [] httpRequestBody = data['httpRequestBody'] meta = {"zyte_api": {"customHttpRequestHeaders": customHttpRequestHeaders, "geolocation": geolocation, "httpResponseBody": httpResponseBody, "httpResponseHeaders": httpResponseHeaders, "experimental": experimental, "httpRequestMethod": httpRequestMethod, "httpRequestBody": httpRequestBody}} # Post request with request headers elif ('httpRequestMethod' in data and data['httpRequestMethod'] == "POST") and ('requestHeaders' in data): httpRequestMethod = data['httpRequestMethod'] httpResponseBody = data['httpResponseBody'] httpResponseHeaders = True if 'experimental' in data: experimental = data['experiment'] else: experimental = { "responseCookies": False, } if 'geolocation' in data: geolocation = data['geolocation'] else: geolocation = "US" if 'requestHeaders' in data: requestHeaders = data['requestHeaders'] else: requestHeaders = {} httpRequestBody = data['httpRequestBody'] meta = {"zyte_api": {"requestHeaders": requestHeaders, "geolocation": geolocation, "httpResponseBody": httpResponseBody, "httpResponseHeaders": httpResponseHeaders, "experimental": experimental, "httpRequestMethod": httpRequestMethod, "httpRequestBody": httpRequestBody}} # Post request without request headers elif 'httpRequestMethod' in data and data['httpRequestMethod'] == "POST": httpRequestMethod = data['httpRequestMethod'] httpResponseBody = data['httpResponseBody'] httpResponseHeaders = True if 'experimental' in data: experimental = data['experiment'] else: experimental = { "responseCookies": False, } if 'geolocation' in data: geolocation = data['geolocation'] else: geolocation = "US" httpRequestBody = data['httpRequestBody'] meta = {"zyte_api": {"geolocation": geolocation, "httpResponseBody": httpResponseBody, "httpResponseHeaders": httpResponseHeaders, "experimental": experimental, "httpRequestMethod": httpRequestMethod, "httpRequestBody": httpRequestBody}} # Get request with custom headers elif ('httpResponseBody' in data and data['httpResponseBody'] == True) and ('customHttpRequestHeaders' in data): httpResponseBody = data['httpResponseBody'] httpResponseHeaders = True if 'experimental' in data: experimental = data['experiment'] else: experimental = { "responseCookies": False, } if 'geolocation' in data: geolocation = data['geolocation'] else: geolocation = "US" if 
'customHttpRequestHeaders' in data: customHttpRequestHeaders = data['customHttpRequestHeaders'] else: customHttpRequestHeaders = [] meta = {"zyte_api": {"customHttpRequestHeaders": customHttpRequestHeaders, "geolocation": geolocation, "httpResponseBody": httpResponseBody, "httpResponseHeaders": httpResponseHeaders, "experimental": experimental}} # Get request with request headers elif ('httpResponseBody' in data and data['httpResponseBody'] == True) and ('requestHeaders' in data): httpResponseBody = data['httpResponseBody'] httpResponseHeaders = True if 'experimental' in data: experimental = data['experiment'] else: experimental = { "responseCookies": False, } if 'geolocation' in data: geolocation = data['geolocation'] else: geolocation = "US" if 'requestHeaders' in data: requestHeaders = data['requestHeaders'] else: requestHeaders = {} meta = {"zyte_api": {"requestHeaders": requestHeaders, "geolocation": geolocation, "httpResponseBody": httpResponseBody, "httpResponseHeaders": httpResponseHeaders, "experimental": experimental}} # BrowserHtml set to True elif 'browserHtml' in data and data['browserHtml'] == True: browserHtml = data['browserHtml'] if 'javascript' in data: javascript = data['javascript'] else: javascript = False if 'screenshot' in data: screenshot = data['screenshot'] else: screenshot = False if 'requestHeaders' in data: requestHeaders = data['requestHeaders'] else: requestHeaders = {} if 'geolocation' in data: geolocation = data['geolocation'] else: geolocation = "US" if 'experimental' in data: experimental = data['experimental'] else: experimental = { "responseCookies": False, } meta = {"zyte_api": {"javascript": javascript, "screenshot": screenshot, "browserHtml": browserHtml, "actions": actions, "requestHeaders": requestHeaders, "geolocation": geolocation, "experimental": experimental}} # Get request without any request headers else: httpResponseBody = True httpResponseHeaders = True if 'experimental' in data: experimental = data['experiment'] else: experimental = { "responseCookies": False, } if 'geolocation' in data: geolocation = data['geolocation'] else: geolocation = "US" meta = {"zyte_api": {"geolocation": geolocation, "httpResponseBody": httpResponseBody, "httpResponseHeaders": httpResponseHeaders, "experimental": experimental}} formatter = { 'url': url } custom_settings = { 'DOWNLOAD_HANDLERS': {"http": "scrapy_zyte_api.ScrapyZyteAPIDownloadHandler", "https": "scrapy_zyte_api.ScrapyZyteAPIDownloadHandler"}, 'DOWNLOADER_MIDDLEWARES': {"scrapy_zyte_api.ScrapyZyteAPIDownloaderMiddleware": 1000}, 'REQUEST_FINGERPRINTER_CLASS': "scrapy_zyte_api.ScrapyZyteAPIRequestFingerprinter", 'TWISTED_REACTOR': "twisted.internet.asyncioreactor.AsyncioSelectorReactor", 'ZYTE_API_KEY': "YOUR_API_KEY" } data = """ import scrapy class SampleQuotesSpider(scrapy.Spider): name = "{spider_name}" custom_settings = {custom_settings} def start_requests(self): yield scrapy.Request(url="{url}", meta={meta}) def parse(self, response): print(response.text) """.format(**formatter, meta=meta, custom_settings=custom_settings, spider_name=spider_name) return data def create_scrapy_project(code, project_name): subprocess.run(["scrapy", "startproject", f"{project_name}"], stdout=subprocess.DEVNULL) # create a new scrapy project. with open(f"{project_name}/{project_name}/spiders/{project_name}.py", "w") as f: # write the code to a file. f.write(code) print("Writing Done!") subprocess.run(["black", f"{project_name}/{project_name}/spiders/{project_name}.py"]) # format the code using black. 
print("Formatting Done!") def main(): try: args = sys.argv[1:] if "--help" in args: usage = ''' Usage: zyte-api-convertor <payload> --project-name <project_name> --spider-name <spider_name> Example: zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' --project-name sample_project --spider-name sample_spider Usage: zyte-api-convertor <payload> --project-name <project_name> Example: zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' --project-name sample_project Usage: zyte-api-convertor <payload> --spider-name <spider_name> Example: zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' --spider-name sample_spider Usage: zyte-api-convertor <payload> Example: zyte-api-convertor '{"url": "https://httpbin.org/ip", "browserHtml": true, "screenshot": true}' ''' print(usage) return elif "--project-name" in args and '--spider-name' in args: try: project_name = args[args.index("--project-name") + 1] spider_name = args[args.index("--spider-name") + 1] if "-" in project_name: print( "Error: Project names must begin with a letter and contain only\n letters, numbers and underscores") return code = payload_to_zyte(spider_name) print("Code Generated!") print("Writing to file...") create_scrapy_project(code, project_name) return except IndexError: print("Please provide a project name and spider name.") return elif "--project-name" in args: try: project_name = args[args.index("--project-name") + 1] spider_name = "sample_zyte_api" if "-" in project_name: print( "Error: Project names must begin with a letter and contain only\n letters, numbers and underscores") return code = payload_to_zyte(spider_name) print("Code Generated!") print("Writing to file...") create_scrapy_project(code, project_name) return except IndexError: print("Please provide a project name.") return elif "--spider-name" in args: try: spider_name = args[args.index("--spider-name") + 1] project_name = "sample_zyte_api_project" code = payload_to_zyte(spider_name) print("Code Generated!") print("Writing to file...") create_scrapy_project(code, project_name) return except IndexError: print("Please provide a spider name.") return elif len(args) < 1: print("Please provide a payload, Payload is Must. Use --help for more info") return else: spider_name = "sample_zyte_api" code = payload_to_zyte(spider_name) print("Code Generated!") print("Writing to file...") project_name = "sample_zyte_api_project" create_scrapy_project(code, project_name) return except IndexError: print("Please provide a payload, Payload is Must. Use --help for more info") return if __name__ == '__main__': main()
zyte-api-convertor
/zyte_api_convertor-1.0.3.tar.gz/zyte_api_convertor-1.0.3/src/zyte_api/convertor.py
convertor.py
Changes ======= 0.4.5 (2023-01-03) ------------------ * w3lib >= 2.1.1 is required in install_requires, to ensure that URLs are escaped properly. * unnecessary ``requests`` library is removed from install_requires * fixed tox 4 support 0.4.4 (2022-12-01) ------------------ * Fixed an issue with submitting URLs which contain unescaped symbols * New "retrying" argument for AsyncClient.__init__, which allows to set custom retrying policy for the client * ``--dont-retry-errors`` argument in the CLI tool 0.4.3 (2022-11-10) ------------------ * Connections are no longer reused between requests. This reduces the amount of ``ServerDisconnectedError`` exceptions. 0.4.2 (2022-10-28) ------------------ * Bump minimum ``aiohttp`` version to 3.8.0, as earlier versions don't support brotli decompression of responses * Declared Python 3.11 support 0.4.1 (2022-10-16) ------------------ * Network errors, like server timeouts or disconnections, are now retried for up to 15 minutes, instead of 5 minutes. 0.4.0 (2022-09-20) ------------------ * Require to install ``Brotli`` as a dependency. This changes the requests to have ``Accept-Encoding: br`` and automatically decompress brotli responses. 0.3.0 (2022-07-29) ------------------ Internal AggStats class is cleaned up: * ``AggStats.n_extracted_queries`` attribute is removed, as it was a duplicate of ``AggStats.n_results`` * ``AggStats.n_results`` is renamed to ``AggStats.n_success`` * ``AggStats.n_input_queries`` is removed as redundant and misleading; AggStats got a new ``AggStats.n_processed`` property instead. This change is backwards incompatible if you used stats directly. 0.2.1 (2022-07-29) ------------------ * ``aiohttp.client_exceptions.ClientConnectorError`` is now treated as a network error and retried accordingly. * Removed the unused ``zyte_api.sync`` module. 0.2.0 (2022-07-14) ------------------ * Temporary download errors are now retried 3 times by default. They were not retried in previous releases. 0.1.4 (2022-05-21) ------------------ This release contains usability improvements to the command-line script: * Instead of ``python -m zyte_api`` you can now run it as ``zyte-api``; * the type of the input file (``--intype`` argument) is guessed now, based on file extension and content; .jl, .jsonl and .txt files are supported. 0.1.3 (2022-02-03) ------------------ * Minor documenation fix * Remove support for Python 3.6 * Added support for Python 3.10 0.1.2 (2021-11-10) ------------------ * Default timeouts changed 0.1.1 (2021-11-01) ------------------ * CHANGES.rst updated properly 0.1.0 (2021-11-01) ------------------ * Initial release.
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/CHANGES.rst
CHANGES.rst
=============== python-zyte-api =============== .. image:: https://img.shields.io/pypi/v/zyte-api.svg :target: https://pypi.python.org/pypi/zyte-api :alt: PyPI Version .. image:: https://img.shields.io/pypi/pyversions/zyte-api.svg :target: https://pypi.python.org/pypi/zyte-api :alt: Supported Python Versions .. image:: https://github.com/zytedata/python-zyte-api/actions/workflows/test.yml/badge.svg :target: https://github.com/zytedata/python-zyte-api/actions/workflows/test.yml :alt: Build Status .. image:: https://codecov.io/github/zytedata/zyte-api/coverage.svg?branch=master :target: https://codecov.io/gh/zytedata/zyte-api :alt: Coverage report Python client libraries for `Zyte API`_. Command-line utility and asyncio-based library are provided by this package. Installation ============ :: pip install zyte-api ``zyte-api`` requires Python 3.7+. API key ======= Make sure you have an API key for the `Zyte API`_ service. You can set ``ZYTE_API_KEY`` environment variable with the key to avoid passing it around explicitly. Read the `documentation <https://python-zyte-api.readthedocs.io>`_ for more information. License is BSD 3-clause. * Documentation: https://python-zyte-api.readthedocs.io * Source code: https://github.com/zytedata/python-zyte-api * Issue tracker: https://github.com/zytedata/python-zyte-api/issues .. _Zyte API: https://docs.zyte.com/zyte-api/get-started.html
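Basic usage
===========

A minimal asyncio sketch, based on the ``AsyncClient`` and ``create_session``
helpers this package ships in ``zyte_api.aio.client`` (treat it as a sketch;
the response fields depend on the query, see the documentation above for the
full API)::

    import asyncio

    from zyte_api.aio.client import AsyncClient, create_session


    async def main():
        client = AsyncClient()  # the API key is read from ZYTE_API_KEY
        async with create_session() as session:
            result = await client.request_raw(
                {"url": "https://toscrape.com", "browserHtml": True},
                session=session,
            )
        # request_raw returns the parsed JSON response as a dict
        print(result["browserHtml"][:200])


    asyncio.run(main())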
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/README.rst
README.rst
#!/usr/bin/env python import os from setuptools import setup, find_packages def get_version(): about = {} here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'zyte_api/__version__.py')) as f: exec(f.read(), about) return about['__version__'] setup( name='zyte-api', version=get_version(), description='Python interface to Zyte API', long_description=open('README.rst').read() + "\n\n" + open('CHANGES.rst').read(), long_description_content_type='text/x-rst', author='Zyte Group Ltd', author_email='[email protected]', url='https://github.com/zytedata/python-zyte-api', packages=find_packages(exclude=['tests', 'examples']), entry_points = { 'console_scripts': ['zyte-api=zyte_api.__main__:_main'], }, install_requires=[ 'aiohttp >= 3.8.0', 'attrs', 'brotli', 'runstats', 'tenacity', 'tqdm', 'w3lib >= 2.1.1', ], classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', ], )
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/setup.py
setup.py
import pytest from pytest import raises from zyte_api.utils import _guess_intype, _process_query @pytest.mark.parametrize( "file_name,first_line,expected", ( ( "<stdin>", "https://toscrape.com", "txt", ), ( "<stdin>", '{"url": "https://toscrape.com"}', "jl", ), ( "<stdin>", ' {"url": "https://toscrape.com"}', "jl", ), ( "urls.txt", "https://toscrape.com", "txt", ), ( "urls.txt", '{"url": "https://toscrape.com"}', "txt", ), ( "urls.jl", "https://toscrape.com", "jl", ), ( "urls.jl", '{"url": "https://toscrape.com"}', "jl", ), ( "urls.jsonl", "https://toscrape.com", "jl", ), ( "urls.jsonl", '{"url": "https://toscrape.com"}', "jl", ), ), ) def test_guess_intype(file_name, first_line, expected): assert _guess_intype(file_name, [first_line]) == expected @pytest.mark.parametrize( "input,output", ( # Unsafe URLs in the url field are modified, while left untouched on # other fields. ( { "a": {"b", "c"}, "d": "https://example.com/ a", "url": "https://example.com/ a", }, { "a": {"b", "c"}, "d": "https://example.com/ a", "url": "https://example.com/%20a", }, ), # Safe URLs are returned unmodified. ( {"url": "https://example.com"}, {"url": "https://example.com"}, ), # URL fragments are kept. ( {"url": "https://example.com#a"}, {"url": "https://example.com#a"}, ), # NOTE: We use w3lib.url.safe_url_string for escaping. Tests covering # the URL escaping logic exist upstream. ), ) def test_process_query(input, output): assert _process_query(input) == output def test_process_query_bytes(): with raises(ValueError): _process_query({"url": b"https://example.com"})
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/tests/test_utils.py
test_utils.py
""" Basic command-line interface for Zyte API. """ import argparse import json import sys import asyncio import logging import random import tqdm from tenacity import retry_if_exception from zyte_api.aio.client import ( create_session, AsyncClient, ) from zyte_api.constants import ENV_VARIABLE, API_URL from zyte_api.utils import _guess_intype from zyte_api.aio.retry import RetryFactory, _is_throttling_error class DontRetryErrorsFactory(RetryFactory): retry_condition = retry_if_exception(_is_throttling_error) logger = logging.getLogger('zyte_api') _UNSET = object() async def run(queries, out, *, n_conn, stop_on_errors, api_url, api_key=None, retry_errors=True): retrying = None if retry_errors else DontRetryErrorsFactory().build() client = AsyncClient(n_conn=n_conn, api_key=api_key, api_url=api_url, retrying=retrying) async with create_session(connection_pool_size=n_conn) as session: result_iter = client.request_parallel_as_completed( queries=queries, session=session, ) pbar = tqdm.tqdm(smoothing=0, leave=True, total=len(queries), miniters=1, unit="url") pbar.set_postfix_str(str(client.agg_stats)) try: for fut in result_iter: try: result = await fut json.dump(result, out, ensure_ascii=False) out.write("\n") out.flush() pbar.update() except Exception as e: if stop_on_errors: raise logger.error(str(e)) finally: pbar.set_postfix_str(str(client.agg_stats)) finally: pbar.close() logger.info(client.agg_stats.summary()) logger.info(f"\nAPI error types:\n{client.agg_stats.api_error_types.most_common()}") logger.info(f"\nStatus codes:\n{client.agg_stats.status_codes.most_common()}") logger.info(f"\nException types:\n{client.agg_stats.exception_types.most_common()}") def read_input(input_fp, intype): assert intype in {"txt", "jl", _UNSET} lines = input_fp.readlines() if intype is _UNSET: intype = _guess_intype(input_fp.name, lines) if intype == "txt": urls = [u.strip() for u in lines if u.strip()] records = [{"url": url, "browserHtml": True} for url in urls] else: records = [ json.loads(line.strip()) for line in lines if line.strip() ] # Automatically replicating the url in echoData to being able to # to match URLs with content in the responses for record in records: record.setdefault("echoData", record.get("url")) return records def _main(program_name='zyte-api'): """ Process urls from input file through Zyte API """ p = argparse.ArgumentParser( prog=program_name, description=""" Process input URLs from a file using Zyte API. """, ) p.add_argument("input", type=argparse.FileType("r", encoding='utf8'), help="Input file with urls, url per line by default. The " "Format can be changed using `--intype` argument.") p.add_argument("--intype", default=_UNSET, choices=["txt", "jl"], help="Type of the input file. " "Allowed values are 'txt' (1 URL per line) and 'jl' " "(JSON Lines file, each object describing the " "parameters of a request). " "If not specified, the input type is guessed based on " "the input file name extension (.jl, .jsonl, .txt) or " "content, and assumed to be txt if guessing fails.") p.add_argument("--limit", type=int, help="Max number of URLs to take from the input") p.add_argument("--output", "-o", default=sys.stdout, type=argparse.FileType("w", encoding='utf8'), help=".jsonlines file to store extracted data. " "By default, results are printed to stdout.") p.add_argument("--n-conn", type=int, default=20, help="number of connections to the API server " "(default: %(default)s)") p.add_argument("--api-key", help="Zyte API key. 
" "You can also set %s environment variable instead " "of using this option." % ENV_VARIABLE) p.add_argument("--api-url", help="Zyte API endpoint (default: %(default)s)", default=API_URL) p.add_argument("--loglevel", "-L", default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR"], help="log level (default: %(default)s)") p.add_argument("--shuffle", help="Shuffle input URLs", action="store_true") p.add_argument("--dont-retry-errors", help="Don't retry request and network errors", action="store_true") args = p.parse_args() logging.basicConfig( stream=sys.stderr, level=getattr(logging, args.loglevel) ) queries = read_input(args.input, args.intype) if args.shuffle: random.shuffle(queries) if args.limit: queries = queries[:args.limit] logger.info(f"Loaded {len(queries)} urls from {args.input.name}; shuffled: {args.shuffle}") logger.info(f"Running Zyte API (connections: {args.n_conn})") loop = asyncio.get_event_loop() coro = run(queries, out=args.output, n_conn=args.n_conn, stop_on_errors=False, api_url=args.api_url, api_key=args.api_key, retry_errors=not args.dont_retry_errors) loop.run_until_complete(coro) loop.close() if __name__ == '__main__': _main(program_name='python -m zyte_api')
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/__main__.py
__main__.py
# -*- coding: utf-8 -*-

# Name of the environment variable with the API key
ENV_VARIABLE = 'ZYTE_API_KEY'

# API URL
API_URL = 'https://api.zyte.com/v1/'

# Default timeout that server uses. Client timeouts should be larger than that.
API_TIMEOUT = 60
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/constants.py
constants.py
__version__ = '0.4.5'
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/__version__.py
__version__.py
# -*- coding: utf-8 -*- from typing import Optional from collections import Counter import functools import time import attr from runstats import Statistics from zyte_api.errors import ParsedError def zero_on_division_error(meth): @functools.wraps(meth) def wrapper(*args, **kwargs): try: return meth(*args, **kwargs) except ZeroDivisionError: return 0 return wrapper class AggStats: def __init__(self): self.time_connect_stats = Statistics() self.time_total_stats = Statistics() self.n_success = 0 # number of successful results returned to the user self.n_fatal_errors = 0 # number of errors returned to the user, after all retries self.n_attempts = 0 # total amount of requests made to Zyte API, including retries self.n_429 = 0 # number of 429 (throttling) responses self.n_errors = 0 # number of errors, including errors which were retried self.status_codes = Counter() self.exception_types = Counter() self.api_error_types = Counter() def __str__(self): return "conn:{:0.2f}s, resp:{:0.2f}s, throttle:{:.1%}, err:{}+{}({:.1%}) | success:{}/{}({:.1%})".format( self.time_connect_stats.mean(), self.time_total_stats.mean(), self.throttle_ratio(), self.n_errors - self.n_fatal_errors, self.n_fatal_errors, self.error_ratio(), self.n_success, self.n_processed, self.success_ratio() ) def summary(self): return ( "\n" + "Summary\n" + "-------\n" + "Mean connection time: {:0.2f}\n".format(self.time_connect_stats.mean()) + "Mean response time: {:0.2f}\n".format(self.time_total_stats.mean()) + "Throttle ratio: {:0.1%}\n".format(self.throttle_ratio()) + "Attempts: {}\n".format(self.n_attempts) + "Errors: {:0.1%}, fatal: {}, non fatal: {}\n".format( self.error_ratio(), self.n_fatal_errors, self.n_errors - self.n_fatal_errors) + "Successful URLs: {} of {}\n".format( self.n_success, self.n_processed) + "Success ratio: {:0.1%}\n".format(self.success_ratio()) ) @zero_on_division_error def throttle_ratio(self): return self.n_429 / self.n_attempts @zero_on_division_error def error_ratio(self): return self.n_errors / self.n_attempts @zero_on_division_error def success_ratio(self): return self.n_success / self.n_processed @property def n_processed(self): """ Total number of processed URLs """ return self.n_success + self.n_fatal_errors @attr.s class ResponseStats: _start = attr.ib(repr=False) # type: float # Wait time, before this request is sent. Can be large in case of retries. time_delayed = attr.ib(default=None) # type: Optional[float] # Time between sending a request and having a connection established time_connect = attr.ib(default=None) # type: Optional[float] # Time to read & decode the response time_read = attr.ib(default=None) # type: Optional[float] # time to get an exception (usually, a network error) time_exception = attr.ib(default=None) # type: Optional[float] # Total time to process the response, excluding the wait time caused # by retries. 
time_total = attr.ib(default=None) # type: Optional[float] # HTTP status code status = attr.ib(default=None) # type: Optional[int] # error (parsed), in case of error response error = attr.ib(default=None) # type: Optional[ParsedError] # exception raised exception = attr.ib(default=None) # type: Optional[Exception] @classmethod def create(cls, start_global): start = time.perf_counter() return cls( start=start, time_delayed=start - start_global, ) def record_connected(self, status: int, agg_stats: AggStats): self.status = status self.time_connect = time.perf_counter() - self._start agg_stats.time_connect_stats.push(self.time_connect) agg_stats.status_codes[self.status] += 1 def record_read(self, agg_stats: Optional[AggStats] = None): now = time.perf_counter() self.time_total = now - self._start self.time_read = self.time_total - (self.time_connect or 0) if agg_stats: agg_stats.time_total_stats.push(self.time_total) def record_exception(self, exception: Exception, agg_stats: AggStats): self.time_exception = time.perf_counter() - self._start self.exception = exception agg_stats.status_codes[0] += 1 agg_stats.exception_types[exception.__class__] += 1 def record_request_error(self, error_body: bytes, agg_stats: AggStats): self.error = ParsedError.from_body(error_body) if self.status == 429: # XXX: status must be set already! agg_stats.n_429 += 1 else: agg_stats.n_errors += 1 agg_stats.api_error_types[self.error.type] += 1
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/stats.py
stats.py
# -*- coding: utf-8 -*- import os from typing import Optional from .constants import ENV_VARIABLE class NoApiKey(Exception): pass def get_apikey(key: Optional[str] = None) -> str: """ Return API key, probably loading it from an environment variable """ if key is not None: return key try: return os.environ[ENV_VARIABLE] except KeyError: raise NoApiKey("API key not found. Please set {} " "environment variable.".format(ENV_VARIABLE))
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/apikey.py
apikey.py
import json
from typing import Optional

import attr


@attr.s(auto_attribs=True)
class ParsedError:
    """ Parsed error from Zyte API """
    response_body: bytes
    data: Optional[dict]
    parse_error: Optional[str]

    @classmethod
    def from_body(cls, response_body: bytes) -> 'ParsedError':
        data = None
        parse_error = None
        if response_body:
            try:
                data = json.loads(response_body.decode("utf-8"))
                if not isinstance(data, dict):
                    parse_error = "bad_format"
                    data = None
            except (json.JSONDecodeError, UnicodeDecodeError) as _:  # noqa: F841
                parse_error = "bad_json"
        return cls(
            response_body=response_body,
            data=data,
            parse_error=parse_error
        )

    @property
    def type(self) -> Optional[str]:
        return (self.data or {}).get('type', None)
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/errors.py
errors.py
import re from os.path import splitext from w3lib.url import safe_url_string from .__version__ import __version__ def _guess_intype(file_name, lines): _, dot_extension = splitext(file_name) extension = dot_extension[1:] if extension in {"jl", "jsonl"}: return "jl" if extension == "txt": return "txt" if re.search(r'^\s*\{', lines[0]): return "jl" return "txt" def _process_query(query): """Given a query to be sent to Zyte API, return a functionally-equivalent query that fixes any known issue. Specifically, unsafe characters in the query URL are escaped to make sure they are safe not only for the end server, but also for Zyte API, which requires URLs compatible with RFC 2396. *query* is never modified in place, but the returned object is not guaranteed to be a copy of *query*: it could be *query* itself if no changes where needed, or a shallow copy of *query* with some common nested objects (e.g. shared ``actions`` list). """ url = query.get("url", None) if url is None: return query if not isinstance(url, str): raise ValueError(f"Expected a str URL parameter, got {type(url)}") safe_url = safe_url_string(url) if url == safe_url: return query return {**query, "url": safe_url} def user_agent(library): return 'python-zyte-api/{} {}/{}'.format( __version__, library.__name__, library.__version__)
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/utils.py
utils.py
""" Python client libraries and command line utilities for Zyte API """
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/__init__.py
__init__.py
""" Asyncio client for Zyte API """ import asyncio import time from functools import partial from typing import Optional, Iterator, List import aiohttp from aiohttp import TCPConnector from tenacity import AsyncRetrying from .errors import RequestError from .retry import zyte_api_retrying from ..apikey import get_apikey from ..constants import API_URL, API_TIMEOUT from ..stats import AggStats, ResponseStats from ..utils import _process_query, user_agent # 120 seconds is probably too long, but we are concerned about the case with # many concurrent requests and some processing logic running in the same reactor, # thus, saturating the CPU. This will make timeouts more likely. AIO_API_TIMEOUT = aiohttp.ClientTimeout(total=API_TIMEOUT + 120) def create_session(connection_pool_size=100, **kwargs) -> aiohttp.ClientSession: """ Create a session with parameters suited for Zyte API """ kwargs.setdefault('timeout', AIO_API_TIMEOUT) if "connector" not in kwargs: kwargs["connector"] = TCPConnector(limit=connection_pool_size, force_close=True) return aiohttp.ClientSession(**kwargs) def _post_func(session): """ Return a function to send a POST request """ if session is None: return partial(aiohttp.request, method='POST', timeout=AIO_API_TIMEOUT) else: return session.post class AsyncClient: def __init__(self, *, api_key=None, api_url=API_URL, n_conn=15, retrying: Optional[AsyncRetrying] = None, ): self.api_key = get_apikey(api_key) self.api_url = api_url self.n_conn = n_conn self.agg_stats = AggStats() self.retrying = retrying or zyte_api_retrying async def request_raw(self, query: dict, *, endpoint: str = 'extract', session=None, handle_retries=True, retrying: Optional[AsyncRetrying] = None, ): retrying = retrying or self.retrying post = _post_func(session) auth = aiohttp.BasicAuth(self.api_key) headers = {'User-Agent': user_agent(aiohttp), 'Accept-Encoding': 'br'} response_stats = [] start_global = time.perf_counter() async def request(): stats = ResponseStats.create(start_global) self.agg_stats.n_attempts += 1 post_kwargs = dict( url=self.api_url + endpoint, json=_process_query(query), auth=auth, headers=headers, ) try: async with post(**post_kwargs) as resp: stats.record_connected(resp.status, self.agg_stats) if resp.status >= 400: content = await resp.read() resp.release() stats.record_read() stats.record_request_error(content, self.agg_stats) raise RequestError( request_info=resp.request_info, history=resp.history, status=resp.status, message=resp.reason, headers=resp.headers, response_content=content ) response = await resp.json() stats.record_read(self.agg_stats) return response except Exception as e: if not isinstance(e, RequestError): self.agg_stats.n_errors += 1 stats.record_exception(e, agg_stats=self.agg_stats) raise finally: response_stats.append(stats) if handle_retries: request = retrying.wraps(request) try: # Try to make a request result = await request() self.agg_stats.n_success += 1 except Exception: self.agg_stats.n_fatal_errors += 1 raise return result def request_parallel_as_completed(self, queries: List[dict], *, endpoint: str = 'extract', session: Optional[aiohttp.ClientSession] = None, ) -> Iterator[asyncio.Future]: """ Send multiple requests to Zyte API in parallel. Return an `asyncio.as_completed` iterator. ``queries`` is a list of requests to process (dicts). ``session`` is an optional aiohttp.ClientSession object. Set the session TCPConnector limit to a value greater than the number of connections. 
""" sem = asyncio.Semaphore(self.n_conn) async def _request(query): async with sem: return await self.request_raw(query, endpoint=endpoint, session=session) return asyncio.as_completed([_request(query) for query in queries])
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/aio/client.py
client.py
# -*- coding: utf-8 -*-
import logging

from aiohttp import ClientResponseError

from zyte_api.errors import ParsedError

logger = logging.getLogger(__name__)


class RequestError(ClientResponseError):
    """ Exception which is raised when Request-level error is returned.
    In contrast with ClientResponseError, it allows to inspect response
    content.
    """
    def __init__(self, *args, **kwargs):
        self.response_content = kwargs.pop("response_content")
        super().__init__(*args, **kwargs)

    @property
    def parsed(self):
        return ParsedError.from_body(self.response_content)

    def __str__(self):
        return f"RequestError: {self.status}, message={self.message}, " \
               f"headers={self.headers}, body={self.response_content}"
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/aio/errors.py
errors.py
""" Asyncio client for Zyte API """
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/aio/__init__.py
__init__.py
# -*- coding: utf-8 -*- """ Zyte API retrying logic. TODO: Implement retry logic for temparary errors (520) using the proposed retry-after header. """ import asyncio import logging from aiohttp import client_exceptions from tenacity import ( wait_chain, wait_fixed, wait_random_exponential, wait_random, stop_after_attempt, stop_after_delay, retry_if_exception, RetryCallState, before_sleep_log, after_log, AsyncRetrying, before_log, retry_base, ) from tenacity.stop import stop_never from .errors import RequestError logger = logging.getLogger(__name__) _NETWORK_ERRORS = ( asyncio.TimeoutError, # could happen while reading the response body client_exceptions.ClientResponseError, client_exceptions.ClientOSError, client_exceptions.ServerConnectionError, client_exceptions.ServerDisconnectedError, client_exceptions.ServerTimeoutError, client_exceptions.ClientPayloadError, client_exceptions.ClientConnectorSSLError, client_exceptions.ClientConnectorError, ) def _is_network_error(exc: BaseException) -> bool: if isinstance(exc, RequestError): # RequestError is ClientResponseError, which is in the # _NETWORK_ERRORS list, but it should be handled # separately. return False return isinstance(exc, _NETWORK_ERRORS) def _is_throttling_error(exc: BaseException) -> bool: return isinstance(exc, RequestError) and exc.status in (429, 503) def _is_temporary_download_error(exc: BaseException) -> bool: return isinstance(exc, RequestError) and exc.status == 520 class RetryFactory: """ Build custom retry configuration """ retry_condition: retry_base = ( retry_if_exception(_is_throttling_error) | retry_if_exception(_is_network_error) | retry_if_exception(_is_temporary_download_error) ) # throttling throttling_wait = wait_chain( # always wait 20-40s first wait_fixed(20) + wait_random(0, 20), # wait 20-40s again wait_fixed(20) + wait_random(0, 20), # wait from 30 to 630s, with full jitter and exponentially # increasing max wait time wait_fixed(30) + wait_random_exponential(multiplier=1, max=600) ) # connection errors, other client and server failures network_error_wait = ( # wait from 3s to ~1m wait_random(3, 7) + wait_random_exponential(multiplier=1, max=55) ) temporary_download_error_wait = network_error_wait throttling_stop = stop_never network_error_stop = stop_after_delay(15 * 60) temporary_download_error_stop = stop_after_attempt(4) def wait(self, retry_state: RetryCallState) -> float: assert retry_state.outcome, "Unexpected empty outcome" exc = retry_state.outcome.exception() assert exc, "Unexpected empty exception" if _is_throttling_error(exc): return self.throttling_wait(retry_state=retry_state) elif _is_network_error(exc): return self.network_error_wait(retry_state=retry_state) elif _is_temporary_download_error(exc): return self.temporary_download_error_wait(retry_state=retry_state) else: raise RuntimeError("Invalid retry state exception: %s" % exc) def stop(self, retry_state: RetryCallState) -> bool: assert retry_state.outcome, "Unexpected empty outcome" exc = retry_state.outcome.exception() assert exc, "Unexpected empty exception" if _is_throttling_error(exc): return self.throttling_stop(retry_state) elif _is_network_error(exc): return self.network_error_stop(retry_state) elif _is_temporary_download_error(exc): return self.temporary_download_error_stop(retry_state) else: raise RuntimeError("Invalid retry state exception: %s" % exc) def reraise(self) -> bool: return True def build(self) -> AsyncRetrying: return AsyncRetrying( wait=self.wait, retry=self.retry_condition, stop=self.stop, reraise=self.reraise(), 
before=before_log(logger, logging.DEBUG), after=after_log(logger, logging.DEBUG), before_sleep=before_sleep_log(logger, logging.DEBUG), ) zyte_api_retrying: AsyncRetrying = RetryFactory().build()
zyte-api
/zyte-api-0.4.5.tar.gz/zyte-api-0.4.5/zyte_api/aio/retry.py
retry.py
# -*- coding: utf-8 -*- """ Basic command-line interface for Zyte Automatic Extraction. """ import argparse import json import sys import asyncio import logging import random import tqdm from autoextract import Request from autoextract.aio import ( request_parallel_as_completed, create_session ) from autoextract.stats import AggStats from autoextract.aio.client import Result from autoextract.constants import ENV_VARIABLE from autoextract.request import Query logger = logging.getLogger('autoextract') async def run(query: Query, out, n_conn, batch_size, stop_on_errors=False, api_key=None, api_endpoint=None, max_query_error_retries=0, disable_cert_validation=False): agg_stats = AggStats() async with create_session(connection_pool_size=n_conn, disable_cert_validation=disable_cert_validation) as session: result_iter = request_parallel_as_completed( query=query, n_conn=n_conn, batch_size=batch_size, session=session, api_key=api_key, endpoint=api_endpoint, agg_stats=agg_stats, max_query_error_retries=max_query_error_retries ) pbar = tqdm.tqdm(smoothing=0, leave=True, total=len(query), miniters=1, unit="url") pbar.set_postfix_str(str(agg_stats)) try: for fut in result_iter: try: batch_result: Result = await fut for res in batch_result: json.dump(res, out, ensure_ascii=False) out.write("\n") out.flush() pbar.update() except Exception as e: if stop_on_errors: raise logger.error(str(e)) finally: pbar.set_postfix_str(str(agg_stats)) finally: pbar.close() logger.info(agg_stats.summary()) def read_input(input_fp, intype, page_type): assert intype in {"txt", "jl", ""} if intype == "txt": urls = [u.strip() for u in input_fp.readlines() if u.strip()] query = [Request(url, pageType=page_type) for url in urls] return query elif intype == "jl": records = [ json.loads(line.strip()) for line in input_fp.readlines() if line.strip() ] for rec in records: rec.setdefault("pageType", page_type) if not isinstance(rec.get("meta", ""), (str, type(None))): raise TypeError("meta must be str or null, got {!r}".format(rec['meta'])) return records if __name__ == '__main__': """ Process urls from input file through Zyte Automatic Extraction """ p = argparse.ArgumentParser( prog='python -m autoextract', description=""" Process input URLs from a file using Zyte Automatic Extraction. """, ) p.add_argument("input", type=argparse.FileType("r", encoding='utf8'), help="Input file with urls, url per line by default. The " "Format can be changed using `--intype` argument.") p.add_argument("--intype", default="txt", choices=["txt", "jl"], help='Type of the input file (default: %(default)s). ' 'Allowed values are "txt": input should be one ' 'URL per line, and "jl": input should be a jsonlines ' 'file, with {"url": "...", "meta": ...,} dicts; see ' 'https://docs.zyte.com/automatic-extraction.html#requests ' 'for the data format description.') p.add_argument("--output", "-o", default=sys.stdout, type=argparse.FileType("w", encoding='utf8'), help=".jsonlines file to store extracted data. " "By default, results are printed to stdout.") p.add_argument("--n-conn", type=int, default=20, help="number of connections to the API server " "(default: %(default)s)") p.add_argument("--batch-size", type=int, default=2, help="batch size (default: %(default)s)") p.add_argument("--page-type", "-t", default="article", help="type of the pages in the input file, " "e.g. article, product, jobPosting " "(default: %(default)s)") p.add_argument("--api-key", help="Zyte Automatic Extraction API key. 
" "You can also set %s environment variable instead " "of using this option." % ENV_VARIABLE) p.add_argument("--api-endpoint", help="Zyte Automatic Extraction API endpoint.") p.add_argument("--loglevel", "-L", default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR"], help="log level") p.add_argument("--shuffle", help="Shuffle input URLs", action="store_true") p.add_argument("--max-query-error-retries", type=int, default=0, help="Max number of Query-level error retries. " "Enable Query-level error retries to increase the " "success rate at the cost of more requests being " "performed. It is recommended if you are interested " "in a higher success rate.") p.add_argument("--disable-cert-validation", action="store_true", help="Disable TSL certificate validation in HTTPS requests. " "Any certificate will be accepted. Consider the security consequences.") args = p.parse_args() logging.basicConfig(level=getattr(logging, args.loglevel)) query = read_input(args.input, args.intype, args.page_type) if args.shuffle: random.shuffle(query) logger.info(f"Loaded {len(query)} urls from {args.input.name}; shuffled: {args.shuffle}") logger.info(f"Running Zyte Automatic Extraction (connections: {args.n_conn}, " f"batch size: {args.batch_size}, page type: {args.page_type})") loop = asyncio.get_event_loop() coro = run(query, out=args.output, n_conn=args.n_conn, batch_size=args.batch_size, stop_on_errors=False, api_key=args.api_key, api_endpoint=args.api_endpoint, max_query_error_retries=args.max_query_error_retries, disable_cert_validation=args.disable_cert_validation) loop.run_until_complete(coro) loop.close()
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/__main__.py
__main__.py
# -*- coding: utf-8 -*-

# Name of the environment variable with the API key
ENV_VARIABLE = 'ZYTE_AUTOEXTRACT_KEY'

# Endpoint
API_ENDPOINT = 'https://autoextract.scrapinghub.com/v1/extract'

# Default timeout that server uses. Client timeouts should be larger than that.
API_TIMEOUT = 600

# Max batch size
API_MAX_BATCH = 100
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/constants.py
constants.py
__version__ = '0.7.1'
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/__version__.py
__version__.py
# -*- coding: utf-8 -*-
from typing import Optional
import functools
import time

import attr
from runstats import Statistics


def zero_on_division_error(meth):
    @functools.wraps(meth)
    def wrapper(*args, **kwargs):
        try:
            return meth(*args, **kwargs)
        except ZeroDivisionError:
            return 0
    return wrapper


class AggStats:
    def __init__(self):
        self.time_connect_stats = Statistics()
        self.time_total_stats = Statistics()
        self.n_results = 0
        self.n_fatal_errors = 0

        self.n_attempts = 0
        self.n_429 = 0
        self.n_errors = 0

        self.n_input_queries = 0
        self.n_extracted_queries = 0  # Queries answered without any type of error
        self.n_query_responses = 0
        self.n_billable_query_responses = 0  # Some errors are also billed

    def __str__(self):
        return "conn:{:0.2f}s, resp:{:0.2f}s, throttle:{:.1%}, err:{}+{}({:.1%}) | success:{}/{}({:.1%})".format(
            self.time_connect_stats.mean(),
            self.time_total_stats.mean(),
            self.throttle_ratio(),
            self.n_errors - self.n_fatal_errors,
            self.n_fatal_errors,
            self.error_ratio(),
            self.n_extracted_queries,
            self.n_input_queries,
            self.success_ratio()
        )

    def summary(self):
        return (
            "\n" +
            "Summary\n" +
            "-------\n" +
            "Mean connection time: {:0.2f}\n".format(self.time_connect_stats.mean()) +
            "Mean response time: {:0.2f}\n".format(self.time_total_stats.mean()) +
            "Throttle ratio: {:0.1%}\n".format(self.throttle_ratio()) +
            "Attempts: {}\n".format(self.n_attempts) +
            "Errors: {:0.1%}, fatal: {}, non fatal: {}\n".format(
                self.error_ratio(), self.n_fatal_errors,
                self.n_errors - self.n_fatal_errors) +
            "Successful URLs: {} of {}\n".format(
                self.n_extracted_queries, self.n_input_queries) +
            "Success ratio: {:0.1%}\n".format(self.success_ratio()) +
            "Billable query responses: {} of {}\n".format(
                self.n_billable_query_responses, self.n_query_responses)
        )

    @zero_on_division_error
    def throttle_ratio(self):
        return self.n_429 / self.n_attempts

    @zero_on_division_error
    def error_ratio(self):
        return self.n_errors / self.n_attempts

    @zero_on_division_error
    def success_ratio(self):
        return self.n_extracted_queries / self.n_input_queries


@attr.s
class ResponseStats:
    _start = attr.ib(repr=False)  # type: float

    # Wait time, before this request is sent. Can be large in case of retries.
    time_delayed = attr.ib(default=None)  # type: Optional[float]

    # Time between sending a request and having a connection established
    time_connect = attr.ib(default=None)  # type: Optional[float]

    # Time to read & decode the response
    time_read = attr.ib(default=None)  # type: Optional[float]

    # Total time to process the response, excluding the wait time caused
    # by retries.
    time_total = attr.ib(default=None)  # type: Optional[float]

    # HTTP status code
    status = attr.ib(default=None)  # type: Optional[int]

    # response content, in case of error response
    error = attr.ib(default=None)  # type: Optional[bytes]

    @classmethod
    def create(cls, start_global):
        start = time.perf_counter()
        return cls(
            start=start,
            time_delayed=start - start_global,
        )

    def record_connected(self, agg_stats: AggStats):
        self.time_connect = time.perf_counter() - self._start
        agg_stats.time_connect_stats.push(self.time_connect)

    def record_read(self, agg_stats: Optional[AggStats] = None):
        now = time.perf_counter()
        self.time_total = now - self._start
        self.time_read = self.time_total - (self.time_connect or 0)
        if agg_stats:
            agg_stats.time_total_stats.push(self.time_total)
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/stats.py
stats.py
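AggStats above is a bag of plain counters plus two runstats accumulators. A minimal sketch with made-up numbers (not taken from any real run) showing how the ratio helpers and the zero_on_division_error guard behave:

# Illustrative only: exercising AggStats with fabricated numbers.
from autoextract.stats import AggStats

stats = AggStats()
print(stats.error_ratio())     # 0 -- the ZeroDivisionError is swallowed by the decorator

stats.n_attempts = 10
stats.n_429 = 2
stats.n_errors = 3
stats.n_input_queries = 8
stats.n_extracted_queries = 6
print(stats.throttle_ratio())  # 0.2
print(stats.success_ratio())   # 0.75

stats.time_connect_stats.push(0.4)
stats.time_total_stats.push(1.2)
print(stats)                   # one-line progress summary shown next to the CLI progress bar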
# -*- coding: utf-8 -*-
import os
from typing import Optional

from .constants import ENV_VARIABLE


class NoApiKey(Exception):
    pass


def get_apikey(key: Optional[str] = None) -> str:
    """ Return API key, probably loading it from an environment variable """
    if key is not None:
        return key
    try:
        return os.environ[ENV_VARIABLE]
    except KeyError:
        raise NoApiKey("API key not found. Please set {} "
                       "environment variable or pass".format(ENV_VARIABLE))
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/apikey.py
apikey.py
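get_apikey above prefers an explicit argument and falls back to the ZYTE_AUTOEXTRACT_KEY environment variable. A minimal sketch of the three outcomes; the key strings are placeholders:

# Illustrative only; the key values are placeholders, not real keys.
import os

from autoextract.apikey import get_apikey, NoApiKey
from autoextract.constants import ENV_VARIABLE

assert get_apikey("my-secret-key") == "my-secret-key"  # explicit argument wins

os.environ[ENV_VARIABLE] = "key-from-env"
assert get_apikey() == "key-from-env"                  # falls back to the env variable

del os.environ[ENV_VARIABLE]
try:
    get_apikey()
except NoApiKey as exc:
    print(exc)  # explains which environment variable to set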
# -*- coding: utf-8 -*-
""" Synchronous Zyte Automatic Extraction API client. """
from typing import Optional, Dict, Any, List

import requests

from .batching import record_order, build_query, restore_order
from .constants import API_ENDPOINT, API_TIMEOUT
from .apikey import get_apikey
from .utils import user_agent
from .request import Query, query_as_dict_list


def request_raw(query: Query,
                api_key: Optional[str] = None,
                endpoint: str = API_ENDPOINT,
                ) -> List[Dict[str, Any]]:
    """ Send a request to the Zyte Automatic Extraction API.

    Query is a list of Request instances or of dicts, as described in the
    API docs (see https://docs.zyte.com/automatic-extraction.html).
    """
    auth = (get_apikey(api_key), '')
    timeout = API_TIMEOUT + 60
    headers = {'User-Agent': user_agent(requests)}
    resp = requests.post(
        endpoint,
        json=query_as_dict_list(query),
        auth=auth,
        headers=headers,
        timeout=timeout
    )
    resp.raise_for_status()
    return resp.json()


def request_batch(urls: List[str],
                  page_type: str,
                  api_key: Optional[str] = None,
                  endpoint: str = API_ENDPOINT,
                  ) -> List[Dict]:
    query = record_order(build_query(urls, page_type))
    results = request_raw(query, api_key=api_key, endpoint=endpoint)
    return restore_order(results)
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/sync.py
sync.py
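The synchronous client above is a thin wrapper over requests. A hedged usage sketch, assuming a valid API key is configured and using placeholder URLs:

# Illustrative only: requires a valid ZYTE_AUTOEXTRACT_KEY; URLs are placeholders.
from autoextract import ArticleRequest
from autoextract.sync import request_batch, request_raw

# High-level: the same page type for every URL, result order preserved.
results = request_batch(
    ["http://example.com/a", "http://example.com/b"],
    page_type="article",
)

# Lower-level: full control over each query via Request objects.
raw_results = request_raw([ArticleRequest(url="http://example.com/a")])
for result in raw_results:
    print(result.get("article", {}).get("headline"))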
# -*- coding: utf-8 -*-
from .__version__ import __version__


def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]


def user_agent(library):
    return 'zyte-autoextract/{} {}/{}'.format(
        __version__,
        library.__name__,
        library.__version__)
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/utils.py
utils.py
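chunks above is what the async client uses to split a query into batches. A quick behavioral sketch of both helpers:

# Illustrative only.
import requests

from autoextract.utils import chunks, user_agent

print(list(chunks([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]
print(user_agent(requests))              # e.g. "zyte-autoextract/0.7.1 requests/2.31.0"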
# -*- coding: utf-8 -*-
""" Helpers for batching requests """
from copy import deepcopy
from typing import List, Dict

from .constants import API_MAX_BATCH


def build_query(urls: List[str], page_type: str) -> List[Dict]:
    """ Given a list of URLs and page type, return query """
    if len(urls) > API_MAX_BATCH:
        raise ValueError("Batch size can't be greater than %s" % API_MAX_BATCH)
    return [{'url': url, 'pageType': page_type} for url in urls]


def record_order(query: List[Dict]) -> List[Dict]:
    query = deepcopy(query)
    for idx, q in enumerate(query):
        assert 'meta' not in q
        q['meta'] = str(idx)
    return query


def restore_order(results: List[Dict]) -> List[Dict]:
    return sorted(results, key=_sort_key)


def _sort_key(row):
    return int(row['query']['userQuery']['meta'])
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/batching.py
batching.py
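The batching helpers above tag each query with a positional meta value so that out-of-order API responses can be re-sorted. A round-trip sketch; the response rows are fabricated and reduced to the single field restore_order reads:

# Illustrative only; "rows" mimics just enough of the API response shape
# (query.userQuery.meta) for restore_order to work.
from autoextract.batching import build_query, record_order, restore_order

query = record_order(build_query(["http://example.com/a", "http://example.com/b"], "product"))
# -> [{'url': ..., 'pageType': 'product', 'meta': '0'}, {..., 'meta': '1'}]

rows = [
    {"query": {"userQuery": {"meta": "1"}}, "product": {"name": "B"}},
    {"query": {"userQuery": {"meta": "0"}}, "product": {"name": "A"}},
]
print([row["product"]["name"] for row in restore_order(rows)])  # ['A', 'B']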
# -*- coding: utf-8 -*-
from .request import (
    Request,
    ArticleRequest,
    ProductRequest,
    JobPostingRequest
)
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/__init__.py
__init__.py
# -*- coding: utf-8 -*-
from typing import List, Dict, Any, Union, Optional

import attr

QueryDict = Dict[str, Any]


@attr.s
class Request:
    """ A single request data for Zyte Automatic Extraction.
    See https://docs.zyte.com/automatic-extraction.html#requests

    Note that `articleBodyRaw` is set to false by default here; API itself
    defaults to true. Set `articleBodyRaw=None` to remove `articleBodyRaw`
    parameter from the request and use server default.

    You can override API params passing a dictionary with extra data using
    the `extra` argument. Note that it will overwrite any previous
    configuration made using standard attributes like `articleBodyRaw` and
    `fullHtml`.
    """
    url = attr.ib()  # type: str
    pageType = attr.ib()  # type: str
    meta = attr.ib(default=None)  # type: Optional[str]
    articleBodyRaw = attr.ib(default=False)  # type: Optional[bool]
    fullHtml = attr.ib(default=None)  # type: Optional[bool]
    extra = attr.ib(default=None)  # type: Optional[dict]

    def as_dict(self) -> QueryDict:
        d = attr.asdict(self)
        d.update(**self.extra or {})
        del d['extra']
        return {key: value for key, value in d.items() if value is not None}


Query = Union[List[Request], List[QueryDict]]


@attr.s
class ArticleRequest(Request):
    pageType = attr.ib(default='article')


@attr.s
class ProductRequest(Request):
    pageType = attr.ib(default='product')


@attr.s
class JobPostingRequest(Request):
    pageType = attr.ib(default='jobPosting')


def query_as_dict_list(query: Query) -> List[QueryDict]:
    return [
        request.as_dict() if isinstance(request, Request) else request
        for request in query
    ]
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/request.py
request.py
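Request.as_dict above drops None values and lets the extra dict override anything. A short sketch of what would actually be serialized for a few combinations:

# Illustrative only; URLs are placeholders.
from autoextract.request import Request, ArticleRequest, query_as_dict_list

print(Request("http://example.com", pageType="product").as_dict())
# {'url': 'http://example.com', 'pageType': 'product', 'articleBodyRaw': False}

print(ArticleRequest("http://example.com", articleBodyRaw=None).as_dict())
# {'url': 'http://example.com', 'pageType': 'article'}  -- server default for articleBodyRaw

print(query_as_dict_list([
    ArticleRequest("http://example.com", extra={"fullHtml": True}),
    {"url": "http://example.com/other", "pageType": "article"},  # plain dicts pass through
]))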
# -*- coding: utf-8 -*- """ aiohttp Zyte Automatic Extraction API client. """ import asyncio import time import warnings from typing import Optional, Dict, List, Iterator from functools import partial import aiohttp from aiohttp import TCPConnector from tenacity import AsyncRetrying from autoextract.constants import API_ENDPOINT, API_TIMEOUT from autoextract.apikey import get_apikey from autoextract.utils import chunks, user_agent from autoextract.request import Query, query_as_dict_list from autoextract.stats import ResponseStats, AggStats from .retry import autoextract_retrying from .errors import RequestError, _QueryError, is_billable_error_msg AIO_API_TIMEOUT = aiohttp.ClientTimeout(total=API_TIMEOUT + 60, sock_read=API_TIMEOUT + 30, sock_connect=10) def create_session(connection_pool_size=100, disable_cert_validation=False, **kwargs) -> aiohttp.ClientSession: """ Create a session with parameters suited for Zyte Automatic Extraction """ kwargs.setdefault('timeout', AIO_API_TIMEOUT) if "connector" not in kwargs: kwargs["connector"] = TCPConnector(limit=connection_pool_size, ssl=False if disable_cert_validation else None) return aiohttp.ClientSession(**kwargs) class Result(List[Dict]): retry_stats: Optional[Dict] = None response_stats: Optional[List[ResponseStats]] = None class RequestProcessor: """Help keeping track of query results and errors between retries. After initializing your Request Processor, you may use it for just a single or for multiple requests. This class is especially useful because it stores successful queries to avoid repeating them when retrying requests. """ def __init__(self, query: Query, max_retries: int = 0): """Reset temporary data structures and initialize them""" self._reset() self.pending_queries = query_as_dict_list(query) self._max_retries = max_retries self._complete_queries: List[Dict] = list() self._n_extracted_queries: int = 0 self._n_query_responses: int = 0 self._n_billable_query_responses: int = 0 def _reset(self): """Clear temporary variables between retries""" self.pending_queries: List[Dict] = list() self._retriable_queries: List[Dict] = list() self._retriable_query_exceptions: List[Dict] = list() def _enqueue_error(self, query_result, query_exception): """Enqueue Query-level error. Enqueued errors could be: - used in combination with successes with `get_latest_results` - retried using `pending_requests` """ self._retriable_queries.append(query_result) self._retriable_query_exceptions.append(query_exception) user_query = query_result["query"]["userQuery"] # Temporary workaround for a backend issue. Won't be needed soon. if 'userAgent' in user_query: del user_query['userAgent'] self.pending_queries.append(user_query) def get_latest_results(self): """Get latest results (errors + successes). This method could be used to retrieve results when an exception is raised while processing results. """ return self._complete_queries + self._retriable_queries def extracted_queries_count(self): """Number of queries extracted without any error""" return self._n_extracted_queries def query_responses_count(self): """Number of query responses received""" return self._n_query_responses def billable_query_responses_count(self): """Number of billable query responses (some errors are billable)""" return self._n_billable_query_responses def process_results(self, query_results): """Process query results. Return successful queries and also failed ones. If `self._max_retries` is greater than 0, this method might raise a `QueryError` exception. 
If multiple `QueryError` exceptions are parsed, the one with the longest timeout is raised. Successful requests are saved in `self._complete_queries` among with errors that cannot be retried, and they are kept between executions while retriable failures are saved in `self._retriable_queries`. Queries saved in `self._retriable_queries` are moved to `self.pending_queries` between executions. You can use the first or the n-th result: - You can get all queries successfully responded in the first try. - You can get all queries successfully in the n-th try. - You may stop with a partial number of successful queries. """ self._reset() for query_result in query_results: self._n_query_responses += 1 if "error" not in query_result: self._n_extracted_queries += 1 self._n_billable_query_responses += 1 else: if is_billable_error_msg(query_result["error"]): self._n_billable_query_responses += 1 if self._max_retries and "error" in query_result: query_exception = _QueryError.from_query_result( query_result, self._max_retries) if query_exception.retriable: self._enqueue_error(query_result, query_exception) continue self._complete_queries.append(query_result) if self._retriable_query_exceptions: # Prioritize exceptions that have retry seconds defined # and get the one with the longest timeout value exception_with_longest_timeout = max( self._retriable_query_exceptions, key=lambda exc: exc.retry_seconds ) raise exception_with_longest_timeout return self.get_latest_results() async def request_raw(query: Query, api_key: Optional[str] = None, endpoint: Optional[str] = None, *, handle_retries: bool = True, max_query_error_retries: int = 0, session: Optional[aiohttp.ClientSession] = None, agg_stats: AggStats = None, headers: Optional[Dict[str, str]] = None, retrying: Optional[AsyncRetrying] = None ) -> Result: """ Send a request to Zyte Automatic Extraction API. ``query`` is a list of dicts or Request objects, as described in the API docs (see https://docs.zyte.com/automatic-extraction.html). ``api_key`` is your Zyte Automatic Extraction API key. If not set, it is taken from ZYTE_AUTOEXTRACT_KEY environment variable. ``session`` is an optional aiohttp.ClientSession object; use it to enable HTTP Keep-Alive and to control connection pool size. This function retries http 429 errors and network errors by default; this allows to handle server-side throttling properly. Use ``handle_retries=False`` if you want to disable this behavior (e.g. to implement it yourself). Among others, this function can raise autoextract.errors.RequestError, if there is a Request-level error returned by the API after all attempts were exhausted. Throttling errors are retried indefinitely when handle_retries is True. When ``handle_retries=True``, we could also retry Query-level errors. Use ``max_query_error_retries > 0`` if you want to to enable this behavior. ``agg_stats`` argument allows to keep track of various stats; pass an ``AggStats`` instance, and it'll be updated. Additional ``headers`` for the API request can be provided. This headers are included in the request done against the API endpoint: they won't be used in subsequent requests for fetching the URLs provided in the query. The default retry policy can be overridden by providing a custom ``retrying`` object of type :class:`tenacity.AsyncRetrying` that can be built with the class :class:`autoextract.retry.RetryFactory`. 
The following is an example that configure 3 attempts for server type errors:: factory = RetryFactory() factory.server_error_stop = stop_after_attempt(3) retrying = factory.build() See :func:`request_parallel_as_completed` for a more high-level interface to send requests in parallel. """ endpoint = API_ENDPOINT if endpoint is None else endpoint retrying = retrying or autoextract_retrying if agg_stats is None: agg_stats = AggStats() # dummy stats, to simplify code if max_query_error_retries and not handle_retries: warnings.warn( "You've specified a max number of Query-level error retries, " "but retries are disabled. Consider passing the handle_retries " "argument as True.", stacklevel=2 ) # Keep state between executions/retries request_processor = RequestProcessor( query=query, max_retries=max_query_error_retries if handle_retries else 0, ) post = _post_func(session) auth = aiohttp.BasicAuth(get_apikey(api_key)) headers = {'User-Agent': user_agent(aiohttp), **(headers or {})} response_stats = [] start_global = time.perf_counter() async def request(): stats = ResponseStats.create(start_global) agg_stats.n_attempts += 1 post_kwargs = dict( url=endpoint, json=request_processor.pending_queries, auth=auth, headers=headers, ) try: async with post(**post_kwargs) as resp: stats.status = resp.status stats.record_connected(agg_stats) if resp.status >= 400: content = await resp.read() resp.release() stats.record_read() stats.error = content if resp.status == 429: agg_stats.n_429 += 1 else: agg_stats.n_errors += 1 raise RequestError( request_info=resp.request_info, history=resp.history, status=resp.status, message=resp.reason, headers=resp.headers, response_content=content ) response = await resp.json() stats.record_read(agg_stats) return request_processor.process_results(response) except Exception as e: if not isinstance(e, RequestError): agg_stats.n_errors += 1 raise finally: response_stats.append(stats) if handle_retries: request = retrying.wraps(request) try: # Try to make a batch request result = await request() except _QueryError: # If Tenacity fails to retry a _QueryError because the max number of # retries or a timeout was reached, get latest results combining # error and successes and consider it as the final result. result = request_processor.get_latest_results() except Exception: agg_stats.n_fatal_errors += 1 raise finally: agg_stats.n_input_queries += len(query) agg_stats.n_extracted_queries += request_processor.extracted_queries_count() agg_stats.n_billable_query_responses += request_processor.billable_query_responses_count() agg_stats.n_query_responses += request_processor.query_responses_count() result = Result(result) result.response_stats = response_stats if handle_retries and hasattr(request, 'retry'): result.retry_stats = request.retry.statistics # type: ignore agg_stats.n_results += 1 return result def request_parallel_as_completed(query: Query, api_key: Optional[str] = None, *, endpoint: Optional[str] = None, session: Optional[aiohttp.ClientSession] = None, batch_size=1, n_conn=1, agg_stats: AggStats = None, max_query_error_retries=0, ) -> Iterator[asyncio.Future]: """ Send multiple requests to Zyte Automatic Extraction API in parallel. Return an `asyncio.as_completed` iterator. ``query`` is a list of requests to process (autoextract.Request instances or dicts). ``api_key`` is your Zyte Automatic Extraction API key. If not set, it is taken from ZYTE_AUTOEXTRACT_KEY environment variable. ``n_conn`` is a number of parallel connections to a server. 
``batch_size`` is an amount of queries sent in a batch in each connection. Higher batch_size increase response time, but allows to achieve the same throughput with less connections to server. For example, if your API key has a limit of 3RPS, and average response time you observe for your websites is 10s, then to get to these 3RPS you may set e.g. batch_size=2, n_conn=15 - this would allow to process 30 requests in parallel. ``session`` is an optional aiohttp.ClientSession object; use it to enable HTTP Keep-Alive. ``agg_stats`` argument allows to keep track of various stats; pass an ``AggStats`` instance, and it'll be updated. Use ``max_query_error_retries > 0`` if you want Query-level errors to be retried. """ sem = asyncio.Semaphore(n_conn) async def _request(batch_query): async with sem: return await request_raw(batch_query, api_key=api_key, endpoint=endpoint, session=session, agg_stats=agg_stats, max_query_error_retries=max_query_error_retries, ) batches = chunks(query, batch_size) return asyncio.as_completed([_request(batch) for batch in batches]) def _post_func(session): """ Return a function to send a POST request """ if session is None: return partial(aiohttp.request, method='POST', timeout=AIO_API_TIMEOUT) else: return session.post
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/aio/client.py
client.py
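The docstrings above explain how n_conn, batch_size and a shared session interact. A hedged end-to-end sketch of the asyncio workflow, assuming the API key is set in the environment and using placeholder URLs; error handling is reduced to a catch-all for brevity:

# Illustrative only: placeholder URLs, ZYTE_AUTOEXTRACT_KEY assumed to be set.
import asyncio

from autoextract import ArticleRequest
from autoextract.aio import create_session, request_parallel_as_completed
from autoextract.stats import AggStats


async def main():
    query = [ArticleRequest(url=f"http://example.com/{i}") for i in range(10)]
    agg_stats = AggStats()
    async with create_session(connection_pool_size=5) as session:
        for fut in request_parallel_as_completed(
                query, batch_size=2, n_conn=5,
                session=session, agg_stats=agg_stats):
            try:
                batch_result = await fut
            except Exception as exc:  # RequestError, network errors, ...
                print("batch failed:", exc)
                continue
            for row in batch_result:
                print(row["query"]["userQuery"]["url"], "error" in row)
    print(agg_stats.summary())


asyncio.run(main())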
# -*- coding: utf-8 -*- import json import logging import re from json import JSONDecodeError from typing import Optional from aiohttp import ClientResponseError logger = logging.getLogger(__name__) class DomainOccupied: DOMAIN_OCCUPIED_REGEX = re.compile( r".*domain (.+) is occupied, please retry in (.+) seconds.*", re.IGNORECASE ) DEFAULT_RETRY_SECONDS = 5 * 60 # 5 minutes def __init__(self, domain: str, retry_seconds: float): self.domain = domain self.retry_seconds = retry_seconds @classmethod def from_message(cls, message: str) -> Optional["DomainOccupied"]: match = cls.DOMAIN_OCCUPIED_REGEX.match(message) if not match: return None domain = match.group(1) try: retry_seconds = float(match.group(2)) except ValueError: logger.warning( f"Could not extract retry seconds " f"from Domain Occupied error message: {message}" ) retry_seconds = cls.DEFAULT_RETRY_SECONDS return cls(domain=domain, retry_seconds=retry_seconds) class RequestError(ClientResponseError): """ Exception which is raised when Request-level error is returned. In contrast with ClientResponseError, it allows to inspect response content. https://docs.zyte.com/automatic-extraction.html#request-level """ def __init__(self, *args, **kwargs): self.response_content = kwargs.pop("response_content") super().__init__(*args, **kwargs) def error_data(self): """ Parses request error ``response_content`` """ data = {} if self.response_content: try: data = json.loads(self.response_content.decode("utf-8")) if not isinstance(data, dict): data = {} logger.warning( "Wrong JSON format for RequestError content '{}'. " "A dict was expected".format(self.response_content) ) except (JSONDecodeError, UnicodeDecodeError) as _: # noqa: F841 logger.warning( "Wrong JSON format for RequestError content '{}'".format( self.response_content) ) return data def __str__(self): return f"RequestError: {self.status}, message={self.message}, " \ f"headers={self.headers}, body={self.response_content}" _RETRIABLE_ERR_MSGS = [ "query timed out", "Downloader error: No response", "Downloader error: http50", "Downloader error: 50", "Downloader error: GlobalTimeoutError", "Downloader error: ConnectionResetByPeer", "Proxy error: banned", "Proxy error: internal_error", "Proxy error: nxdomain", "Proxy error: timeout", "Proxy error: ssl_tunnel_error", "Proxy error: msgtimeout", "Proxy error: econnrefused", "Proxy error: connect_timeout", ] _RETRIABLE_ERR_MSGS_RE = re.compile( "|".join(re.escape(msg) for msg in _RETRIABLE_ERR_MSGS), re.IGNORECASE ) def is_retriable_error_msg(msg: Optional[str]) -> bool: """True if the error is one of those that could benefit from a retry""" msg = msg or "" return bool(_RETRIABLE_ERR_MSGS_RE.search(msg)) class _QueryError(Exception): """ Exception which is raised when a Query-level error is returned. 
https://docs.zyte.com/automatic-extraction.html#query-level """ def __init__(self, query: dict, message: str, max_retries: int = 0): self.query = query self.message = message self.max_retries = max_retries self.domain_occupied = DomainOccupied.from_message(message) def __str__(self): return f"_QueryError: query={self.query}, message={self.message}, " \ f"max_retries={self.max_retries}" @classmethod def from_query_result(cls, query_result: dict, max_retries: int = 0): return cls(query=query_result["query"], message=query_result["error"], max_retries=max_retries) @property def retriable(self) -> bool: if self.domain_occupied: return True return is_retriable_error_msg(self.message) @property def retry_seconds(self) -> float: if self.domain_occupied: return self.domain_occupied.retry_seconds return 0.0 # Based on https://docs.zyte.com/automatic-extraction.html#reference _NON_BILLABLE_ERR_MSGS = [ "malformed url", "URL cannot be longer than", "non-HTTP schemas are not allowed", "Extraction not permitted for this URL", ] _NON_BILLABLE_ERR_MSGS_RE = re.compile( "|".join(re.escape(msg) for msg in _NON_BILLABLE_ERR_MSGS), re.IGNORECASE ) def is_billable_error_msg(msg: Optional[str]) -> bool: """ Return true if the error message is billable. Based on https://docs.zyte.com/automatic-extraction.html#reference >>> is_billable_error_msg(None) True >>> is_billable_error_msg("") True >>> is_billable_error_msg(" URL cannot be longer than 4096 UTF-16 characters ") False >>> is_billable_error_msg(" malformed url ") False >>> is_billable_error_msg("Domain example.com is occupied, please retry in 23.5 seconds") False """ msg = msg or "" is_domain_ocupied = bool(DomainOccupied.from_message(msg)) is_no_billable = (_NON_BILLABLE_ERR_MSGS_RE.search(msg) or is_domain_ocupied) return not is_no_billable ACCOUNT_DISABLED_ERROR_TYPE = "http://errors.xod.scrapinghub.com/account-disabled.html"
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/aio/errors.py
errors.py
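The predicates above centralize the message-matching rules used for retries and billing. A small sketch on representative messages; the messages are paraphrased examples, not captured API output:

# Illustrative only; example messages, not captured API responses.
from autoextract.aio.errors import (
    DomainOccupied, is_retriable_error_msg, is_billable_error_msg,
)

msg = "Domain example.com is occupied, please retry in 23.5 seconds"
occupied = DomainOccupied.from_message(msg)
print(occupied.domain, occupied.retry_seconds)   # example.com 23.5

print(is_retriable_error_msg("Proxy error: banned"))  # True
print(is_retriable_error_msg("malformed url"))        # False
print(is_billable_error_msg("query timed out"))       # True (billable, though retriable)
print(is_billable_error_msg("malformed url"))         # False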
# -*- coding: utf-8 -*-
from .client import (
    request_raw,
    request_parallel_as_completed,
    create_session,
    RequestError,
)
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/aio/__init__.py
__init__.py
# -*- coding: utf-8 -*- """ Zyte Automatic Extraction retrying logic. TODO: add sync support; only autoextract.aio is supported at the moment. """ import asyncio import logging from aiohttp import client_exceptions from tenacity import ( wait_chain, wait_fixed, wait_random_exponential, wait_random, stop_after_attempt, stop_after_delay, retry_if_exception, RetryCallState, before_sleep_log, after_log, AsyncRetrying, ) from tenacity.stop import stop_never from .errors import RequestError, _QueryError logger = logging.getLogger(__name__) _NETWORK_ERRORS = ( asyncio.TimeoutError, # could happen while reading the response body client_exceptions.ClientResponseError, client_exceptions.ClientOSError, client_exceptions.ServerConnectionError, client_exceptions.ServerDisconnectedError, client_exceptions.ServerTimeoutError, client_exceptions.ClientPayloadError, client_exceptions.ClientConnectorSSLError, ) def _is_network_error(exc: BaseException) -> bool: if isinstance(exc, RequestError): # RequestError is ClientResponseError, which is in the # _NETWORK_ERRORS list, but it should be handled # separately. return False return isinstance(exc, _NETWORK_ERRORS) def _is_throttling_error(exc: BaseException) -> bool: return isinstance(exc, RequestError) and exc.status == 429 def _is_server_error(exc: BaseException) -> bool: return isinstance(exc, RequestError) and exc.status >= 500 def _is_retriable_query_error(exc: BaseException) -> bool: return isinstance(exc, _QueryError) and exc.retriable and exc.max_retries > 0 class RetryFactory: """ Build custom retry configuration """ retry_condition = ( retry_if_exception(_is_throttling_error) | retry_if_exception(_is_network_error) | retry_if_exception(_is_server_error) | retry_if_exception(_is_retriable_query_error) ) # throttling throttling_wait = wait_chain( # always wait 20-40s first wait_fixed(20) + wait_random(0, 20), # wait 20-40s again wait_fixed(20) + wait_random(0, 20), # wait from 30 to 630s, with full jitter and exponentially # increasing max wait time wait_fixed(30) + wait_random_exponential(multiplier=1, max=600) ) # connection errors, other client and server failures network_error_wait = ( # wait from 3s to ~1m wait_random(3, 7) + wait_random_exponential(multiplier=1, max=55) ) server_error_wait = network_error_wait retriable_query_error_wait = network_error_wait throttling_stop = stop_never network_error_stop = stop_after_delay(15 * 60) server_error_stop = stop_after_delay(15 * 60) retryable_query_error_stop = stop_after_delay(15 * 60) def wait(self, retry_state: RetryCallState) -> float: exc: BaseException = retry_state.outcome.exception() # type: ignore if _is_throttling_error(exc): return self.throttling_wait(retry_state=retry_state) elif _is_network_error(exc): return self.network_error_wait(retry_state=retry_state) elif _is_server_error(exc): return self.server_error_wait(retry_state=retry_state) elif _is_retriable_query_error(exc): assert isinstance(exc, _QueryError) return max( exc.retry_seconds, self.retriable_query_error_wait(retry_state=retry_state) ) else: raise RuntimeError("Invalid retry state exception: %s" % exc) def stop(self, retry_state: RetryCallState) -> bool: exc: BaseException = retry_state.outcome.exception() # type: ignore if _is_throttling_error(exc): return self.throttling_stop(retry_state) elif _is_network_error(exc): return self.network_error_stop(retry_state) elif _is_server_error(exc): return self.server_error_stop(retry_state) elif _is_retriable_query_error(exc): assert isinstance(exc, _QueryError) return ( 
self.retryable_query_error_stop | stop_after_attempt(exc.max_retries + 1) )(retry_state) else: raise RuntimeError("Invalid retry state exception: %s" % exc) def before_sleep(self, retry_state: RetryCallState): return before_sleep_log(logger, logging.DEBUG) def after(self, retry_state: RetryCallState): return after_log(logger, logging.DEBUG) def reraise(self) -> bool: return True def build(self) -> AsyncRetrying: return AsyncRetrying( wait=self.wait, retry=self.retry_condition, stop=self.stop, before_sleep=self.before_sleep, after=self.after, reraise=self.reraise() ) autoextract_retrying: AsyncRetrying = RetryFactory().build()
zyte-autoextract
/zyte_autoextract-0.7.1-py3-none-any.whl/autoextract/aio/retry.py
retry.py
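RetryFactory above exposes each wait and stop policy as a class attribute. A hedged sketch of the customization pattern mentioned in the request_raw docstring, written here as a subclass (QuickFailRetryFactory is a name made up for the example):

# Illustrative only: a custom retry policy following the documented pattern.
from tenacity import stop_after_attempt, stop_after_delay

from autoextract.aio.retry import RetryFactory


class QuickFailRetryFactory(RetryFactory):
    # Give up on HTTP 5xx after 3 attempts instead of retrying for 15 minutes.
    server_error_stop = stop_after_attempt(3)
    # Cap network-error retries at 2 minutes.
    network_error_stop = stop_after_delay(2 * 60)


custom_retrying = QuickFailRetryFactory().build()
# ...then pass it explicitly: await request_raw(query, retrying=custom_retrying)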
=================
zyte-common-items
=================

.. image:: https://img.shields.io/pypi/v/zyte-common-items.svg
   :target: https://pypi.python.org/pypi/zyte-common-items
   :alt: PyPI Version

.. image:: https://img.shields.io/pypi/pyversions/zyte-common-items.svg
   :target: https://pypi.python.org/pypi/zyte-common-items
   :alt: Supported Python Versions

.. image:: https://github.com/zytedata/zyte-common-items/workflows/tox/badge.svg
   :target: https://github.com/zytedata/zyte-common-items/actions
   :alt: Build Status

.. image:: https://codecov.io/github/zytedata/zyte-common-items/coverage.svg?branch=master
   :target: https://codecov.io/gh/zytedata/zyte-common-items
   :alt: Coverage report

.. description starts

``zyte-common-items`` is a Python 3.8+ library of item_ and `page object`_
classes for web data extraction that we use at Zyte_ to maximize opportunities
for code reuse.

.. _item: https://docs.scrapy.org/en/latest/topics/items.html
.. _page object: https://web-poet.readthedocs.io/en/stable/
.. _Zyte: https://www.zyte.com/

.. description ends

* Documentation: https://zyte-common-items.readthedocs.io/en/latest/
* License: BSD 3-clause
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/README.rst
README.rst
from os.path import dirname, join

from setuptools import find_packages, setup

with open(join(dirname(__file__), "zyte_common_items/VERSION"), "rb") as f:
    version = f.read().decode("ascii").strip()

setup(
    name="zyte-common-items",
    version=version,
    description="Item definitions for Zyte API schema",
    long_description=open("README.rst").read(),
    long_description_content_type="text/x-rst",
    author="Zyte Group Ltd",
    author_email="[email protected]",
    url="https://github.com/zytedata/zyte-common-items",
    packages=find_packages(
        exclude=[
            "tests",
        ]
    ),
    package_data={
        "zyte_common_items": ["py.typed", "VERSION"],
    },
    # needs to be in sync with the tox.ini min env
    install_requires=[
        "attrs>=22.1.0",
        "itemadapter>=0.8.0",
        "price-parser>=0.3.4",
        "web-poet>=0.14.0",
        "zyte-parsers>=0.3.0",
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: Implementation :: CPython",
    ],
    python_requires=">=3.8",
)
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/setup.py
setup.py
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union from warnings import warn from weakref import WeakKeyDictionary import attrs from web_poet.page_inputs.url import _Url # Caches the attribute names for attr.s classes CLASS_ATTRS: WeakKeyDictionary = WeakKeyDictionary() def split_in_unknown_and_known_fields( data: Optional[dict], item_cls: Type ) -> Tuple[Dict, Dict]: """ Return a pair of dicts. The first one contains those elements not belonging to the attr class ``item_cls``. The second one contains the rest. That is, those attributes not belonging to ``item_cls`` class """ data = data or {} if not attrs.has(item_cls): raise ValueError(f"The cls {item_cls} is not attrs class") if item_cls not in CLASS_ATTRS: CLASS_ATTRS[item_cls] = {field.name for field in attrs.fields(item_cls)} unknown, known = split_dict(data, lambda k: k in CLASS_ATTRS[item_cls]) return unknown, known def split_dict(dict: Dict, key_pred: Callable[[Any], Any]) -> Tuple[Dict, Dict]: """Splits the dictionary in two. The first dict contains the records for which the key predicate is False and the second dict contains the rest. >>> split_dict({}, lambda k: False) ({}, {}) >>> split_dict(dict(a=1, b=2, c=3), lambda k: k != 'a') ({'a': 1}, {'b': 2, 'c': 3}) """ # noqa yes, no = {}, {} for k, v in dict.items(): if key_pred(k): yes[k] = v else: no[k] = v return (no, yes) def url_to_str(url: Union[str, _Url]) -> str: if not isinstance(url, (str, _Url)): raise ValueError( f"{url!r} is neither a string nor an instance of RequestURL or ResponseURL." ) return str(url) def format_datetime(dt): return f"{dt.isoformat(timespec='seconds')}Z" def convert_to_class(value: Any, new_cls: type) -> Any: if type(value) == new_cls: return value input_attributes = {attribute.name for attribute in attrs.fields(value.__class__)} output_attributes = {attribute.name for attribute in attrs.fields(new_cls)} shared_attributes = input_attributes & output_attributes new_value = new_cls( **{attribute: getattr(value, attribute) for attribute in shared_attributes} ) removed_nonempty_attributes = { attribute for attribute in (input_attributes - output_attributes) if getattr(value, attribute) != attrs.fields_dict(value.__class__)[attribute].default } if removed_nonempty_attributes: warn( ( f"Conversion of {value} into {new_cls} is dropping the non-default " f"values of the following attributes: " f"{removed_nonempty_attributes}." ), RuntimeWarning, ) return new_value def cast_metadata(value, cls): new_value = convert_to_class(value, cls) return new_value def metadata_processor(metadata, page): return cast_metadata(metadata, page.metadata_cls) class MetadataCaster: def __init__(self, target): self._target = target def __call__(self, value): return cast_metadata(value, self._target)
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/zyte_common_items/util.py
util.py
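split_in_unknown_and_known_fields above is what lets items carry fields that are not part of their attrs definition. A small sketch; MiniItem is a throwaway class defined only for the example:

# Illustrative only; MiniItem is a hypothetical attrs class used for demonstration.
import attrs

from zyte_common_items.util import split_in_unknown_and_known_fields


@attrs.define
class MiniItem:
    name: str
    price: str = "0"


data = {"name": "Chair", "price": "9.99", "color": "red"}
unknown, known = split_in_unknown_and_known_fields(data, MiniItem)
print(unknown)  # {'color': 'red'}  -- not an attribute of MiniItem
print(known)    # {'name': 'Chair', 'price': '9.99'}
item = MiniItem(**known)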
from collections.abc import Iterable from functools import wraps from typing import Any, Callable, List, Optional, Union from lxml.html import HtmlElement from parsel import Selector, SelectorList from web_poet.mixins import ResponseShortcutsMixin from zyte_parsers import Breadcrumb as zp_Breadcrumb from zyte_parsers import extract_brand_name, extract_breadcrumbs, extract_price from .items import Breadcrumb def _get_base_url(page: Any) -> Optional[str]: if isinstance(page, ResponseShortcutsMixin): return page.base_url return getattr(page, "url", None) def _handle_selectorlist(value: Any) -> Any: if not isinstance(value, SelectorList): return value if len(value) == 0: return None return value[0] def only_handle_nodes( f: Callable[[Union[Selector, HtmlElement], Any], Any] ) -> Callable[[Any, Any], Any]: @wraps(f) def wrapper(value: Any, page: Any) -> Any: value = _handle_selectorlist(value) if not isinstance(value, (Selector, HtmlElement)): return value result = f(value, page) return result return wrapper def breadcrumbs_processor(value: Any, page: Any) -> Any: """Convert the data into a list of :class:`~zyte_common_items.Breadcrumb` objects if possible. Supported inputs are :class:`~parsel.selector.Selector`, :class:`~parsel.selector.SelectorList`, :class:`~lxml.html.HtmlElement` and an iterable of :class:`zyte_parsers.Breadcrumb` objects. Other inputs are returned as is. """ def _from_zp_breadcrumb(value: zp_Breadcrumb) -> Breadcrumb: return Breadcrumb(name=value.name, url=value.url) value = _handle_selectorlist(value) if isinstance(value, (Selector, HtmlElement)): zp_breadcrumbs = extract_breadcrumbs(value, base_url=_get_base_url(page)) return ( [_from_zp_breadcrumb(b) for b in zp_breadcrumbs] if zp_breadcrumbs else None ) if not isinstance(value, Iterable) or isinstance(value, str): return value results: List[Any] = [] for item in value: if isinstance(item, zp_Breadcrumb): results.append(_from_zp_breadcrumb(item)) else: results.append(item) return results @only_handle_nodes def brand_processor(value: Union[Selector, HtmlElement], page: Any) -> Any: """Convert the data into a brand name if possible. Supported inputs are :class:`~parsel.selector.Selector`, :class:`~parsel.selector.SelectorList` and :class:`~lxml.html.HtmlElement`. Other inputs are returned as is. """ return extract_brand_name(value, search_depth=2) @only_handle_nodes def price_processor(value: Union[Selector, HtmlElement], page: Any) -> Any: """Convert the data into a price string if possible. Uses the price-parser_ library. Supported inputs are :class:`~parsel.selector.Selector`, :class:`~parsel.selector.SelectorList` and :class:`~lxml.html.HtmlElement`. Other inputs are returned as is. Puts the parsed Price object into ``page._parsed_price``. .. _price-parser: https://github.com/scrapinghub/price-parser """ price = extract_price(value) page._parsed_price = price if price.amount is None: return None return str(price.amount) @only_handle_nodes def simple_price_processor(value: Union[Selector, HtmlElement], page: Any) -> Any: """Convert the data into a price string if possible. Uses the price-parser_ library. Supported inputs are :class:`~parsel.selector.Selector`, :class:`~parsel.selector.SelectorList` and :class:`~lxml.html.HtmlElement`. Other inputs are returned as is. .. _price-parser: https://github.com/scrapinghub/price-parser """ price = extract_price(value) if price.amount is None: return None return str(price.amount)
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/zyte_common_items/processors.py
processors.py
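The processors above accept selectors as well as already-parsed values. A hedged sketch of price_processor on a made-up snippet; DummyPage is a stand-in page object and the exact parsed amount depends on the price-parser heuristics:

# Illustrative only: made-up markup and a dummy page object; the parsed value
# depends on the price-parser heuristics.
from parsel import Selector

from zyte_common_items.processors import price_processor


class DummyPage:
    """Stands in for a page object; price_processor stores the parsed Price on it."""


page = DummyPage()
sel = Selector(text="<span class='price'>$ 19.99</span>")
print(price_processor(sel.css(".price"), page))  # expected: "19.99"
print(page._parsed_price.currency)               # expected: "$"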
"""Classes for data nested within items.""" import base64 from typing import List, Optional, Type import attrs from zyte_common_items.base import Item from zyte_common_items.util import convert_to_class, url_to_str # Metadata #################################################################### @attrs.define(kw_only=True) class ProbabilityMetadata(Item): """Data extraction process metadata that indicates a probability.""" #: The probability (0 for 0%, 1 for 100%) that the resource features the #: expected data type. #: #: For example, if the extraction of a product from a given URL is #: requested, and that URL points to the webpage of a product with complete #: certainty, the value should be `1`. If with complete certainty the #: webpage features a job listing instead of a product, the value should be #: `0`. When there is no complete certainty, the value could be anything in #: between (e.g. `0.96`). probability: Optional[float] = 1.0 @attrs.define(kw_only=True) class _ListMetadata(Item): """Data extraction process metadata that indicates the download date. See :class:`ArticleList.metadata <zyte_common_items.ArticleList.metadata>`. """ #: Date and time when the product data was downloaded, in UTC timezone and #: the following format: ``YYYY-MM-DDThh:mm:ssZ``. dateDownloaded: Optional[str] = None @attrs.define(kw_only=True) class _DetailsMetadata(_ListMetadata): """Data extraction process metadata that indicates the download date and a probability.""" #: The probability (0 for 0%, 1 for 100%) that the resource features the #: expected data type. #: #: For example, if the extraction of a product from a given URL is #: requested, and that URL points to the webpage of a product with complete #: certainty, the value should be `1`. If with complete certainty the #: webpage features a job listing instead of a product, the value should be #: `0`. When there is no complete certainty, the value could be anything in #: between (e.g. `0.96`). probability: Optional[float] = 1.0 @attrs.define(kw_only=True) class Metadata(_DetailsMetadata): """Generic metadata class. It defines all attributes of metadata classes for specific item types, so that it can be used during extraction instead of a more specific class, and later converted to the corresponding, more specific metadata class. """ #: The search text used to find the item. searchText: Optional[str] = None @attrs.define(kw_only=True) class ArticleMetadata(_DetailsMetadata): pass @attrs.define(kw_only=True) class ArticleListMetadata(_ListMetadata): pass @attrs.define(kw_only=True) class ArticleNavigationMetadata(_ListMetadata): pass @attrs.define(kw_only=True) class BusinessPlaceMetadata(Metadata): pass @attrs.define(kw_only=True) class JobPostingMetadata(Metadata): """Metadata associated with a job posting.""" pass @attrs.define(kw_only=True) class ProductMetadata(_DetailsMetadata): pass @attrs.define(kw_only=True) class ProductListMetadata(_ListMetadata): pass @attrs.define(kw_only=True) class ProductNavigationMetadata(_ListMetadata): pass @attrs.define(kw_only=True) class RealEstateMetadata(_DetailsMetadata): pass ############################################################################### @attrs.define class _Media(Item): #: URL. #: #: When multiple URLs exist for a given media element, pointing to #: different-quality versions, the highest-quality URL should be used. #: #: `Data URIs`_ are not allowed in this attribute. #: #: .. 
_Data URIs: https://en.wikipedia.org/wiki/Data_URI_scheme url: str = attrs.field(converter=url_to_str) @attrs.define class AdditionalProperty(Item): """A name-value pair. See :attr:`Product.additionalProperties <zyte_common_items.Product.additionalProperties>`. """ #: Name. name: str #: Value. value: str @attrs.define(kw_only=True) class AggregateRating(Item): """Aggregate data about reviews and ratings. At least one of :attr:`ratingValue` or :attr:`reviewCount` is required. See :attr:`Product.aggregateRating <zyte_common_items.Product.aggregateRating>`. """ #: Maximum value of the rating system. bestRating: Optional[float] = None #: Average value of all ratings. ratingValue: Optional[float] = None #: Review count. reviewCount: Optional[int] = None @attrs.define class Audio(_Media): """Audio. See :class:`Article.audios <zyte_common_items.Article.audios>`. """ @attrs.define(kw_only=True) class Author(Item): """Author of an article. See :attr:`Article.authors <zyte_common_items.Article.authors>`. """ #: Email. email: Optional[str] = None #: URL of the details page of the author. url: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: Full name. name: Optional[str] = None #: Text from which :attr:`~zyte_common_items.Author.name` was #: extracted. nameRaw: Optional[str] = None @attrs.define class Brand(Item): """Brand. See :attr:`Product.brand <zyte_common_items.Product.brand>`. """ #: Name as it appears on the source webpage (no post-processing). name: str @attrs.define(kw_only=True) class Breadcrumb(Item): """A breadcrumb from the `breadcrumb trail`_ of a webpage. See :attr:`Product.breadcrumbs <zyte_common_items.Product.breadcrumbs>`. .. _breadcrumb trail: https://en.wikipedia.org/wiki/Breadcrumb_navigation """ #: Displayed name. name: Optional[str] = None #: Target URL. url: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) @attrs.define class Gtin(Item): """GTIN_ type-value pair. See :class:`Product.gtin <zyte_common_items.Product.gtin>`. .. _GTIN: https://en.wikipedia.org/wiki/Global_Trade_Item_Number """ #: Identifier of the GTIN format of ``value``. #: #: One of: ``"gtin13"``, ``"gtin8"``, ``"gtin14"``, ``"isbn10"``, #: ``"isbn13"``, ``"ismn"``, ``"issn"``, ``"upc"``. type: str #: Value. #: #: It should only contain digits. value: str @attrs.define class Image(_Media): """Image. See for example :class:`Product.images <zyte_common_items.Product.images>` and :class:`Product.mainImage <zyte_common_items.Product.mainImage>`. """ @attrs.define(kw_only=True) class Link(Item): """A link from a webpage to another webpage.""" #: Displayed text. text: Optional[str] = None #: Target URL. url: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) @attrs.define(kw_only=True) class NamedLink(Item): """A link from a webpage to another webpage.""" #: The name of the link. name: Optional[str] = None #: Target URL. url: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) @attrs.define(kw_only=True) class Address(Item): """Address item.""" #: The raw address information, as it appears on the website. addressRaw: Optional[str] = None #: The street address of the place. streetAddress: Optional[str] = None #: The city the place is located in. addressCity: Optional[str] = None #: The locality to which the place belongs. addressLocality: Optional[str] = None #: The region of the place. 
addressRegion: Optional[str] = None #: The country the place is located in. #: #: The country name or the `ISO 3166-1 alpha-2 country code #: <https://en.wikipedia.org/wiki/ISO_3166-1>`__. addressCountry: Optional[str] = None #: The postal code of the address. postalCode: Optional[str] = None #: The auxiliary part of the postal code. #: #: It may include a state abbreviation or town name, depending on local standards. postalCodeAux: Optional[str] = None #: Geographical latitude of the place. latitude: Optional[float] = None #: Geographical longitude of the place. longitude: Optional[float] = None @attrs.define(kw_only=True) class Amenity(Item): """An amenity that a business place has""" #: Name of amenity. name: str #: Availability of the amenity. value: bool @attrs.define(kw_only=True) class StarRating(Item): """Official star rating of a place.""" #: Star rating of the place, as it appears on the page, without processing. raw: Optional[str] = None #: Star rating value of the place. ratingValue: Optional[float] = None @attrs.define(kw_only=True) class ParentPlace(Item): """If the place is located inside another place, these are the details of the parent place.""" #: Name of the parent place. name: str #: Identifier of the parent place. placeId: str @attrs.define(kw_only=True) class OpeningHoursItem(Item): """Specification of opening hours of a business place.""" #: English weekday name. dayOfWeek: Optional[str] = None #: Opening time in ISO 8601 format, local time. opens: Optional[str] = None #: Closing time in ISO 8601 format, local time. closes: Optional[str] = None #: Day of the week, as it appears on the page, without processing. rawDayOfWeek: Optional[str] = None #: Opening time, as it appears on the page, without processing. rawOpens: Optional[str] = None #: Closing time, as it appears on the page, without processing. rawCloses: Optional[str] = None @attrs.define(kw_only=True) class RealEstateArea(Item): """Area of a place, with type, units, value and raw value.""" #: Area value: float #: Unit of the value field, one of: SQMT (square meters), SQFT (square #: feet), ACRE (acres). unitCode: str #: Type of area, one of: LOT, FLOOR areaType: Optional[str] = None #: Area in the raw format, as it appears on the website. raw: str @attrs.define(kw_only=True) class Header(Item): """An HTTP header""" #: Name of the header name: str #: Value of the header value: str @attrs.define(slots=False) class Request(Item): """Describe a web request to load a page""" #: HTTP URL url: str = attrs.field(converter=url_to_str) #: HTTP method method: str = "GET" #: HTTP request body, Base64-encoded body: Optional[str] = None #: HTTP headers headers: Optional[List[Header]] = None #: Name of the page being requested. name: Optional[str] = None _body_bytes = None @property def body_bytes(self) -> Optional[bytes]: """Request.body as bytes""" # todo: allow to set body bytes in __init__, to avoid encoding/decoding. if self._body_bytes is None: if self.body is not None: self._body_bytes = base64.b64decode(self.body) return self._body_bytes def to_scrapy(self, callback, **kwargs): """ Convert a request to scrapy.Request. All kwargs are passed to scrapy.Request as-is. """ import scrapy header_list = [(header.name, header.value) for header in self.headers or []] return scrapy.Request( url=self.url, callback=callback, method=self.method or "GET", headers=header_list, body=self.body_bytes, **kwargs ) @attrs.define class Video(_Media): """Video. See :class:`Article.videos <zyte_common_items.Article.videos>`. 
""" def cast_request(value: Request, cls: Type[Request]) -> Request: new_value = convert_to_class(value, cls) if type(value) is Request and cls is ProbabilityRequest: new_value.metadata = ProbabilityMetadata(probability=1.0) return new_value def request_list_processor(request_list): return [cast_request(request, ProbabilityRequest) for request in request_list] @attrs.define(kw_only=True) class ProbabilityRequest(Request): """A :class:`Request` that includes a probability value.""" #: Data extraction process metadata. metadata: Optional[ProbabilityMetadata] = None @attrs.define(kw_only=True) class JobLocation(Item): """Location of a job offer.""" #: Job location, as it appears on the website. raw: Optional[str] = None @attrs.define(kw_only=True) class BaseSalary(Item): """Base salary of a job offer.""" #: Salary amount as it appears on the website. raw: Optional[str] = None #: The minimum value of the base salary as a number string. valueMin: Optional[str] = None #: The maximum value of the base salary as a number string. valueMax: Optional[str] = None #: The type of rate associated with the salary, e.g. monthly, annual, daily. rateType: Optional[str] = None #: Currency associated with the salary amount. currency: Optional[str] = None #: Currency associated with the salary amount, without normalization. currencyRaw: Optional[str] = None @attrs.define(kw_only=True) class HiringOrganization(Item): """Organization that is hiring for a job offer.""" #: Name of the hiring organization. name: Optional[str] = None #: Organization information as available on the website. nameRaw: Optional[str] = None #: Identifier of the organization used by job posting website. id: Optional[str] = None
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/zyte_common_items/components.py
components.py
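Request.body_bytes and Request.to_scrapy above bridge the item form of a request and an actual crawl. A hedged sketch, assuming scrapy is installed; parse_page is a stand-in callback and the URL and body are placeholders:

# Illustrative only: requires scrapy; parse_page is a stand-in callback.
import base64

from zyte_common_items.components import Header, Request


def parse_page(response):
    """Placeholder Scrapy callback."""


request = Request(
    url="https://example.com/api/search",
    method="POST",
    body=base64.b64encode(b'{"query": "chair"}').decode(),
    headers=[Header(name="Content-Type", value="application/json")],
)
print(request.body_bytes)  # b'{"query": "chair"}'
scrapy_request = request.to_scrapy(callback=parse_page)
print(scrapy_request.method, scrapy_request.headers.get("Content-Type"))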
"""The ``Item`` class should be used as the parent class for data containers.""" from collections import ChainMap from typing import Dict, List, Optional, Union, get_args, get_origin import attrs from .util import split_in_unknown_and_known_fields _Trail = Optional[str] def is_data_container(cls_or_obj): """Used for discerning classes/instances if they are part of the Zyte Common Item definitions. """ try: return issubclass(cls_or_obj, Item) except TypeError: # must be an instance rather than a class return isinstance(cls_or_obj, Item) class _ItemBase: # Reserving an slot for _unknown_fields_dict. # This is done in a base class because otherwise attr.s won't pick it up __slots__ = ("_unknown_fields_dict",) def _get_import_path(obj: type): return f"{obj.__module__}.{obj.__qualname__}" def _extend_trail(trail: _Trail, key: Union[int, str]): if isinstance(key, str): if not trail: trail = key else: trail += f".{key}" else: assert isinstance(key, int) item = f"[{key}]" if not trail: trail = item else: trail += item return trail @attrs.define class Item(_ItemBase): def __attrs_post_init__(self): self._unknown_fields_dict = {} @classmethod def from_dict(cls, item: Optional[Dict]): """Read an item from a dictionary.""" return cls._from_dict(item) @classmethod def _from_dict(cls, item: Optional[Dict], *, trail: _Trail = None): """Read an item from a dictionary.""" if not item: return None if not isinstance(item, dict): path = _get_import_path(cls) if not trail: prefix = "Expected" else: prefix = f"Expected {trail} to be" raise ValueError(f"{prefix} a dict with fields from {path}, got {item!r}.") item = cls._apply_field_types_to_sub_fields(item, trail=trail) unknown_fields, known_fields = split_in_unknown_and_known_fields(item, cls) obj = cls(**known_fields) # type: ignore obj._unknown_fields_dict = unknown_fields return obj @classmethod def from_list(cls, items: Optional[List[Dict]], *, trail: _Trail = None) -> List: """Read items from a list.""" return cls._from_list(items) @classmethod def _from_list(cls, items: Optional[List[Dict]], *, trail: _Trail = None) -> List: """Read items from a list.""" result = [] for index, item in enumerate(items or []): index_trail = _extend_trail(trail, index) result.append(cls._from_dict(item, trail=index_trail)) return result @classmethod def _apply_field_types_to_sub_fields(cls, item: Dict, trail: _Trail = None): """This applies the correct data container class for some of the fields that need them. Specifically, this traverses recursively each field to determine the proper data container class based on the type annotations. This could handle both ``list`` and ``object`` type requirements. For example: * Article having ``breadcrumbs: List[Breadcrumb]`` * Product having ``brand: Optional[Brand]`` Moreover, fields that are not defined to be part of data container classes will be ignored. For example: * Article having ``headline: Optional[str]`` * Product having ``name: Optional[str]`` """ from_dict, from_list = {}, {} annotations = ChainMap( *(c.__annotations__ for c in cls.__mro__ if "__annotations__" in c.__dict__) ) for field, type_annotation in annotations.items(): origin = get_origin(type_annotation) is_optional = False if origin == Union: field_classes = get_args(type_annotation) if len(field_classes) != 2 or not isinstance(None, field_classes[1]): path = f"{_get_import_path(cls)}.{field}" raise ValueError( f"{path} is annotated with {type_annotation}. Fields " f"should only be annotated with one type (or " f"optional)." 
) is_optional = len(field_classes) == 2 and isinstance( None, field_classes[1] ) type_annotation = field_classes[0] origin = get_origin(type_annotation) if origin is list: value = item.get(field) if not isinstance(value, list) and not (is_optional and value is None): field_trail = _extend_trail(trail, field) raise ValueError( f"Expected {field_trail} to be a list, got " f"{value!r}." ) type_annotation = get_args(type_annotation)[0] if is_data_container(type_annotation): from_list[field] = type_annotation elif is_data_container(type_annotation): from_dict[field] = type_annotation if from_dict or from_list: item = dict(**item) for key, cls in (from_dict or {}).items(): key_trail = _extend_trail(trail, key) value = item.get(key) if value is not None and not isinstance(value, dict): path = _get_import_path(cls) raise ValueError( f"Expected {key_trail} to be a dict with fields " f"from {path}, got {value!r}." ) item[key] = cls._from_dict(value, trail=key_trail) for key, cls in (from_list or {}).items(): key_trail = _extend_trail(trail, key) value = item.get(key) if value is not None and not isinstance(value, list): path = _get_import_path(cls) raise ValueError( f"Expected {key_trail} to be a list of dicts " f"with fields from {path}, got {value!r}." ) item[key] = cls._from_list(value, trail=key_trail) return item
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/zyte_common_items/base.py
base.py
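A minimal usage sketch of ``Item.from_dict`` as defined in base.py above, assuming the ``Product``, ``Brand`` and ``Breadcrumb`` containers shipped with this package: nested dicts are cast to the annotated data container classes, and keys that match no declared field are kept in ``_unknown_fields_dict`` rather than discarded. The concrete values are invented for illustration.

from zyte_common_items import Product

data = {
    "url": "https://example.com/p/123",
    "name": "Trekking boots",
    "brand": {"name": "Acme"},  # cast to Brand via the type annotation
    "breadcrumbs": [            # cast to a list of Breadcrumb
        {"name": "Home", "url": "https://example.com/"},
        {"name": "Shoes", "url": "https://example.com/shoes"},
    ],
    "someVendorSpecificKey": "kept, not dropped",  # not a declared field
}

product = Product.from_dict(data)
assert product.brand.name == "Acme"
assert product.breadcrumbs[1].url == "https://example.com/shoes"
assert product._unknown_fields_dict["someVendorSpecificKey"] == "kept, not dropped"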
from typing import List, Optional import attrs from zyte_common_items.base import Item from zyte_common_items.components import ( AdditionalProperty, Address, AggregateRating, Amenity, ArticleListMetadata, ArticleMetadata, ArticleNavigationMetadata, Audio, Author, BaseSalary, Brand, Breadcrumb, BusinessPlaceMetadata, Gtin, HiringOrganization, Image, JobLocation, JobPostingMetadata, Link, NamedLink, OpeningHoursItem, ParentPlace, ProbabilityMetadata, ProbabilityRequest, ProductListMetadata, ProductMetadata, ProductNavigationMetadata, RealEstateArea, RealEstateMetadata, Request, StarRating, Video, cast_request, ) from zyte_common_items.util import MetadataCaster, url_to_str @attrs.define(slots=True, kw_only=True) class ArticleFromList(Item): """Article from an article list from an article listing page. See :class:`ArticleList`. """ #: Clean text of the article, including sub-headings, with newline #: separators. #: #: Format: #: #: - trimmed (no whitespace at the beginning or the end of the body #: string), #: - line breaks included, #: - no length limit, #: - no normalization of Unicode characters. articleBody: Optional[str] = None #: All authors of the article. authors: Optional[List[Author]] = None #: Publication date of the article. #: #: Format: ISO 8601 format: "YYYY-MM-DDThh:mm:ssZ" or #: "YYYY-MM-DDThh:mm:ss±zz:zz". #: #: With timezone, if available. #: #: If the actual publication date is not found, the date of the last #: modification is used instead. datePublished: Optional[str] = None #: Same date as #: :attr:`~zyte_common_items.ArticleFromList.datePublished`, but #: :before parsing/normalization, i.e. as it appears on the website. datePublishedRaw: Optional[str] = None #: Headline or title. headline: Optional[str] = None #: Language of the article, as an ISO 639-1 language code. #: #: Sometimes the article language is not the same as the web page overall #: language. inLanguage: Optional[str] = None #: Main image. mainImage: Optional[Image] = None #: All images. images: Optional[List[Image]] = None #: Data extraction process metadata. metadata: Optional[ProbabilityMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(ProbabilityMetadata)), kw_only=True # type: ignore ) #: Main URL. url: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) @attrs.define(kw_only=True) class Article(Item): #: Headline or title. headline: Optional[str] = None #: Publication date of the article. #: #: Format: ISO 8601 format: "YYYY-MM-DDThh:mm:ssZ" or #: "YYYY-MM-DDThh:mm:ss±zz:zz". #: #: With timezone, if available. #: #: If the actual publication date is not found, the value of #: :attr:`~zyte_common_items.Article.dateModified` is used instead. datePublished: Optional[str] = None #: Same date as #: :attr:`~zyte_common_items.Article.datePublished`, but #: :before parsing/normalization, i.e. as it appears on the website. datePublishedRaw: Optional[str] = None #: Date when the article was most recently modified. #: #: Format: ISO 8601 format: "YYYY-MM-DDThh:mm:ssZ" or #: "YYYY-MM-DDThh:mm:ss±zz:zz". #: #: With timezone, if available. dateModified: Optional[str] = None #: Same date as #: :attr:`~zyte_common_items.Article.dateModified`, but #: :before parsing/normalization, i.e. as it appears on the website. dateModifiedRaw: Optional[str] = None #: All authors of the article. authors: Optional[List[Author]] = None #: Webpage `breadcrumb trail`_. #: #: .. 
_Breadcrumb trail: https://en.wikipedia.org/wiki/Breadcrumb_navigation breadcrumbs: Optional[List[Breadcrumb]] = None #: Language of the article, as an ISO 639-1 language code. #: #: Sometimes the article language is not the same as the web page overall #: language. inLanguage: Optional[str] = None #: Main image. mainImage: Optional[Image] = None #: All images. images: Optional[List[Image]] = None #: A short summary of the article. #: #: It can be either human-provided (if available), or auto-generated. description: Optional[str] = None #: Clean text of the article, including sub-headings, with newline #: separators. #: #: Format: #: #: - trimmed (no whitespace at the beginning or the end of the body #: string), #: - line breaks included, #: - no length limit, #: - no normalization of Unicode characters. articleBody: Optional[str] = None #: Simplified and standardized HTML of the article, including sub-headings, #: image captions and embedded content (videos, tweets, etc.). #: #: Format: HTML string normalized in a consistent way. articleBodyHtml: Optional[str] = None #: All videos. videos: Optional[List[Video]] = None #: All audios. audios: Optional[List[Audio]] = None #: Canonical form of the URL, as indicated by the website. #: #: See also ``url``. canonicalUrl: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: The main URL of the article page. #: #: The URL of the final response, after any redirects. #: #: Required attribute. #: #: In case there is no article data on the page or the page was not #: reached, the returned "empty" item would still contain this URL field. url: str = attrs.field(converter=url_to_str) #: Data extraction process metadata. metadata: Optional[ArticleMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(ArticleMetadata)), kw_only=True # type: ignore ) @attrs.define(slots=True, kw_only=True) class ArticleList(Item): """Article list from an article listing page. The :attr:`url` attribute is the only required attribute, all other fields are optional. """ #: The main URL of the article list. #: #: The URL of the final response, after any redirects. #: #: Required attribute. #: #: In case there is no article list data on the page or the page was not #: reached, the returned item still contain this URL field and all the #: other available datapoints. url: str = attrs.field(converter=url_to_str) #: Canonical form of the URL, as indicated by the website. #: #: See also ``url``. canonicalUrl: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: List of article details found on the page. #: #: The order of the articles reflects their position on the page. articles: Optional[List[ArticleFromList]] = None #: Webpage `breadcrumb trail`_. #: #: .. _Breadcrumb trail: https://en.wikipedia.org/wiki/Breadcrumb_navigation breadcrumbs: Optional[List[Breadcrumb]] = None #: Data extraction process metadata. metadata: Optional[ArticleListMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(ArticleListMetadata)), kw_only=True # type: ignore ) @attrs.define(kw_only=True) class ProductVariant(Item): """:class:`Product` variant. See :attr:`Product.variants`. """ #: List of name-value pais of data about a specific, otherwise unmapped #: feature. #: #: Additional properties usually appear in product pages in the form of a #: specification table or a free-form specification list. 
#: #: Additional properties that require 1 or more extra requests may not be #: extracted. #: #: See also ``features``. additionalProperties: Optional[List[AdditionalProperty]] = None #: Availability status. #: #: The value is expected to be one of: ``"InStock"``, ``"OutOfStock"``. availability: Optional[str] = None #: Canonical form of the URL, as indicated by the website. #: #: See also ``url``. canonicalUrl: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: Color. #: #: It is extracted as displayed (e.g. ``"white"``). #: #: See also ``size``, ``style``. color: Optional[str] = None #: Price currency `ISO 4217`_ alphabetic code (e.g. ``"USD"``). #: #: See also ``currencyRaw``. #: #: .. _ISO 4217: https://en.wikipedia.org/wiki/ISO_4217 currency: Optional[str] = None #: Price currency as it appears on the webpage (no post-processing), e.g. #: ``"$"``. #: #: See also ``currency``. currencyRaw: Optional[str] = None #: List of standardized GTIN_ product identifiers associated with the #: product, which are unique for the product across different sellers. #: #: See also: ``mpn``, ``productId``, ``sku``. #: #: .. _GTIN: https://en.wikipedia.org/wiki/Global_Trade_Item_Number gtin: Optional[List[Gtin]] = None #: All product images. #: #: The main image (see ``mainImage``) should be first in the list. #: #: Images only displayed as part of the product description are excluded. images: Optional[List[Image]] = None #: Main product image. mainImage: Optional[Image] = None #: `Manufacturer part number (MPN)`_. #: #: A product should have the same MPN across different e-commerce websites. #: #: See also: ``gtin``, ``productId``, ``sku``. #: #: .. _Manufacturer part number (MPN): https://en.wikipedia.org/wiki/Part_number mpn: Optional[str] = None #: Name as it appears on the webpage (no post-processing). name: Optional[str] = None #: Price at which the product is being offered. #: #: It is a string with the price amount, with a full stop as decimal #: separator, and no thousands separator or currency (see ``currency`` and #: ``currencyRaw``), e.g. ``"10500.99"``. #: #: If ``regularPrice`` is not ``None``, ``price`` should always be lower #: than ``regularPrice``. price: Optional[str] = None #: Product identifier, unique within an e-commerce website. #: #: It may come in the form of an SKU or any other identifier, a hash, or #: even a URL. #: #: See also: ``gtin``, ``mpn``, ``sku``. productId: Optional[str] = None #: Price at which the product was being offered in the past, and which is #: presented as a reference next to the current price. #: #: It may be labeled as the original price, the list price, or the maximum #: retail price for which the product is sold. #: #: See ``price`` for format details. #: #: If ``regularPrice`` is not ``None``, it should always be higher than #: ``price``. regularPrice: Optional[str] = None #: Size or dimensions. #: #: Pertinent to products such as garments, shoes, accessories, etc. #: #: It is extracted as displayed (e.g. ``"XL"``). #: #: See also ``color``, ``style``. size: Optional[str] = None #: `Stock keeping unit (SKU)`_ identifier, i.e. a merchant-specific product #: identifier. #: #: See also: ``gtin``, ``mpn``, ``productId``. #: #: .. _Stock keeping unit (SKU): https://en.wikipedia.org/wiki/Stock_keeping_unit sku: Optional[str] = None #: Style. #: #: Pertinent to products such as garments, shoes, accessories, etc. #: #: It is extracted as displayed (e.g. ``"polka dots"``). 
#: #: See also ``color``, ``size``. style: Optional[str] = None #: Main URL from which the product variant data could be extracted. #: #: See also ``canonicalUrl``. url: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) @attrs.define(kw_only=True) class Product(Item): """Product from an e-commerce website. The :attr:`url` attribute is the only required attribute, all other fields are optional. """ #: List of name-value pais of data about a specific, otherwise unmapped #: feature. #: #: Additional properties usually appear in product pages in the form of a #: specification table or a free-form specification list. #: #: Additional properties that require 1 or more extra requests may not be #: extracted. #: #: See also ``features``. additionalProperties: Optional[List[AdditionalProperty]] = None #: Aggregate data about reviews and ratings. aggregateRating: Optional[AggregateRating] = None #: Availability status. #: #: The value is expected to be one of: ``"InStock"``, ``"OutOfStock"``. availability: Optional[str] = None #: Brand. brand: Optional[Brand] = None #: Webpage `breadcrumb trail`_. #: #: .. _Breadcrumb trail: https://en.wikipedia.org/wiki/Breadcrumb_navigation breadcrumbs: Optional[List[Breadcrumb]] = None #: Canonical form of the URL, as indicated by the website. #: #: See also ``url``. canonicalUrl: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: Color. #: #: It is extracted as displayed (e.g. ``"white"``). #: #: See also ``size``, ``style``. color: Optional[str] = None #: Price currency `ISO 4217`_ alphabetic code (e.g. ``"USD"``). #: #: See also ``currencyRaw``. #: #: .. _ISO 4217: https://en.wikipedia.org/wiki/ISO_4217 currency: Optional[str] = None #: Price currency as it appears on the webpage (no post-processing), e.g. #: ``"$"``. #: #: See also ``currency``. currencyRaw: Optional[str] = None #: Plain-text description. #: #: If the description is split across different parts of the source #: webpage, only the main part, containing the most useful pieces of #: information, should be extracted into this attribute. #: #: It may contain data found in other attributes (``features``, #: ``additionalProperties``). #: #: Format-wise: #: #: - Line breaks and non-ASCII characters are allowed. #: #: - There is no length limit for this attribute, the content should not #: be truncated. #: #: - There should be no whitespace at the beginning or end. #: #: See also ``descriptionHtml``. description: Optional[str] = None #: HTML description. #: #: See ``description`` for extraction details. #: #: The format is not the raw HTML from the source webpage. See the `HTML #: normalization specification`_ for details. #: #: .. _HTML normalization specification: https://docs.zyte.com/automatic-extraction/article.html#format-of-articlebodyhtml-field descriptionHtml: Optional[str] = None #: List of features. #: #: They are usually listed as bullet points in product webpages. #: #: See also ``additionalProperties``. features: Optional[List[str]] = None #: List of standardized GTIN_ product identifiers associated with the #: product, which are unique for the product across different sellers. #: #: See also: ``mpn``, ``productId``, ``sku``. #: #: .. _GTIN: https://en.wikipedia.org/wiki/Global_Trade_Item_Number gtin: Optional[List[Gtin]] = None #: All product images. #: #: The main image (see ``mainImage``) should be first in the list. 
#: #: Images only displayed as part of the product description are excluded. images: Optional[List[Image]] = None #: Main product image. mainImage: Optional[Image] = None #: Data extraction process metadata. metadata: Optional[ProductMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(ProductMetadata)), kw_only=True # type: ignore ) #: `Manufacturer part number (MPN)`_. #: #: A product should have the same MPN across different e-commerce websites. #: #: See also: ``gtin``, ``productId``, ``sku``. #: #: .. _Manufacturer part number (MPN): https://en.wikipedia.org/wiki/Part_number mpn: Optional[str] = None #: Name as it appears on the webpage (no post-processing). name: Optional[str] = None #: Price at which the product is being offered. #: #: It is a string with the price amount, with a full stop as decimal #: separator, and no thousands separator or currency (see ``currency`` and #: ``currencyRaw``), e.g. ``"10500.99"``. #: #: If ``regularPrice`` is not ``None``, ``price`` should always be lower #: than ``regularPrice``. price: Optional[str] = None # Redefined to extend the documentation. #: Product identifier, unique within an e-commerce website. #: #: It may come in the form of an SKU or any other identifier, a hash, or #: even a URL. #: #: See also: ``gtin``, ``mpn``, ``sku``. productId: Optional[str] = None #: Price at which the product was being offered in the past, and which is #: presented as a reference next to the current price. #: #: It may be labeled as the original price, the list price, or the maximum #: retail price for which the product is sold. #: #: See ``price`` for format details. #: #: If ``regularPrice`` is not ``None``, it should always be higher than #: ``price``. regularPrice: Optional[str] = None #: Size or dimensions. #: #: Pertinent to products such as garments, shoes, accessories, etc. #: #: It is extracted as displayed (e.g. ``"XL"``). #: #: See also ``color``, ``style``. size: Optional[str] = None #: `Stock keeping unit (SKU)`_ identifier, i.e. a merchant-specific product #: identifier. #: #: See also: ``gtin``, ``mpn``, ``productId``. #: #: .. _Stock keeping unit (SKU): https://en.wikipedia.org/wiki/Stock_keeping_unit sku: Optional[str] = None #: Style. #: #: Pertinent to products such as garments, shoes, accessories, etc. #: #: It is extracted as displayed (e.g. ``"polka dots"``). #: #: See also ``color``, ``size``. style: Optional[str] = None #: Main URL from which the data has been extracted. #: #: See also ``canonicalUrl``. url: str = attrs.field(converter=url_to_str) #: List of variants. #: #: When slightly different versions of a product are displayed on the same #: product page, allowing you to choose a specific product version from a #: selection, each of those product versions are considered a product #: variant. #: #: Product variants usually differ in ``color`` or ``size``. #: #: The following items are *not* considered product variants: #: #: - Different products within the same bundle of products. #: #: - Product add-ons, e.g. premium upgrades of a base product. #: #: Only variant-specific data is extracted as product variant details. For #: example, if variant-specific versions of the product description do not #: exist in the source webpage, the description attributes of the product #: variant are *not* filled with the base product description. #: #: Extracted product variants may not include those that are not visible in #: the source webpage. 
#: #: Product variant details may not include those that require multiple #: additional requests (e.g. 1 or more requests per variant). variants: Optional[List[ProductVariant]] = None @attrs.define(slots=True, kw_only=True) class ProductFromList(Item): """Product from a product list from a product listing page of an e-commerce webpage. See :class:`ProductList`. """ #: Price currency `ISO 4217`_ alphabetic code (e.g. ``"USD"``). #: #: See also ``currencyRaw``. #: #: .. _ISO 4217: https://en.wikipedia.org/wiki/ISO_4217 currency: Optional[str] = None #: Price currency as it appears on the webpage (no post-processing), e.g. #: ``"$"``. #: #: See also ``currency``. currencyRaw: Optional[str] = None #: Main product image. mainImage: Optional[Image] = None #: Data extraction process metadata. metadata: Optional[ProbabilityMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(ProbabilityMetadata)), kw_only=True # type: ignore ) #: Name as it appears on the webpage (no post-processing). name: Optional[str] = None #: Price at which the product is being offered. #: #: It is a string with the price amount, with a full stop as decimal #: separator, and no thousands separator or currency (see ``currency`` and #: ``currencyRaw``), e.g. ``"10500.99"``. #: #: If ``regularPrice`` is not ``None``, ``price`` should always be lower #: than ``regularPrice``. price: Optional[str] = None #: Product identifier, unique within an e-commerce website. #: #: It may come in the form of an SKU or any other identifier, a hash, or #: even a URL. productId: Optional[str] = None #: Price at which the product was being offered in the past, and which is #: presented as a reference next to the current price. #: #: It may be labeled as the original price, the list price, or the maximum #: retail price for which the product is sold. #: #: See ``price`` for format details. #: #: If ``regularPrice`` is not ``None``, it should always be higher than #: ``price``. regularPrice: Optional[str] = None #: Main URL from which the product data could be extracted. url: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) @attrs.define(slots=True, kw_only=True) class ProductList(Item): """Product list from a product listing page of an e-commerce webpage. It represents, for example, a single page from a category. The :attr:`url` attribute is the only required attribute, all other fields are optional. """ #: Webpage `breadcrumb trail`_. #: #: .. _Breadcrumb trail: https://en.wikipedia.org/wiki/Breadcrumb_navigation breadcrumbs: Optional[List[Breadcrumb]] = None #: Canonical form of the URL, as indicated by the website. #: #: See also ``url``. canonicalUrl: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: Name of the product listing as it appears on the webpage #: (no post-processing). #: #: For example, if the webpage is one of the pages of the Robots category, #: ``categoryName`` is ``'Robots'``. categoryName: Optional[str] = None #: Data extraction process metadata. metadata: Optional[ProductListMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(ProductListMetadata)), kw_only=True # type: ignore ) #: Current page number, if displayed explicitly on the list page. #: #: Numeration starts with 1. pageNumber: Optional[int] = None #: Link to the next page. paginationNext: Optional[Link] = None #: List of products. 
#: #: It only includes product information found in the product listing page #: itself. Product information that requires visiting each product URL is #: not meant to be covered. #: #: The order of the products reflects their position on the rendered page. #: Product order is top-to-bottom, and left-to-right or right-to-left #: depending on the webpage locale. products: Optional[List[ProductFromList]] = None #: Main URL from which the data has been extracted. #: #: See also ``canonicalUrl``. url: str = attrs.field(converter=url_to_str) @attrs.define(slots=True, kw_only=True) class BusinessPlace(Item): """Business place, with properties typically seen on maps or business listings.""" #: Unique identifier of the place on the website. placeId: Optional[str] = None #: The main URL that the place data was extracted from. #: #: The URL of the final response, after any redirects. #: #: In case there is no product data on the page or the page was not reached, the returned "empty" #: item would still contain url field and metadata field with dateDownloaded. url: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: The name of the place. name: Optional[str] = None #: List of actions that can be performed directly from the URLs on the place page, including URLs. actions: Optional[List[NamedLink]] = None #: List of name-value pais of any unmapped additional properties specific to the place. additionalProperties: Optional[List[AdditionalProperty]] = None #: The address details of the place. address: Optional[Address] = None #: The details of the reservation action, #: e.g. table reservation in case of restaurants #: or room reservation in case of hotels. reservationAction: Optional[NamedLink] = None #: List of categories the place belongs to. categories: Optional[List[str]] = None #: The description of the place. #: #: Stripped of white spaces. description: Optional[str] = None #: List of frequently mentioned features of this place. features: Optional[List[str]] = None #: URL to a map of the place. map: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: A list of URL values of all images of the place. images: Optional[List[Image]] = None #: List of amenities of the place. amenityFeatures: Optional[List[Amenity]] = None #: The overall rating, based on a collection of reviews or ratings. aggregateRating: Optional[AggregateRating] = None #: Official star rating of the place. starRating: Optional[StarRating] = None #: If the place is located inside another place, these are the details of the parent place. containedInPlace: Optional[ParentPlace] = None #: Ordered specification of opening hours, including data for opening and closing time for each day of the week. openingHours: Optional[List[OpeningHoursItem]] = None #: List of partner review sites. reviewSites: Optional[List[NamedLink]] = None #: The phone number associated with the place, as it appears on the page. telephone: Optional[str] = None #: How is the price range of the place viewed by its customers (from z to zzzz). priceRange: Optional[str] = None #: Which timezone is the place situated in. #: #: Standard: Name compliant with IANA tz database (tzdata). timezone: Optional[str] = None #: If the information is verified by the owner of this place. isVerified: Optional[bool] = None #: The URL pointing to the official website of the place. 
website: Optional[str] = attrs.field( default=None, converter=attrs.converters.optional(url_to_str), kw_only=True ) #: List of the tags associated with the place. tags: Optional[List[str]] = None #: Data extraction process metadata. metadata: Optional[BusinessPlaceMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(BusinessPlaceMetadata)), kw_only=True # type: ignore ) @attrs.define(slots=True, kw_only=True) class RealEstate(Item): #: The url of the final response, after any redirects. url: str = attrs.field(converter=url_to_str) #: Webpage `breadcrumb trail`_. #: #: .. _Breadcrumb trail: https://en.wikipedia.org/wiki/Breadcrumb_navigation breadcrumbs: Optional[List[Breadcrumb]] = None #: The identifier of the real estate, usually assigned by the seller and unique within a website, similar to product SKU. realEstateId: Optional[str] = None #: The name of the real estate. name: Optional[str] = None #: Publication date of the real estate offer. #: #: Format: ISO 8601 format: "YYYY-MM-DDThh:mm:ssZ" #: #: With timezone, if available. datePublished: Optional[str] = None #: Same date as datePublished, but before parsing/normalization, i.e. as it appears on the website. datePublishedRaw: Optional[str] = None #: The description of the real estate. #: #: Format: #: #: - trimmed (no whitespace at the beginning or the end of the description string), #: #: - line breaks included, #: #: - no length limit, #: #: - no normalization of Unicode characters, #: #: - no concatenation of description from different parts of the page. description: Optional[str] = None #: The details of the main image of the real estate. mainImage: Optional[Image] = None #: A list of URL values of all images of the real estate. images: Optional[List[Image]] = None #: The details of the address of the real estate. address: Optional[Address] = None #: Real estate area details. area: Optional[RealEstateArea] = None #: The total number of bathrooms in the real estate. numberOfBathroomsTotal: Optional[int] = None #: The number of full bathrooms in the real estate. numberOfFullBathrooms: Optional[int] = None #: The number of partial bathrooms in the real estate. numberOfPartialBathrooms: Optional[int] = None #: The number of bedrooms in the real estate. numberOfBedrooms: Optional[int] = None #: The number of rooms (excluding bathrooms and closets) of the real estate. numberOfRooms: Optional[int] = None #: Type of a trade action: buying or renting. tradeType: Optional[str] = None #: The offer price of the real estate. price: Optional[str] = None #: The rental period to which the rental price applies, only available in case of rental. Usually weekly, monthly, quarterly, yearly. rentalPeriod: Optional[str] = None #: Currency associated with the price, as appears on the page (no post-processing). currencyRaw: Optional[str] = None #: The currency of the price, in 3-letter ISO 4217 format. currency: Optional[str] = None #: A name-value pair field holding information pertaining to specific features. Usually in a form of a specification table or freeform specification list. additionalProperties: Optional[List[AdditionalProperty]] = None #: Type of the property, e.g. flat, house, land. propertyType: Optional[str] = None #: The year the real estate was built. yearBuilt: Optional[int] = None #: The URL of the virtual tour of the real estate. virtualTourUrl: Optional[str] = None #: Contains metadata about the data extraction process. 
metadata: Optional[RealEstateMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(RealEstateMetadata)), kw_only=True # type: ignore ) class RequestListCaster: def __init__(self, target): self._target = target def __call__(self, value): return [cast_request(item, self._target) for item in value] @attrs.define(kw_only=True) class ProductNavigation(Item): """Represents the navigational aspects of a product listing page on an e-commerce website""" #: Main URL from which the data is extracted. url: str = attrs.field(converter=url_to_str) #: Name of the category/page with the product list. #: #: Format: #: #: - trimmed (no whitespace at the beginning or the end of the description string) categoryName: Optional[str] = None #: List of sub-category links ordered by their position in the page. subCategories: Optional[List[ProbabilityRequest]] = attrs.field( default=None, converter=attrs.converters.optional(RequestListCaster(ProbabilityRequest)), kw_only=True # type: ignore ) #: List of product links found on the page category ordered by their position in the page. items: Optional[List[ProbabilityRequest]] = attrs.field( default=None, converter=attrs.converters.optional(RequestListCaster(ProbabilityRequest)), kw_only=True # type: ignore ) #: A link to the next page, if available. nextPage: Optional[Request] = None #: Number of the current page. #: #: It should only be extracted if the webpage shows a page number. #: #: It must be 1-based. For example, if the first page of a listing is #: numbered as 0 on the website, it should be extracted as `1` nonetheless. pageNumber: Optional[int] = None #: Data extraction process metadata. metadata: Optional[ProductNavigationMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(ProductNavigationMetadata)), kw_only=True # type: ignore ) @attrs.define(kw_only=True) class ArticleNavigation(Item): """Represents the navigational aspects of an article listing webpage. See :class:`ArticleList`. """ #: Main URL from which the data is extracted. url: str = attrs.field(converter=url_to_str) #: Name of the category/page. #: #: Format: #: #: - trimmed (no whitespace at the beginning or the end of the description string) categoryName: Optional[str] = None #: List of sub-category links ordered by their position in the page. subCategories: Optional[List[ProbabilityRequest]] = attrs.field( default=None, converter=attrs.converters.optional(RequestListCaster(ProbabilityRequest)), kw_only=True # type: ignore ) #: Links to listed items in order of appearance. items: Optional[List[ProbabilityRequest]] = attrs.field( default=None, converter=attrs.converters.optional(RequestListCaster(ProbabilityRequest)), kw_only=True # type: ignore ) #: A link to the next page, if available. nextPage: Optional[Request] = None #: Number of the current page. #: #: It should only be extracted if the webpage shows a page number. #: #: It must be 1-based. For example, if the first page of a listing is #: numbered as 0 on the website, it should be extracted as `1` nonetheless. pageNumber: Optional[int] = None #: Data extraction process metadata. metadata: Optional[ArticleNavigationMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(ArticleNavigationMetadata)), kw_only=True # type: ignore ) @attrs.define(kw_only=True) class JobPosting(Item): #: The url of the final response, after any redirects. url: str = attrs.field(converter=url_to_str) #: The identifier of the job posting. 
jobPostingId: Optional[str] = None #: Publication date of the job posting. #: #: Format: ISO 8601 format: "YYYY-MM-DDThh:mm:ssZ" #: #: With timezone, if available. datePublished: Optional[str] = None #: Same date as datePublished, but before parsing/normalization, i.e. as it appears on the website. datePublishedRaw: Optional[str] = None #: The date when the job posting was most recently modified. #: #: Format: ISO 8601 format: "YYYY-MM-DDThh:mm:ssZ" #: #: With timezone, if available. dateModified: Optional[str] = None #: Same date as dateModified, but before parsing/normalization, i.e. as it appears on the website. dateModifiedRaw: Optional[str] = None #: The date after which the job posting is not valid, e.g. the end of an offer. #: #: Format: ISO 8601 format: "YYYY-MM-DDThh:mm:ssZ" #: #: With timezone, if available. validThrough: Optional[str] = None #: Same date as validThrough, but before parsing/normalization, i.e. as it appears on the website. validThroughRaw: Optional[str] = None #: The title of the job posting. jobTitle: Optional[str] = None #: The headline of the job posting. headline: Optional[str] = None #: A (typically single) geographic location associated with the job position. jobLocation: Optional[JobLocation] = None #: A description of the job posting including sub-headings, with newline separators. #: #: Format: #: #: - trimmed (no whitespace at the beginning or the end of the description string), #: #: - line breaks included, #: #: - no length limit, #: #: - no normalization of Unicode characters. description: Optional[str] = None #: Simplified HTML of the description, including sub-headings, image captions and embedded content. descriptionHtml: Optional[str] = None #: Type of employment (e.g. full-time, part-time, contract, temporary, seasonal, internship). employmentType: Optional[str] = None #: The base salary of the job or of an employee in the proposed role. baseSalary: Optional[BaseSalary] = None #: Candidate requirements for the job. requirements: Optional[List[str]] = None #: Information about the organization offering the job position. hiringOrganization: Optional[HiringOrganization] = None #: Job start date #: #: Format: ISO 8601 format: "YYYY-MM-DDThh:mm:ssZ" #: #: With timezone, if available. jobStartDate: Optional[str] = None #: Same date as jobStartDate, but before parsing/normalization, i.e. as it appears on the website. jobStartDateRaw: Optional[str] = None #: Specifies the remote status of the position. remoteStatus: Optional[str] = None #: Contains metadata about the data extraction process. metadata: Optional[JobPostingMetadata] = attrs.field( default=None, converter=attrs.converters.optional(MetadataCaster(JobPostingMetadata)), kw_only=True # type: ignore )
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/zyte_common_items/items.py
items.py
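A construction sketch for the ``Product`` container defined in items.py above. The field names come from the class definitions in this file; the values are placeholders. ``url`` is the only required attribute, all fields are keyword-only, and URL fields are passed through the ``url_to_str`` converter.

from zyte_common_items import Brand, Product, ProductVariant

product = Product(
    url="https://example.com/p/123",  # required
    name="Trekking boots",
    price="89.99",                    # full stop as decimal separator, no currency symbol
    currency="USD",
    currencyRaw="$",
    brand=Brand(name="Acme"),
    variants=[
        ProductVariant(size="42", color="brown"),
        ProductVariant(size="43", color="brown"),
    ],
)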
"""This module offers better integration with the itemadapter package.""" from types import MappingProxyType from typing import Any, Collection, Iterator, KeysView from itemadapter.adapter import AttrsAdapter from zyte_common_items.base import Item def _is_empty(value): """Return ``True`` if the value is to be considered empty for the purpose of excluding it from serialization. Empty values include: ``None``, empty collections (tuples, lists, etc.). Non-empty values include: empty ``bytes`` or ``str``, ``False``, ``0``. *value* is assumed not to be a mapping, which should be treated as a non-empty value, but this function would treat as an empty value. """ return value is None or ( not value and not isinstance(value, (bytes, str)) and isinstance(value, Collection) ) class ZyteItemAdapter(AttrsAdapter): """Wrap an :ref:`item <items>` to interact with its content as if it was a dictionary. It can be :ref:`configured <configuration>` into itemadapter_ to improve interaction with :ref:`items <items>` for itemadapter users like Scrapy_. In extends AttrsAdapter_ with the following features: - Allows interaction and serialization of fields from :attr:`~zyte_common_items.Item._unknown_fields_dict` as if they were regular item fields. - Removes keys with empty values from the output of `ItemAdapter.asdict()`_, for a cleaner output. .. _AttrsAdapter: https://github.com/scrapy/itemadapter#built-in-adapters .. _itemadapter: https://github.com/scrapy/itemadapter#itemadapter .. _ItemAdapter.asdict(): https://github.com/scrapy/itemadapter#asdict---dict .. _Scrapy: https://scrapy.org/ """ @classmethod def is_item(cls, item: Any) -> bool: return isinstance(item, Item) def get_field_meta(self, field_name: str) -> MappingProxyType: if field_name in self._fields_dict: return self._fields_dict[field_name].metadata # type: ignore elif field_name in self.item._unknown_fields_dict: return MappingProxyType({}) raise KeyError(field_name) def field_names(self) -> KeysView: return KeysView({**self._fields_dict, **self.item._unknown_fields_dict}) def __getitem__(self, field_name: str) -> Any: if field_name in self._fields_dict: return getattr(self.item, field_name) elif field_name in self.item._unknown_fields_dict: return self.item._unknown_fields_dict[field_name] raise KeyError(field_name) def __setitem__(self, field_name: str, value: Any) -> None: if field_name in self._fields_dict: setattr(self.item, field_name, value) else: self.item._unknown_fields_dict[field_name] = value def __delitem__(self, field_name: str) -> None: if field_name in self._fields_dict: del self._fields_dict[field_name] delattr(self.item, field_name) elif field_name in self.item._unknown_fields_dict: del self.item._unknown_fields_dict[field_name] else: raise KeyError( f"Object of type {self.item.__class__.__name__} does not contain a field with name {field_name}" ) def __iter__(self) -> Iterator: fields = [ attr for attr in self._fields_dict if not _is_empty(getattr(self.item, attr)) ] fields.extend( attr for attr in self.item._unknown_fields_dict if not _is_empty(self.item._unknown_fields_dict[attr]) ) return iter(fields) class ZyteItemKeepEmptyAdapter(ZyteItemAdapter): """Similar to :class:`~.ZyteItemAdapter` but doesn't remove empty values. It is intended to be used in tests and other use cases where it's important to differentiate between empty and missing fields. """ def __iter__(self) -> Iterator: fields = [attr for attr in self._fields_dict if hasattr(self.item, attr)] fields.extend(self.item._unknown_fields_dict) return iter(fields)
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/zyte_common_items/adapter.py
adapter.py
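A usage sketch for ``ZyteItemAdapter``, assuming the usual itemadapter registration mechanism (``ItemAdapter.ADAPTER_CLASSES``). Once registered, unknown fields behave like regular fields and empty values are dropped from ``asdict()`` output, as described in the adapter docstring above.

from itemadapter import ItemAdapter

from zyte_common_items import Article, ZyteItemAdapter

# Register the adapter so ItemAdapter can handle zyte-common-items items.
ItemAdapter.ADAPTER_CLASSES.appendleft(ZyteItemAdapter)

article = Article(url="https://example.com/news/1", headline="Hello world")
adapter = ItemAdapter(article)

adapter["someUnknownField"] = "value"  # stored in _unknown_fields_dict
print(adapter.asdict())
# Roughly: {'url': 'https://example.com/news/1', 'headline': 'Hello world',
#           'someUnknownField': 'value'}  -- fields left as None are omitted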
from datetime import datetime from types import CoroutineType from typing import Generic, Optional, Type, TypeVar import attrs from price_parser import Price from web_poet import ItemPage, RequestUrl, Returns, WebPage, field from web_poet.fields import FieldsMixin from web_poet.pages import ItemT from web_poet.utils import get_generic_param from .components import ( ArticleListMetadata, ArticleMetadata, ArticleNavigationMetadata, BusinessPlaceMetadata, JobPostingMetadata, ProductListMetadata, ProductMetadata, ProductNavigationMetadata, RealEstateMetadata, request_list_processor, ) from .items import ( Article, ArticleList, ArticleNavigation, BusinessPlace, JobPosting, Product, ProductList, ProductNavigation, RealEstate, ) from .processors import ( brand_processor, breadcrumbs_processor, price_processor, simple_price_processor, ) from .util import format_datetime, metadata_processor #: Generic type for metadata classes for specific item types. MetadataT = TypeVar("MetadataT") def _date_downloaded_now(): return format_datetime(datetime.utcnow()) class HasMetadata(Generic[MetadataT]): """Inherit from this generic mixin to set the metadata class used by a page class.""" @property def metadata_cls(self) -> Optional[Type[MetadataT]]: """Metadata class.""" return _get_metadata_class(type(self)) def _get_metadata_class(cls: type) -> Optional[Type[MetadataT]]: return get_generic_param(cls, HasMetadata) class PriceMixin(FieldsMixin): """Provides price-related field implementations.""" _parsed_price: Optional[Price] = None async def _get_parsed_price(self) -> Optional[Price]: if self._parsed_price is None: # the price field wasn't executed or doesn't write _parsed_price price = getattr(self, "price", None) if isinstance(price, CoroutineType): price = await price if self._parsed_price is None: # the price field doesn't write _parsed_price (or doesn't exist) self._parsed_price = Price( amount=None, currency=None, amount_text=price ) return self._parsed_price @field def currency(self) -> Optional[str]: return getattr(self, "CURRENCY", None) @field async def currencyRaw(self) -> Optional[str]: parsed_price = await self._get_parsed_price() if parsed_price: return parsed_price.currency return None class _BasePage(ItemPage[ItemT], HasMetadata[MetadataT]): class Processors: metadata = [metadata_processor] @field def metadata(self) -> MetadataT: if self.metadata_cls is None: raise ValueError(f"{type(self)} doesn'have a metadata class configured.") value = self.metadata_cls() attributes = dir(value) if "dateDownloaded" in attributes: value.dateDownloaded = _date_downloaded_now() # type: ignore if "probability" in attributes: value.probability = 1.0 # type: ignore return value def no_item_found(self) -> ItemT: """Return an item with the current url and probability=0, indicating that the passed URL doesn't contain the expected item. Use it in your .validate_input implementation. 
""" if self.metadata_cls is None: raise ValueError(f"{type(self)} doesn'have a metadata class configured.") metadata = self.metadata_cls() metadata_attributes = dir(metadata) if "dateDownloaded" in metadata_attributes: metadata.dateDownloaded = _date_downloaded_now() # type: ignore if "probability" in metadata_attributes: metadata.probability = 0.0 # type: ignore return self.item_cls( # type: ignore url=self.url, # type: ignore[attr-defined] metadata=metadata, ) @attrs.define class BasePage(_BasePage): class Processors(_BasePage.Processors): pass request_url: RequestUrl @field def url(self) -> str: return str(self.request_url) class BaseArticlePage(BasePage, Returns[Article], HasMetadata[ArticleMetadata]): class Processors(BasePage.Processors): breadcrumbs = [breadcrumbs_processor] class BaseArticleListPage( BasePage, Returns[ArticleList], HasMetadata[ArticleListMetadata] ): class Processors(BasePage.Processors): breadcrumbs = [breadcrumbs_processor] class BaseArticleNavigationPage( BasePage, Returns[ArticleNavigation], HasMetadata[ArticleNavigationMetadata] ): pass class BaseBusinessPlacePage( BasePage, Returns[BusinessPlace], HasMetadata[BusinessPlaceMetadata] ): pass class BaseJobPostingPage( BasePage, Returns[JobPosting], HasMetadata[JobPostingMetadata] ): pass class BaseProductPage( BasePage, PriceMixin, Returns[Product], HasMetadata[ProductMetadata] ): class Processors(BasePage.Processors): brand = [brand_processor] breadcrumbs = [breadcrumbs_processor] price = [price_processor] regularPrice = [simple_price_processor] class BaseProductListPage( BasePage, Returns[ProductList], HasMetadata[ProductListMetadata] ): class Processors(BasePage.Processors): breadcrumbs = [breadcrumbs_processor] class BaseProductNavigationPage( BasePage, Returns[ProductNavigation], HasMetadata[ProductNavigationMetadata] ): class Processors(BasePage.Processors): subCategories = [request_list_processor] items = [request_list_processor] class BaseRealEstatePage( BasePage, Returns[RealEstate], HasMetadata[RealEstateMetadata] ): class Processors(BasePage.Processors): breadcrumbs = [breadcrumbs_processor] @attrs.define class Page(_BasePage, WebPage): class Processors(_BasePage.Processors): pass @field def url(self) -> str: return str(self.response.url) class ArticlePage(Page, Returns[Article], HasMetadata[ArticleMetadata]): class Processors(Page.Processors): breadcrumbs = [breadcrumbs_processor] class ArticleListPage(Page, Returns[ArticleList], HasMetadata[ArticleListMetadata]): class Processors(Page.Processors): breadcrumbs = [breadcrumbs_processor] class ArticleNavigationPage( Page, Returns[ArticleNavigation], HasMetadata[ArticleNavigationMetadata] ): pass class BusinessPlacePage( Page, Returns[BusinessPlace], HasMetadata[BusinessPlaceMetadata] ): pass class JobPostingPage(Page, Returns[JobPosting], HasMetadata[JobPostingMetadata]): pass class ProductPage(Page, PriceMixin, Returns[Product], HasMetadata[ProductMetadata]): class Processors(Page.Processors): brand = [brand_processor] breadcrumbs = [breadcrumbs_processor] price = [price_processor] regularPrice = [simple_price_processor] class ProductListPage(Page, Returns[ProductList], HasMetadata[ProductListMetadata]): class Processors(Page.Processors): breadcrumbs = [breadcrumbs_processor] class ProductNavigationPage( Page, Returns[ProductNavigation], HasMetadata[ProductNavigationMetadata] ): pass class RealEstatePage(Page, Returns[RealEstate], HasMetadata[RealEstateMetadata]): class Processors(Page.Processors): breadcrumbs = [breadcrumbs_processor]
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/zyte_common_items/pages.py
pages.py
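A sketch of how the page classes above are meant to be subclassed with web-poet fields; the CSS selectors and class name are placeholders for an assumed page layout. ``ProductPage`` supplies ``url`` and ``metadata`` from its base classes, and the ``Processors`` declared above are expected to normalize values such as breadcrumbs, brand and price; ``no_item_found()`` can be returned from a ``validate_input`` implementation when the page clearly holds no product.

import attrs
from web_poet import field

from zyte_common_items import ProductPage


@attrs.define
class ExampleProductPage(ProductPage):
    """Product page for a hypothetical example.com layout."""

    @field
    def name(self):
        return self.css("h1.product-title::text").get("").strip()

    @field
    def price(self):
        # The raw price string; the price processor declared in
        # ProductPage.Processors is expected to parse it.
        return self.css("span.price::text").get()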
# flake8: noqa
from .adapter import ZyteItemAdapter, ZyteItemKeepEmptyAdapter
from .base import Item, is_data_container
from .components import (
    AdditionalProperty, Address, AggregateRating, Amenity,
    ArticleListMetadata, ArticleMetadata, ArticleNavigationMetadata,
    Audio, Author, BaseSalary, Brand, Breadcrumb, BusinessPlaceMetadata,
    Gtin, Header, HiringOrganization, Image, JobLocation,
    JobPostingMetadata, Link, Metadata, NamedLink, OpeningHoursItem,
    ParentPlace, ProbabilityMetadata, ProbabilityRequest,
    ProductListMetadata, ProductMetadata, ProductNavigationMetadata,
    RealEstateArea, RealEstateMetadata, Request, StarRating, Video,
)
from .items import (
    Article, ArticleFromList, ArticleList, ArticleNavigation,
    BusinessPlace, JobPosting, Product, ProductFromList, ProductList,
    ProductNavigation, ProductVariant, RealEstate,
)
from .pages import (
    ArticleListPage, ArticleNavigationPage, ArticlePage,
    BaseArticleListPage, BaseArticleNavigationPage, BaseArticlePage,
    BaseBusinessPlacePage, BaseJobPostingPage, BasePage,
    BaseProductListPage, BaseProductNavigationPage, BaseProductPage,
    BaseRealEstatePage, BusinessPlacePage, HasMetadata, JobPostingPage,
    MetadataT, Page, ProductListPage, ProductNavigationPage, ProductPage,
    RealEstatePage,
)
zyte-common-items
/zyte-common-items-0.10.0.tar.gz/zyte-common-items-0.10.0/zyte_common_items/__init__.py
__init__.py
============
zyte-parsers
============

.. image:: https://img.shields.io/pypi/v/zyte-parsers.svg
    :target: https://pypi.python.org/pypi/zyte-parsers
    :alt: PyPI Version

.. image:: https://img.shields.io/pypi/pyversions/zyte-parsers.svg
    :target: https://pypi.python.org/pypi/zyte-parsers
    :alt: Supported Python Versions

.. image:: https://github.com/zytedata/zyte-parsers/workflows/tox/badge.svg
    :target: https://github.com/zytedata/zyte-parsers/actions
    :alt: Build Status

.. image:: https://codecov.io/github/zytedata/zyte-parsers/coverage.svg?branch=master
    :target: https://codecov.io/gh/zytedata/zyte-parsers
    :alt: Coverage report

.. image:: https://readthedocs.org/projects/zyte-parsers/badge/?version=stable
    :target: https://zyte-parsers.readthedocs.io/en/stable/?badge=stable
    :alt: Documentation Status

.. description starts

``zyte-parsers`` is a Python 3.7+ library that contains functions to extract
data from webpage parts.

.. description ends

* Documentation: https://zyte-parsers.readthedocs.io/en/latest/
* License: BSD 3-clause
zyte-parsers
/zyte-parsers-0.3.0.tar.gz/zyte-parsers-0.3.0/README.rst
README.rst
from typing import Union

from price_parser import Price

from zyte_parsers import SelectorOrElement
from zyte_parsers.utils import extract_text


def extract_price(
    node: SelectorOrElement,
    *,
    currency_hint: Union[SelectorOrElement, str, None] = None,
) -> Price:
    """Extract a price value from a node that contains it.

    :param node: Node including the price text.
    :param currency_hint: A string or a node that can contain currency.
        It will be passed as a hint to ``price-parser``. If currency is
        present in the price string, it could be preferred over the value
        extracted from ``currency_hint``.
    :return: The price value as a ``price_parser.Price`` object.
    """
    text = extract_text(node)
    if currency_hint is not None and not isinstance(currency_hint, str):
        currency_hint = extract_text(currency_hint)
    return Price.fromstring(text, currency_hint=currency_hint)
zyte-parsers
/zyte-parsers-0.3.0.tar.gz/zyte-parsers-0.3.0/zyte_parsers/price.py
price.py
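A small sketch of ``extract_price`` in use. The HTML snippet is invented, and the printed result in the comment reflects typical ``price-parser`` behaviour rather than a guaranteed value.

from parsel import Selector

from zyte_parsers.price import extract_price

sel = Selector(text="<span class='price'>$ 1,299.00</span>")
price = extract_price(sel.css("span.price")[0], currency_hint="$")
print(price.amount, price.currency)  # e.g. Decimal('1299.00') $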
import re import string from collections import Counter from typing import List, Optional, Tuple import attr from .api import SelectorOrElement, input_to_element from .utils import extract_link, extract_text, first_satisfying @attr.s(frozen=True, auto_attribs=True) class Breadcrumb: name: Optional[str] = None url: Optional[str] = None _PUNCTUATION_TRANS = str.maketrans("", "", string.punctuation) _BREADCRUMBS_SEP = ( "ᐊᐅ<>ᐸᐳ‹›≺≻≪≫«»⋘⋙❬❭❮❯❰❱⟨⟩⟪⟫⫷⫸〈〉《》⦉⦊⭅⭆⭠⭢←→↤↦⇐⇒⇠⇢" "⇦⇨⇽⇾⟵⟶⟸⟹⟻⟼⟽⟾⮘⮚⮜⮞⯇⯈⊲⊳◀▶◁▷◂▸◃▹◄►◅▻➜➝➞➟➠➡➢➣➤➧➨➩" "➪➫➬➭➮➯➱➲/⁄\\⟋⟍⫻⫼⫽|𐬻¦‖∣⎪⎟⎸⎹│┃┆┇┊┋❘❙❚.,+:-" ) SEP_REG_STR = rf"([{_BREADCRUMBS_SEP}]+|->)" SPLIT_REG = re.compile(rf"(^|\s+)[{_BREADCRUMBS_SEP}]+($|\s+)") SEP_REG = re.compile(rf"^{SEP_REG_STR}$") LSTRIP_SEP_REG = re.compile(rf"^{SEP_REG_STR}\s+") RSTRIP_SEP_REG = re.compile(rf"\s+{SEP_REG_STR}$") def extract_breadcrumbs( node: SelectorOrElement, *, base_url: Optional[str], max_search_depth: int = 10 ) -> Optional[Tuple[Breadcrumb, ...]]: """Extract breadcrumb items from node that represents breadcrumb component. It finds all anchor elements to specified maximal depth. Anchors are collected in pre-order traversal. Such strategy of traversing supports cases where structure of nodes representing breadcrumbs is flat, which means that breadcrumb's anchors are on the same depth of HTML structure and where breadcrumb items are nested, which means that element with next item can be a child of element with previous breadcrumb item. It also post-processes extracted breadcrumbs by using semantic markup or the location of breadcrumb separators. :param node: Node representing and including breadcrumb component. :param base_url: Base URL of site. :param max_search_depth: Max depth for searching anchors. :return: Tuple with breadcrumb items. """ def extract_breadcrumbs_rec( node, search_depth, breadcrumbs_accum, markup_hier_accum, separators_accum, list_tag_occured, curr_markup_hier, ): """ Traverse html tree and search for elements that represent breadcrumb items with maximal depth of searching equal to `max_search_depth`. It also extracts breadcrumb items from element's tails since it often happens that non-anchor items are placed without any surrounding element. Because breadcrumb elements may contain dropdowns, the function filters them out by doing the following: * does not go into nested HTML list elements (<ol> and <ul>). * does not go into any HTML list elements with classes that relate to drop down, like "dropdown", "drop-down", "DropDown", etc. For every found element it does the following clean-up: * extracts name of breadcrumb from element's text or `title` attribute. * name cannot be a single character with punctuation like "»" or "|". * is able to parse name and split it from separators. * breadcrumb item has to contain name or url. * relative URLs are joined with base URL. 
""" if node.tag in {"button"}: return if node.tag == "a" or len(node) == 0: name = first_satisfying( [ extract_text(node), node.get("title").strip() if node.get("title") else None, ] ) url = extract_link(node, base_url) left_sep, parsed_name, right_sep = _parse_breadcrumb_name(name) if left_sep and separators_accum and not separators_accum[-1]: separators_accum[-1] = left_sep if parsed_name or url: breadcrumbs_accum.append(Breadcrumb(parsed_name, url)) markup_hier_accum.append(curr_markup_hier) separators_accum.append(right_sep) else: is_list_tag = node.tag in {"ul", "ol"} skip_list_tag = is_list_tag and ( _has_special_class(node.get("class")) or list_tag_occured ) item_type = _extract_markup_type(node) if search_depth < max_search_depth and not skip_list_tag: for child in node: new_hierarchy = list(curr_markup_hier) if item_type: new_hierarchy.append(item_type) extract_breadcrumbs_rec( child, search_depth + 1, breadcrumbs_accum, markup_hier_accum, separators_accum, list_tag_occured=list_tag_occured or is_list_tag, curr_markup_hier=new_hierarchy, ) if node.tail is not None: left_sep, parsed_name, right_sep = _parse_breadcrumb_name(node.tail) if left_sep and separators_accum and not separators_accum[-1]: separators_accum[-1] = left_sep if parsed_name: breadcrumbs_accum.append(Breadcrumb(name=parsed_name)) markup_hier_accum.append(curr_markup_hier) separators_accum.append(right_sep) node = input_to_element(node) breadcrumbs: List[Breadcrumb] = [] markup_hier: List[List[str]] = [] separators: List[bool] = [] extract_breadcrumbs_rec( node, 0, breadcrumbs, markup_hier, separators, list_tag_occured=False, curr_markup_hier=[], ) assert len(breadcrumbs) == len(markup_hier) == len(separators) return _postprocess_breadcrumbs(breadcrumbs, markup_hier, separators) def _parse_breadcrumb_name( name: Optional[str], ) -> Tuple[Optional[str], Optional[str], Optional[str]]: """Split extracted name into left separator, clean name and right separator.""" if name: stripped_name = name.strip() if SEP_REG.match(stripped_name): return stripped_name.strip(), None, None left_match = LSTRIP_SEP_REG.match(stripped_name) left_sep = left_match.group().strip() if left_match else None without_left_sep = ( stripped_name[left_match.end() :] if left_match else stripped_name ) if SEP_REG.match(without_left_sep): return left_sep, None, without_left_sep.strip() right_match = RSTRIP_SEP_REG.search(without_left_sep) right_sep = right_match.group().strip() if right_match else None name = ( without_left_sep[: right_match.start()] if right_match else without_left_sep ) return left_sep, name or None, right_sep return None, None, None def _postprocess_breadcrumbs(breadcrumbs, markup_hier, separators): """ Post-process breadcrumbs using the following procedures: * If there is only a single breadcrumb with name and without link, try to split the name into separate breadcrumb items. * If markup exists, then use it for selecting correct breadcrumb items. * Otherwise, use location of separators to determine which breadcrumb items are relevant and which not (if there is separator between two items then these two items are relevant). 
""" if not breadcrumbs: return None if len(breadcrumbs) == 1 and breadcrumbs[0].name and not breadcrumbs[0].url: parts = (s.strip() for s in SPLIT_REG.split(breadcrumbs[0].name)) return tuple(Breadcrumb(name=p) for p in parts if p) markup_exists = any(len(h) > 0 for h in markup_hier) if markup_exists: breadcrumbs = _postprocess_using_markup(breadcrumbs, markup_hier) else: breadcrumbs = _postprocess_using_separators(breadcrumbs, separators) return tuple(_remove_duplicated_first_and_last_items(breadcrumbs)) def _postprocess_using_markup(breadcrumbs, markup_hier): breadcrumb_indices_with_markup = [ idx for idx, h in enumerate(markup_hier) if len(h) > 0 ] first_with_markup = min(breadcrumb_indices_with_markup, default=-1) last_with_markup = max(breadcrumb_indices_with_markup, default=-1) # often the items without markup at the beginning and the end are # respectively home and product items indices_to_leave = {first_with_markup - 1, last_with_markup + 1} return [ b for idx, (b, h) in enumerate(zip(breadcrumbs, markup_hier)) if idx in indices_to_leave or len(h) > 0 ] def _postprocess_using_separators(breadcrumbs, separators): def prev_sep(idx): return separators[idx - 1] if 0 <= idx - 1 < len(separators) else None most_common_seps = Counter(filter(None, separators)).most_common() main_sep = most_common_seps[0][0] if most_common_seps else None if not main_sep: return breadcrumbs return [ b for idx, (b, sep) in enumerate(zip(breadcrumbs, separators)) if sep == main_sep or (prev_sep(idx) == main_sep) ] def _extract_markup_type(node): def check_schema(name): for schema_attr in {"itemtype", "typeof"}: if name in node.get(schema_attr, "").lower(): return True return False if check_schema("data-vocabulary.org/breadcrumb"): return "data-vocabulary" if check_schema("listitem"): return "schema" def _remove_duplicated_first_and_last_items(breadcrumbs): """ Remove "go back" urls from the beginning or the end of breadcrumb element. There is an assumption that there can be only one such url. First it tries to remove url at the beginning by checking if there is any other the same url in further breadcrumb items. If not, it checks the last url by comparing it with remaining urls. """ first_url = breadcrumbs[0].url if first_url is not None and first_url in (b.url for b in breadcrumbs[1:] if b.url): return breadcrumbs[1:] last_url = breadcrumbs[-1].url if last_url is not None and last_url in (b.url for b in breadcrumbs[1:-1] if b.url): return breadcrumbs[:-1] return breadcrumbs def _has_special_class(class_attr: str) -> bool: """ Check if a given value of class attribute has a class that relates to drop down like "dropdown", "drop-down", "DropDown", etc. """ if class_attr: return any( cls_name in c.translate(_PUNCTUATION_TRANS).lower().strip() for cls_name in {"dropdown", "actions"} for c in class_attr.split() ) return False
zyte-parsers
/zyte-parsers-0.3.0.tar.gz/zyte-parsers-0.3.0/zyte_parsers/breadcrumbs.py
breadcrumbs.py
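For orientation, here is a minimal usage sketch for the breadcrumbs module above. It is editorial and not part of the package source; the `base_url` keyword is an assumption based on the closure variables used in `extract_breadcrumbs_rec`, and the exact output depends on the separator regexes defined earlier in the module.

# Illustrative only: extract breadcrumb items from a typical breadcrumb list.
from parsel import Selector

from zyte_parsers import extract_breadcrumbs

html = """
<ul class="breadcrumbs">
  <li><a href="/">Home</a></li>
  <li><a href="/books">Books</a></li>
  <li>Science Fiction</li>
</ul>
"""
node = Selector(text=html).css("ul.breadcrumbs")[0]
crumbs = extract_breadcrumbs(node, base_url="http://example.com")
if crumbs:
    for crumb in crumbs:
        # each item exposes .name and .url, e.g. "Books" -> http://example.com/books
        print(crumb.name, crumb.url)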
import itertools
from typing import Any, Callable, Iterable, Optional
from urllib.parse import urljoin

import html_text
from lxml.html import HtmlElement, fromstring  # noqa: F401
from parsel import Selector  # noqa: F401
from w3lib.html import strip_html5_whitespace

from zyte_parsers.api import SelectorOrElement, input_to_element


def is_js_url(url: str) -> bool:
    """Check if the URL is intended for handling by JS.

    >>> is_js_url("http://example.com")
    False
    >>> is_js_url("/foo")
    False
    >>> is_js_url("javascript:void(0)")
    True
    >>> is_js_url("#")
    True
    """
    normed = url.strip().lower()
    if normed.startswith("javascript:") or normed.startswith("#"):
        return True
    return False


def strip_urljoin(base_url: Optional[str], url: Optional[str]) -> str:
    r"""Strip the URL and use ``urljoin`` on it.

    >>> strip_urljoin("http://example.com", None)
    'http://example.com'
    >>> strip_urljoin("http://example.com", "foo")
    'http://example.com/foo'
    >>> strip_urljoin("http://example.com", " ")
    'http://example.com'
    >>> strip_urljoin("http://example.com", " foo\t")
    'http://example.com/foo'
    >>> strip_urljoin(None, "foo")
    'foo'
    >>> strip_urljoin(None, None)
    ''
    """
    if url is not None:
        url = strip_html5_whitespace(url)
    # XXX: mypy doesn't like when one passes None to urljoin
    return urljoin(base_url or "", url or "")


def extract_link(a_node: SelectorOrElement, base_url: str) -> Optional[str]:
    """
    Extract the absolute url link from an ``<a>`` HTML tag.

    >>> extract_link(fromstring("<a href=' http://example.com'"), "")
    'http://example.com'
    >>> extract_link(fromstring("<a href='/foo '"), "http://example.com")
    'http://example.com/foo'
    >>> extract_link(fromstring("<a href='' data-url='http://example.com'"), "")
    'http://example.com'
    >>> extract_link(fromstring("<a href='javascript:void(0)'"), "")
    >>> extract_link(Selector(text="<a href='http://example.com'").css("a")[0], "")
    'http://example.com'
    """
    a_node = input_to_element(a_node)
    link = a_node.get("href") or a_node.get("data-url")
    if not link or is_js_url(link):
        return None
    try:
        link = strip_urljoin(base_url, link)
    except ValueError:
        link = None
    return link


def extract_text(node: SelectorOrElement, guess_layout: bool = False) -> Optional[str]:
    """Extract text from HTML using ``html_text``.

    >>> extract_text(fromstring("<p>foo bar </p>"))
    'foo bar'
    >>> extract_text(Selector(text="<p>foo bar </p>"))
    'foo bar'
    """
    node = input_to_element(node)
    value = html_text.extract_text(node, guess_layout=guess_layout)
    if value:
        return value
    return None


def first_satisfying(
    xs: Iterable,
    condition_fun: Callable[[Any], Any] = lambda x: x,
    default: Any = None,
) -> Any:
    """Return the first item in ``xs`` that satisfies the condition.

    >>> first_satisfying([0, "", 1])
    1
    >>> first_satisfying([1, 2, 3], condition_fun=lambda x: x > 1)
    2
    >>> first_satisfying([0, ""], default=2)
    2
    """
    try:
        return next(x for x in xs if condition_fun(x))
    except StopIteration:
        return default


def iterwalk_limited(node: HtmlElement, search_depth: int) -> Iterable[HtmlElement]:
    yield node
    if search_depth <= 0:
        return
    for child in node:
        yield from iterwalk_limited(child, search_depth - 1)


def take(iterable: Iterable[Any], n: int):
    return list(itertools.islice(iterable, n))
zyte-parsers
/zyte-parsers-0.3.0.tar.gz/zyte-parsers-0.3.0/zyte_parsers/utils.py
utils.py
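A small illustrative check of the two helpers above that have no doctests, `iterwalk_limited` and `take`. This snippet is editorial and not part of the package source.

# Illustrative only: iterwalk_limited yields the node itself plus descendants
# down to the given depth; take caps how many items are consumed.
from lxml.html import fromstring

from zyte_parsers.utils import iterwalk_limited, take

root = fromstring("<div><p><span>deep</span></p><p>shallow</p></div>")
print([el.tag for el in iterwalk_limited(root, 1)])          # ['div', 'p', 'p']
print([el.tag for el in take(iterwalk_limited(root, 5), 2)])  # ['div', 'p']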
from typing import Union

from lxml.html import HtmlElement
from parsel import Selector

SelectorOrElement = Union[Selector, HtmlElement]


def input_to_selector(node: SelectorOrElement) -> Selector:
    """Convert a supported input object to a Selector."""
    if isinstance(node, Selector):
        return node
    return Selector(root=node)


def input_to_element(node: SelectorOrElement) -> HtmlElement:
    """Convert a supported input object to a HtmlElement."""
    if isinstance(node, HtmlElement):
        return node
    return node.root
zyte-parsers
/zyte-parsers-0.3.0.tar.gz/zyte-parsers-0.3.0/zyte_parsers/api.py
api.py
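A brief illustration of the conversion helpers above (editorial, not from the package): both accept either a parsel Selector or an lxml HtmlElement and normalize it to the requested type.

from lxml.html import HtmlElement, fromstring
from parsel import Selector

from zyte_parsers.api import input_to_element, input_to_selector

element = fromstring("<p>hi</p>")
selector = Selector(text="<p>hi</p>").css("p")[0]

assert isinstance(input_to_selector(element), Selector)
assert isinstance(input_to_element(selector), HtmlElement)
assert input_to_element(element) is element  # already an HtmlElement, returned as-is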
__version__ = "0.3.0"

from .api import SelectorOrElement
from .brand import extract_brand_name
from .breadcrumbs import Breadcrumb, extract_breadcrumbs
from .price import extract_price
zyte-parsers
/zyte-parsers-0.3.0.tar.gz/zyte-parsers-0.3.0/zyte_parsers/__init__.py
__init__.py
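As the re-exports above suggest, the public entry points can be imported directly from the package root (illustrative; `extract_price` comes from a `.price` module not shown in this excerpt):

from zyte_parsers import Breadcrumb, extract_brand_name, extract_breadcrumbs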
import itertools
from typing import Iterable, Optional

from lxml.html import HtmlElement

from . import SelectorOrElement
from .api import input_to_element
from .utils import extract_text, iterwalk_limited, take


def extract_brand_name(
    node: SelectorOrElement, search_depth: int = 0
) -> Optional[str]:
    """Extract a brand name from a node that contains it.

    It tries element text and image alt and title attributes.

    :param node: Node including the brand name.
    :param search_depth: Max depth for searching images.
    :return: The brand name or None.
    """
    _BRAND_LENGHT_LIMIT = 50
    node = input_to_element(node)
    extracted = _extract_brand(node, search_depth)
    short = (b for b in extracted if b and len(b) < _BRAND_LENGHT_LIMIT)
    results = take(short, 1)
    return results[0] if results else None


def _extract_brand(
    node: HtmlElement, search_depth: int = 0
) -> Iterable[Optional[str]]:
    if node.tag == "img":
        return extract_image_text(node, 0)
    value = extract_text(node)
    if value:
        return [value]
    return extract_image_text(node, search_depth)


def extract_image_text(node: HtmlElement, search_depth: int = 0) -> Iterable[str]:
    def extract_text_from_image(node: HtmlElement) -> Iterable[Optional[str]]:
        for attrib in ["alt", "title"]:
            yield (node.attrib.get(attrib) or "").strip()

    nodes = iterwalk_limited(node, search_depth)
    images = filter(lambda n: n.tag == "img", nodes)
    attribs = map(extract_text_from_image, images)
    flat_attribs = itertools.chain.from_iterable(attribs)
    valid_attribs = (a for a in flat_attribs if a)
    return valid_attribs
zyte-parsers
/zyte-parsers-0.3.0.tar.gz/zyte-parsers-0.3.0/zyte_parsers/brand.py
brand.py
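Finally, a usage sketch for the brand extractor above (editorial, not part of the source): when the node has no visible text, it falls back to image alt/title attributes found within the given search depth.

# Illustrative only: the div has no text, so the alt attribute is used.
from parsel import Selector

from zyte_parsers import extract_brand_name

node = Selector(text='<div><img src="logo.png" alt="Acme Corp"></div>').css("div")[0]
print(extract_brand_name(node, search_depth=1))  # expected: 'Acme Corp'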