code
stringlengths
501
5.19M
package
stringlengths
2
81
path
stringlengths
9
304
filename
stringlengths
4
145
from utils.message_builder import image
from utils.utils import scheduler, get_bot
from nonebot import on_message
from services.log import logger
from models.group_info import GroupInfo
from models.friend_user import FriendUser
from nonebot.adapters.onebot.v11.exception import ActionFailed
from configs.config import NICKNAME, Config
from utils.manager import group_manager
from pathlib import Path
import shutil

__zx_plugin_name__ = "定时任务相关 [Hidden]"
__plugin_version__ = 0.1
__plugin_author__ = "HibiKier"
__plugin_task__ = {'zwa': '早晚安'}

# Default on/off state of the morning/night greeting task for newly joined groups.
Config.add_plugin_config(
    "_task",
    "DEFAULT_ZWA",
    True,
    help_="被动 早晚安 进群默认开关状态",
    default_value=True,
)
Config.add_plugin_config(
    "_backup", "BACKUP_FLAG", True, help_="是否开启文件备份", default_value=True
)
Config.add_plugin_config(
    "_backup",
    "BACKUP_DIR_OR_FILE",
    ['data/black_word', 'data/configs', 'data/statistics', 'data/word_bank', 'data/manager', 'configs'],
    name="文件备份",
    help_="备份的文件夹或文件",
    default_value=[],
)

cx = on_message(priority=9, block=False)


# 早上好
@scheduler.scheduled_job(
    "cron",
    hour=6,
    minute=1,
)
async def _():
    """Send the good-morning image to every group the bot is in (06:01 daily)."""
    try:
        bot = get_bot()
        gl = await bot.get_group_list()
        gl = [g["group_id"] for g in gl]
        for g in gl:
            result = image("zao.jpg", "zhenxun")
            try:
                await bot.send_group_msg(group_id=g, message="[[_task|zwa]]早上好" + result)
            except ActionFailed:
                # The bot is muted in this group; skip it rather than abort the loop.
                logger.warning(f"{g} 群被禁言中,无法发送早安")
    except Exception as e:
        logger.error(f"早晚安错误 e:{e}")


# 睡觉了
@scheduler.scheduled_job(
    "cron",
    hour=23,
    minute=59,
)
async def _():
    """Send the good-night image to every group the bot is in (23:59 daily)."""
    try:
        bot = get_bot()
        gl = await bot.get_group_list()
        gl = [g["group_id"] for g in gl]
        for g in gl:
            result = image("sleep.jpg", "zhenxun")
            try:
                await bot.send_group_msg(
                    group_id=g, message=f"[[_task|zwa]]{NICKNAME}要睡觉了,你们也要早点睡呀" + result
                )
            except ActionFailed:
                logger.warning(f"{g} 群被禁言中,无法发送晚安")
    except Exception as e:
        logger.error(f"早晚安错误 e:{e}")


# 自动更新群组信息
@scheduler.scheduled_job(
    "cron",
    hour=3,
    minute=1,
)
async def _():
    """Refresh stored group info (name, member counts) for all groups (03:01 daily)."""
    try:
        bot = get_bot()
        gl = await bot.get_group_list()
        gl = [g["group_id"] for g in gl]
        for g in gl:
            group_info = await bot.get_group_info(group_id=g)
            await GroupInfo.add_group_info(
                group_info["group_id"],
                group_info["group_name"],
                group_info["max_member_count"],
                group_info["member_count"],
            )
            logger.info(f"自动更新群组 {g} 信息成功")
    except Exception as e:
        logger.error(f"自动更新群组信息错误 e:{e}")


# 自动更新好友信息
@scheduler.scheduled_job(
    "cron",
    hour=3,
    minute=1,
)
async def _():
    """Refresh stored friend info (nickname) for all friends (03:01 daily)."""
    try:
        bot = get_bot()
        fl = await bot.get_friend_list()
        for f in fl:
            if await FriendUser.add_friend_info(f["user_id"], f["nickname"]):
                logger.info(f'自动更新好友 {f["user_id"]} 信息成功')
            else:
                logger.warning(f'自动更新好友 {f["user_id"]} 信息失败')
    except Exception as e:
        # Fixed: this job updates friends, not groups — the old message said 群组.
        logger.error(f"自动更新好友信息错误 e:{e}")


# 自动备份
@scheduler.scheduled_job(
    "cron",
    hour=3,
    minute=25,
)
async def _():
    """Copy each configured file/directory into ./backup (03:25 daily).

    Directories are replaced wholesale (rmtree + copytree); single files are
    unlinked then copied.  Each entry is backed up best-effort so one failure
    does not stop the rest.
    """
    if Config.get_config("_backup", "BACKUP_FLAG"):
        _backup_path = Path() / 'backup'
        _backup_path.mkdir(exist_ok=True, parents=True)
        for x in Config.get_config("_backup", "BACKUP_DIR_OR_FILE"):
            try:
                path = Path(x)
                _p = _backup_path / x
                if path.exists():
                    if path.is_dir():
                        if _p.exists():
                            shutil.rmtree(_p, ignore_errors=True)
                        shutil.copytree(x, _p)
                    else:
                        if _p.exists():
                            _p.unlink()
                        shutil.copy(x, _p)
                    logger.info(f'已完成自动备份:{x}')
            except Exception as e:
                logger.error(f"自动备份文件 {x} 发生错误 {type(e)}:{e}")
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/basic_plugins/apscheduler/__init__.py
__init__.py
from pathlib import Path
from ruamel.yaml import YAML
from utils.manager import plugins_manager
from utils.utils import get_matchers
import nonebot

try:
    import ujson as json
except ModuleNotFoundError:
    import json

_yaml = YAML(typ="safe")


def init_plugins_data(data_path):
    """
    初始化插件数据信息

    Scans every registered matcher, reads its module-level metadata
    (``__zx_plugin_name__``, ``__plugin_version__``, ``__plugin_author__``)
    and syncs it into ``plugins_manager``, merging any state previously
    persisted in ``data/manager/plugin_manager.json``.
    """
    plugin2data_file = data_path / "manager" / "plugin_manager.json"
    plugin2data_file.parent.mkdir(parents=True, exist_ok=True)
    _data = {}
    if plugin2data_file.exists():
        # Fixed: the original json.load(open(...)) leaked the file handle.
        with open(plugin2data_file, "r", encoding="utf8") as f:
            _data = json.load(f)
    _matchers = get_matchers()
    for matcher in _matchers:
        _plugin = nonebot.plugin.get_plugin(matcher.plugin_name)
        try:
            _module = _plugin.module
        except AttributeError:
            # The plugin failed to load: record it as errored, keeping any
            # version info we already have on file.
            if matcher.plugin_name not in _data.keys():
                plugins_manager.add_plugin_data(
                    matcher.plugin_name, matcher.plugin_name, error=True
                )
            else:
                plugins_manager.set_module_data(matcher.plugin_name, "error", True)
                plugin_data = plugins_manager.get(matcher.plugin_name)
                if plugin_data:
                    plugins_manager.set_module_data(
                        matcher.plugin_name, "version", plugin_data.get("version")
                    )
        else:
            try:
                plugin_version = _module.__getattribute__("__plugin_version__")
            except AttributeError:
                plugin_version = None
            try:
                plugin_name = _module.__getattribute__("__zx_plugin_name__")
            except AttributeError:
                plugin_name = matcher.plugin_name
            try:
                plugin_author = _module.__getattribute__("__plugin_author__")
            except AttributeError:
                plugin_author = None
            if matcher.plugin_name in plugins_manager.keys():
                plugins_manager.set_module_data(matcher.plugin_name, "error", False)
            if matcher.plugin_name not in plugins_manager.keys():
                plugins_manager.add_plugin_data(
                    matcher.plugin_name,
                    plugin_name=plugin_name,
                    author=plugin_author,
                    version=plugin_version,
                )
            elif plugins_manager[matcher.plugin_name]["version"] is None or (
                plugin_version is not None
                and plugin_version > plugins_manager[matcher.plugin_name]["version"]
            ):
                # Module declares a newer version than what we have stored:
                # refresh name / author / version.
                plugins_manager.set_module_data(
                    matcher.plugin_name, "plugin_name", plugin_name
                )
                plugins_manager.set_module_data(
                    matcher.plugin_name, "author", plugin_author
                )
                plugins_manager.set_module_data(
                    matcher.plugin_name, "version", plugin_version
                )
            if matcher.plugin_name in _data.keys():
                # Persisted state wins for error flag and display name.
                plugins_manager.set_module_data(
                    matcher.plugin_name, "error", _data[matcher.plugin_name]["error"]
                )
                plugins_manager.set_module_data(
                    matcher.plugin_name,
                    "plugin_name",
                    _data[matcher.plugin_name]["plugin_name"],
                )
    plugins_manager.save()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/basic_plugins/init_plugin_config/init_plugins_data.py
init_plugins_data.py
from pathlib import Path
from utils.manager import group_manager
from services.db_context import db
from asyncpg.exceptions import DuplicateColumnError
from services.log import logger

try:
    import ujson as json
except ModuleNotFoundError:
    import json

try:
    from models.group_remind import GroupRemind
except ModuleNotFoundError:
    pass


async def init_group_manager():
    """
    旧数据格式替换为新格式 初始化数据

    Migrates legacy JSON files (group_level.json, plugin_list.json) and the
    deprecated ``group_reminds`` DB table into ``group_manager``, deleting
    each legacy source once it has been absorbed.
    """
    old_group_level_file = Path() / "data" / "manager" / "group_level.json"
    old_plugin_list_file = Path() / "data" / "manager" / "plugin_list.json"
    if old_group_level_file.exists():
        # Fixed: json.load(open(...)) leaked the file handle.
        with open(old_group_level_file, "r", encoding="utf8") as f:
            data = json.load(f)
        for key in data.keys():
            group = key
            level = data[key]
            group_manager.set_group_level(group, level)
        old_group_level_file.unlink()
        group_manager.save()
    if old_plugin_list_file.exists():
        with open(old_plugin_list_file, "r", encoding="utf8") as f:
            data = json.load(f)
        for plugin in data.keys():
            for group in data[plugin].keys():
                if group == "default" and not data[plugin]["default"]:
                    group_manager.block_plugin(plugin)
                elif not data[plugin][group]:
                    group_manager.block_plugin(plugin, group)
        old_plugin_list_file.unlink()
    old_data_table = Path() / "models" / "group_remind.py"
    try:
        if old_data_table.exists():
            # Mapping from legacy remind codes to new group-task names.
            b = {
                "hy": "group_welcome",
                "kxcz": "open_case_reset_remind",
                "zwa": "zwa",
                "blpar": "bilibili_parse",
                "epic": "epic_free_game",
                "pa": "pa",
                "almanac": "genshin_alc",
            }
            for group in group_manager.get_data()["group_manager"]:
                for remind in b:
                    try:
                        status = await GroupRemind.get_status(int(group), remind)
                        if status is not None:
                            if status:
                                await group_manager.open_group_task(group, b[remind])
                                logger.info(f"读取旧数据-->{group} 开启 {b[remind]}")
                            else:
                                await group_manager.close_group_task(group, b[remind])
                                logger.info(f"读取旧数据-->{group} 关闭 {b[remind]}")
                    except Exception:
                        # Best-effort per-entry migration: a bad row must not
                        # abort the whole import.
                        pass
            query = db.text("DROP TABLE group_reminds;")
            await db.first(query)
            old_data_table.unlink()
            logger.info("旧数据读取完毕,删除了舍弃表 group_reminds...")
    except (ModuleNotFoundError, DuplicateColumnError):
        pass
    group_manager.save()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/basic_plugins/init_plugin_config/init_group_manager.py
init_group_manager.py
from pathlib import Path
from ruamel.yaml import round_trip_load, round_trip_dump, YAML
from utils.manager import (
    plugins2cd_manager,
    plugins2block_manager,
    plugins2count_manager,
)
from utils.utils import get_matchers
from ruamel import yaml
import nonebot

_yaml = YAML(typ="safe")


def init_plugins_cd_limit(data_path):
    """
    加载 cd 限制

    Collects ``__plugin_cd_limit__`` declarations from every matcher's module,
    dumps the merged data to ``configs/plugins2cd.yaml`` with an explanatory
    header comment, and reloads the manager from that file.
    """
    plugins2cd_file = data_path / "configs" / "plugins2cd.yaml"
    plugins2cd_file.parent.mkdir(exist_ok=True, parents=True)
    _data = {}
    _matchers = get_matchers()
    for matcher in _matchers:
        if not plugins2cd_manager.get_plugin_cd_data(matcher.plugin_name):
            _plugin = nonebot.plugin.get_plugin(matcher.plugin_name)
            try:
                _module = _plugin.module
                plugin_cd_limit = _module.__getattribute__("__plugin_cd_limit__")
                plugins2cd_manager.add_cd_limit(
                    matcher.plugin_name, data_dict=plugin_cd_limit
                )
            except AttributeError:
                # No cd limit declared by this plugin.
                pass
    if not plugins2cd_manager.keys():
        plugins2cd_manager.add_cd_limit("这是一个示例")
    _tmp_data = {"PluginCdLimit": plugins2cd_manager.get_data()}
    with open(plugins2cd_file, "w", encoding="utf8") as wf:
        yaml.dump(_tmp_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True)
    # Fixed: round_trip_load(open(...)) leaked the file handle.
    with open(plugins2cd_file, encoding="utf8") as rf:
        _data = round_trip_load(rf)
    _data["PluginCdLimit"].yaml_set_start_comment(
        """# 需要cd的功能
# 自定义的功能需要cd也可以在此配置
# key:模块名称
# cd:cd 时长(秒)
# status:此限制的开关状态
# check_type:'private'/'group'/'all',限制私聊/群聊/全部
# limit_type:监听对象,以user_id或group_id作为键来限制,'user':用户id,'group':群id
# 示例:'user':用户N秒内触发1次,'group':群N秒内触发1次
# rst:回复的话,可以添加[at],[uname],[nickname]来对应艾特,用户群名称,昵称系统昵称
# rst 为 "" 或 None 时则不回复
# rst示例:"[uname]你冲的太快了,[nickname]先生,请稍后再冲[at]"
# rst回复:"老色批你冲的太快了,欧尼酱先生,请稍后再冲@老色批"
# 用户昵称↑ 昵称系统的昵称↑ 艾特用户↑""",
        indent=2,
    )
    with open(plugins2cd_file, "w", encoding="utf8") as wf:
        round_trip_dump(_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True)
    plugins2cd_manager.reload_cd_limit()


def init_plugins_block_limit(data_path):
    """
    加载阻塞限制

    Same flow as :func:`init_plugins_cd_limit`, for ``__plugin_block_limit__``
    declarations, persisted to ``configs/plugins2block.yaml``.
    """
    plugins2block_file = data_path / "configs" / "plugins2block.yaml"
    plugins2block_file.parent.mkdir(exist_ok=True, parents=True)
    _data = {}
    _matchers = get_matchers()
    for matcher in _matchers:
        if not plugins2block_manager.get_plugin_block_data(matcher.plugin_name):
            _plugin = nonebot.plugin.get_plugin(matcher.plugin_name)
            try:
                _module = _plugin.module
                plugin_block_limit = _module.__getattribute__("__plugin_block_limit__")
                plugins2block_manager.add_block_limit(
                    matcher.plugin_name, data_dict=plugin_block_limit
                )
            except AttributeError:
                pass
    if not plugins2block_manager.keys():
        plugins2block_manager.add_block_limit("这是一个示例")
    _tmp_data = {"PluginBlockLimit": plugins2block_manager.get_data()}
    with open(plugins2block_file, "w", encoding="utf8") as wf:
        yaml.dump(_tmp_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True)
    # Fixed: round_trip_load(open(...)) leaked the file handle.
    with open(plugins2block_file, encoding="utf8") as rf:
        _data = round_trip_load(rf)
    _data["PluginBlockLimit"].yaml_set_start_comment(
        """# 用户调用阻塞
# 即 当用户调用此功能还未结束时
# 用发送消息阻止用户重复调用此命令直到该命令结束
# key:模块名称
# status:此限制的开关状态
# check_type:'private'/'group'/'all',限制私聊/群聊/全部
# limit_type:监听对象,以user_id或group_id作为键来限制,'user':用户id,'group':群id
# 示例:'user':阻塞用户,'group':阻塞群聊
# rst:回复的话,可以添加[at],[uname],[nickname]来对应艾特,用户群名称,昵称系统昵称
# rst 为 "" 或 None 时则不回复
# rst示例:"[uname]你冲的太快了,[nickname]先生,请稍后再冲[at]"
# rst回复:"老色批你冲的太快了,欧尼酱先生,请稍后再冲@老色批"
# 用户昵称↑ 昵称系统的昵称↑ 艾特用户↑""",
        indent=2,
    )
    with open(plugins2block_file, "w", encoding="utf8") as wf:
        round_trip_dump(_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True)
    plugins2block_manager.reload_block_limit()


def init_plugins_count_limit(data_path):
    """
    加载次数限制

    Same flow as :func:`init_plugins_cd_limit`, for ``__plugin_count_limit__``
    declarations, persisted to ``configs/plugins2count.yaml``.
    """
    plugins2count_file = data_path / "configs" / "plugins2count.yaml"
    plugins2count_file.parent.mkdir(exist_ok=True, parents=True)
    _data = {}
    _matchers = get_matchers()
    for matcher in _matchers:
        if not plugins2count_manager.get_plugin_count_data(matcher.plugin_name):
            _plugin = nonebot.plugin.get_plugin(matcher.plugin_name)
            try:
                _module = _plugin.module
                plugin_count_limit = _module.__getattribute__("__plugin_count_limit__")
                plugins2count_manager.add_count_limit(
                    matcher.plugin_name, data_dict=plugin_count_limit
                )
            except AttributeError:
                pass
    if not plugins2count_manager.keys():
        plugins2count_manager.add_count_limit("这是一个示例")
    _tmp_data = {"PluginCountLimit": plugins2count_manager.get_data()}
    with open(plugins2count_file, "w", encoding="utf8") as wf:
        yaml.dump(_tmp_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True)
    # Fixed: round_trip_load(open(...)) leaked the file handle.
    with open(plugins2count_file, encoding="utf8") as rf:
        _data = round_trip_load(rf)
    _data["PluginCountLimit"].yaml_set_start_comment(
        """# 命令每日次数限制
# 即 用户/群聊 每日可调用命令的次数 [数据内存存储,重启将会重置]
# 每日调用直到 00:00 刷新
# key:模块名称
# max_count: 每日调用上限
# status:此限制的开关状态
# limit_type:监听对象,以user_id或group_id作为键来限制,'user':用户id,'group':群id
# 示例:'user':用户上限,'group':群聊上限
# rst:回复的话,可以添加[at],[uname],[nickname]来对应艾特,用户群名称,昵称系统昵称
# rst 为 "" 或 None 时则不回复
# rst示例:"[uname]你冲的太快了,[nickname]先生,请稍后再冲[at]"
# rst回复:"老色批你冲的太快了,欧尼酱先生,请稍后再冲@老色批"
# 用户昵称↑ 昵称系统的昵称↑ 艾特用户↑""",
        indent=2,
    )
    with open(plugins2count_file, "w", encoding="utf8") as wf:
        round_trip_dump(_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True)
    plugins2count_manager.reload_count_limit()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/basic_plugins/init_plugin_config/init_plugins_limit.py
init_plugins_limit.py
from pathlib import Path
from ruamel.yaml import round_trip_load, round_trip_dump, YAML
from utils.manager import plugins2settings_manager, admin_manager
from services.log import logger
from utils.utils import get_matchers
from ruamel import yaml
import nonebot

_yaml = YAML(typ="safe")


def init_plugins_settings(data_path: Path):
    """
    初始化插件设置,从插件中获取 __zx_plugin_name__,__plugin_cmd__,__plugin_settings__

    Builds ``configs/plugins2settings.yaml`` from every matcher's module-level
    settings, registering admin-only plugins with ``admin_manager`` and
    skipping hidden/admin/superuser plugins from the public settings file.

    Note: the parameter was previously annotated ``str`` but is used with the
    ``/`` operator, so it must be a ``pathlib.Path``.
    """
    plugins2settings_file = data_path / "configs" / "plugins2settings.yaml"
    plugins2settings_file.parent.mkdir(exist_ok=True, parents=True)
    _matchers = get_matchers()
    _tmp_module = {}
    _tmp = []
    # Resolve display names for plugins already known to the manager.
    for x in plugins2settings_manager.keys():
        try:
            _plugin = nonebot.plugin.get_plugin(x)
            _module = _plugin.module
            plugin_name = _module.__getattribute__("__zx_plugin_name__")
            _tmp_module[x] = plugin_name
        except (KeyError, AttributeError) as e:
            logger.warning(f"配置文件 模块:{x} 获取 plugin_name 失败...{e}")
            _tmp_module[x] = ""
    for matcher in _matchers:
        if matcher.plugin_name not in plugins2settings_manager.keys():
            _plugin = nonebot.plugin.get_plugin(matcher.plugin_name)
            try:
                _module = _plugin.module
            except AttributeError:
                logger.warning(f"插件 {matcher.plugin_name} 加载失败...,插件控制未加载.")
            else:
                try:
                    plugin_name = _module.__getattribute__("__zx_plugin_name__")
                    if "[admin]" in plugin_name.lower():
                        # Admin plugin: register its command/level with the
                        # admin manager (defaults to level 5).
                        try:
                            admin_settings = _module.__getattribute__(
                                "__plugin_settings__"
                            )
                            level = admin_settings["admin_level"]
                            cmd = admin_settings.get("cmd")
                        except (AttributeError, KeyError):
                            level = 5
                            cmd = None
                        if level is None:
                            level = 5
                        admin_manager.add_admin_plugin_settings(
                            matcher.plugin_name, cmd, level
                        )
                    if (
                        "[hidden]" in plugin_name.lower()
                        or "[admin]" in plugin_name.lower()
                        or "[superuser]" in plugin_name.lower()
                        or matcher.plugin_name in plugins2settings_manager.keys()
                    ):
                        continue
                except AttributeError:
                    if matcher.plugin_name not in _tmp:
                        logger.warning(
                            f"获取插件 {matcher.plugin_name} __zx_plugin_name__ 失败...,插件控制未加载."
                        )
                else:
                    try:
                        _tmp_module[matcher.plugin_name] = plugin_name
                        plugin_settings = _module.__getattribute__(
                            "__plugin_settings__"
                        )
                        if plugin_settings.get('cost_gold') is None:
                            plugin_settings['cost_gold'] = 0
                        if (
                            plugin_settings["cmd"] is not None
                            and plugin_name not in plugin_settings["cmd"]
                        ):
                            # Ensure the display name itself triggers the plugin.
                            plugin_settings["cmd"].append(plugin_name)
                        if plugins2settings_manager.get(
                            matcher.plugin_name
                        ) and plugins2settings_manager[matcher.plugin_name].get(
                            "plugin_type"
                        ):
                            plugin_type = tuple(
                                plugins2settings_manager.get_plugin_data(
                                    matcher.plugin_name
                                )["plugin_type"]
                            )
                        else:
                            try:
                                plugin_type = _module.__getattribute__(
                                    "__plugin_type__"
                                )
                            except AttributeError:
                                plugin_type = ("normal",)
                        if plugin_settings and matcher.plugin_name:
                            plugins2settings_manager.add_plugin_settings(
                                matcher.plugin_name,
                                plugin_type=plugin_type,
                                **plugin_settings,
                            )
                    except AttributeError:
                        pass
        _tmp.append(matcher.plugin_name)
    _tmp_data = {"PluginSettings": plugins2settings_manager.get_data()}
    with open(plugins2settings_file, "w", encoding="utf8") as wf:
        yaml.dump(_tmp_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True)
    # Fixed: round_trip_load(open(...)) leaked the file handle.
    with open(plugins2settings_file, encoding="utf8") as rf:
        _data = round_trip_load(rf)
    _data["PluginSettings"].yaml_set_start_comment(
        """# 模块与对应命令和对应群权限
# 用于生成帮助图片 和 开关功能
# key:模块名称
# level:需要的群等级
# default_status:加入群时功能的默认开关状态
# limit_superuser: 功能状态是否限制超级用户
# cmd: 关闭[cmd] 都会触发命令 关闭对应功能,cmd列表第一个词为统计的功能名称
# plugin_type: 帮助类别 示例:('原神相关',) 或 ('原神相关', 1),1代表帮助命令列向排列,否则为横向排列""",
        indent=2,
    )
    for plugin in _data["PluginSettings"].keys():
        _data["PluginSettings"][plugin].yaml_set_start_comment(
            f"{plugin}:{_tmp_module[plugin]}", indent=2
        )
    with open(plugins2settings_file, "w", encoding="utf8") as wf:
        round_trip_dump(_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True)
    logger.info(f"已成功加载 {len(plugins2settings_manager.get_data())} 个非限制插件.")
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/basic_plugins/init_plugin_config/init_plugins_settings.py
init_plugins_settings.py
from pathlib import Path
from ruamel.yaml import round_trip_load, round_trip_dump, YAML
from utils.manager import admin_manager, plugins_manager
from configs.config import Config
from services.log import logger
from utils.utils import get_matchers
from ruamel import yaml
import nonebot

_yaml = YAML(typ="safe")


def init_plugins_config(data_path):
    """
    初始化插件数据配置

    Merges each plugin's ``__plugin_configs__`` declaration with the values
    persisted in ``configs/plugins2config.yaml`` (declared values win when the
    plugin version advanced or the stored schema differs), then re-generates
    the user-facing ``configs/config.yaml`` with per-plugin help comments.
    """
    plugins2config_file = data_path / "configs" / "plugins2config.yaml"
    plugins2config_file.parent.mkdir(parents=True, exist_ok=True)
    _data = {}
    if plugins2config_file.exists():
        # Fixed: _yaml.load(open(...)) leaked the file handle.
        with open(plugins2config_file, "r", encoding="utf8") as f:
            _data = _yaml.load(f)
    _matchers = get_matchers()
    for matcher in _matchers:
        _plugin = nonebot.plugin.get_plugin(matcher.plugin_name)
        try:
            _module = _plugin.module
        except AttributeError:
            continue
        try:
            plugin_version = _module.__getattribute__("__plugin_version__")
        except AttributeError:
            plugin_version = None
        try:
            plugin_configs = _module.__getattribute__("__plugin_configs__")
        except AttributeError:
            continue
        # 插件配置版本更新或为Version为None或不在存储配置内
        if (
            plugin_version is None
            or (
                _data.get(matcher.plugin_name)
                and _data[matcher.plugin_name].keys() != plugin_configs.keys()
            )
            or plugin_version > plugins_manager.get(matcher.plugin_name)["version"]
            or matcher.plugin_name not in _data.keys()
        ):
            # Take values from the module declaration, overriding stored ones.
            for key in plugin_configs:
                if isinstance(plugin_configs[key], dict):
                    Config.add_plugin_config(
                        matcher.plugin_name,
                        key,
                        plugin_configs[key].get("value"),
                        help_=plugin_configs[key].get("help"),
                        default_value=plugin_configs[key].get("default_value"),
                        _override=True,
                    )
                else:
                    Config.add_plugin_config(
                        matcher.plugin_name, key, plugin_configs[key]
                    )
        else:
            # Stored configuration is current: load it back verbatim.
            plugin_configs = _data[matcher.plugin_name]
            for key in plugin_configs:
                Config.add_plugin_config(
                    matcher.plugin_name,
                    key,
                    plugin_configs[key]["value"],
                    help_=plugin_configs[key]["help"],
                    default_value=plugin_configs[key]["default_value"],
                    _override=True,
                )
    if not Config.is_empty():
        Config.save()
    # Fixed: round_trip_load(open(...)) leaked the file handle.
    with open(plugins2config_file, encoding="utf8") as rf:
        _data = round_trip_load(rf)
    for plugin in _data.keys():
        try:
            plugin_name = plugins_manager.get(plugin)["plugin_name"]
        except (AttributeError, TypeError):
            plugin_name = plugin
        _data[plugin].yaml_set_start_comment(plugin_name, indent=2)
    # 初始化未设置的管理员权限等级
    for k, v in Config.get_admin_level_data():
        admin_manager.set_admin_level(k, v)
    # 存完插件基本设置
    with open(plugins2config_file, "w", encoding="utf8") as wf:
        round_trip_dump(
            _data, wf, indent=2, Dumper=yaml.RoundTripDumper, allow_unicode=True
        )
    # 再开始读取用户配置
    user_config_file = Path() / "configs" / "config.yaml"
    _data = {}
    _tmp_data = {}
    if user_config_file.exists():
        with open(user_config_file, "r", encoding="utf8") as f:
            _data = _yaml.load(f)
    # 数据替换
    for plugin in Config.keys():
        _tmp_data[plugin] = {}
        for k in Config[plugin].keys():
            if _data.get(plugin) and k in _data[plugin].keys():
                Config.set_config(plugin, k, _data[plugin][k])
                if level2module := Config.get_level2module(plugin, k):
                    try:
                        admin_manager.set_admin_level(level2module, _data[plugin][k])
                    except KeyError:
                        logger.warning(f"{level2module} 设置权限等级失败:{_data[plugin][k]}")
            _tmp_data[plugin][k] = Config.get_config(plugin, k)
    Config.save()
    temp_file = Path() / "configs" / "temp_config.yaml"
    try:
        with open(temp_file, "w", encoding="utf8") as wf:
            yaml.dump(
                _tmp_data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True
            )
        with open(temp_file, "r", encoding="utf8") as rf:
            _data = round_trip_load(rf)
        # 添加注释
        for plugin in _data.keys():
            rst = ""
            plugin_name = None
            try:
                plugin_data = Config.get(plugin)
                for x in list(Config.get(plugin).keys()):
                    try:
                        _x = plugin_data[x].get("name")
                        if _x:
                            plugin_name = _x
                    except AttributeError:
                        pass
            except (KeyError, AttributeError):
                plugin_name = None
            if not plugin_name:
                try:
                    plugin_name = plugins_manager.get(plugin)["plugin_name"]
                except (AttributeError, TypeError):
                    plugin_name = plugin
            plugin_name = (
                plugin_name.replace("[Hidden]", "")
                .replace("[Superuser]", "")
                .replace("[Admin]", "")
                .strip()
            )
            rst += plugin_name + "\n"
            for k in _data[plugin].keys():
                rst += f'{k}: {Config[plugin][k]["help"]}' + "\n"
            _data[plugin].yaml_set_start_comment(rst[:-1], indent=2)
        with open(Path() / "configs" / "config.yaml", "w", encoding="utf8") as wf:
            round_trip_dump(
                _data, wf, Dumper=yaml.RoundTripDumper, allow_unicode=True
            )
    except Exception as e:
        logger.error(f"生成简易配置注释错误 {type(e)}:{e}")
    if temp_file.exists():
        temp_file.unlink()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/basic_plugins/init_plugin_config/init_plugins_config.py
init_plugins_config.py
from typing import Union

from services.db_context import db
import time


class BanUser(db.Model):
    """Persistent record of banned users and ban metadata."""

    __tablename__ = "ban_users"

    user_qq = db.Column(db.BigInteger(), nullable=False, primary_key=True)
    ban_level = db.Column(db.Integer(), nullable=False)
    ban_time = db.Column(db.BigInteger())   # epoch seconds when ban started
    duration = db.Column(db.BigInteger())   # ban length in seconds; -1 = permanent

    _idx1 = db.Index("ban_group_users_idx1", "user_qq", unique=True)

    @classmethod
    async def check_ban_level(cls, user_qq: int, level: int) -> bool:
        """
        说明:
            检测ban掉目标的用户与unban用户的权限等级大小
        参数:
            :param user_qq: unban用户的qq号
            :param level: ban掉目标用户的权限等级
        """
        user = await cls.query.where((cls.user_qq == user_qq)).gino.first()
        if not user:
            return False
        # True when the stored ban outranks the requester's level.
        if user.ban_level > level:
            return True
        return False

    @classmethod
    async def check_ban_time(cls, user_qq: int) -> Union[str, float]:
        """
        说明:
            检测用户被ban时长
        参数:
            :param user_qq: qq号
        返回:
            "" when not banned / expired, "∞" for a permanent ban, otherwise a
            float remaining-time value.  (Annotation fixed: the still-banned
            branch returns a float, not a str.)
        """
        query = cls.query.where((cls.user_qq == user_qq))
        user = await query.gino.first()
        if not user:
            return ""
        if time.time() - (user.ban_time + user.duration) > 0 and user.duration != -1:
            return ""
        if user.duration == -1:
            return "∞"
        return time.time() - user.ban_time - user.duration

    @classmethod
    async def is_ban(cls, user_qq: int) -> bool:
        """
        说明:
            判断用户是否被ban
        参数:
            :param user_qq: qq号
        """
        if await cls.check_ban_time(user_qq):
            return True
        else:
            # Expired ban: clean up the stale row.
            await cls.unban(user_qq)
            return False

    @classmethod
    async def is_super_ban(cls, user_qq: int) -> bool:
        """
        说明:
            判断用户是否被超级用户ban (ban_level == 10)
        参数:
            :param user_qq: qq号
        """
        user = await cls.query.where((cls.user_qq == user_qq)).gino.first()
        if not user:
            return False
        if user.ban_level == 10:
            return True
        # Fixed: the original fell off the end and returned None despite the
        # declared bool return type.
        return False

    @classmethod
    async def ban(cls, user_qq: int, ban_level: int, duration: int) -> bool:
        """
        说明:
            ban掉目标用户
        参数:
            :param user_qq: 目标用户qq号
            :param ban_level: 使用ban命令用户的权限
            :param duration: ban时长,秒
        """
        query = cls.query.where((cls.user_qq == user_qq))
        query = query.with_for_update()
        user = await query.gino.first()
        if not await cls.check_ban_time(user_qq):
            # Any existing ban has expired: clear it so a fresh one is created.
            await cls.unban(user_qq)
            user = None
        if user is None:
            await cls.create(
                user_qq=user_qq,
                ban_level=ban_level,
                ban_time=time.time(),
                duration=duration,
            )
            return True
        else:
            # Still actively banned: refuse to overwrite.
            return False

    @classmethod
    async def unban(cls, user_qq: int) -> bool:
        """
        说明:
            unban用户
        参数:
            :param user_qq: qq号
        """
        query = cls.query.where((cls.user_qq == user_qq))
        query = query.with_for_update()
        user = await query.gino.first()
        if user is None:
            return False
        else:
            await cls.delete.where((cls.user_qq == user_qq)).gino.status()
            return True
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/models/ban_user.py
ban_user.py
from datetime import datetime
from configs.config import Config
from services.db_context import db
from typing import List, Optional


class GroupInfoUser(db.Model):
    """Per-group member record: names, join time and the cross-group uid."""

    __tablename__ = "group_info_users"

    id = db.Column(db.Integer(), primary_key=True)
    user_qq = db.Column(db.BigInteger(), nullable=False)
    user_name = db.Column(db.Unicode(), nullable=False)
    group_id = db.Column(db.BigInteger(), nullable=False)
    user_join_time = db.Column(db.DateTime(), nullable=False)
    nickname = db.Column(db.Unicode())
    uid = db.Column(db.BigInteger())

    _idx1 = db.Index("info_group_users_idx1", "user_qq", "group_id", unique=True)

    @classmethod
    async def add_member_info(
        cls,
        user_qq: int,
        group_id: int,
        user_name: str,
        user_join_time: datetime,
        uid: Optional[int] = None,
    ) -> bool:
        """
        说明:
            添加群内用户信息
        参数:
            :param user_qq: qq号
            :param group_id: 群号
            :param user_name: 用户名称
            :param user_join_time: 入群时间
            :param uid: 用户唯一 id(自动生成)
        """
        query = cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        )
        try:
            if not await query.gino.first():
                await cls.create(
                    user_qq=user_qq,
                    user_name=user_name,
                    group_id=group_id,
                    user_join_time=user_join_time,
                    uid=uid,
                )
            return True
        except Exception:
            return False

    @classmethod
    async def get_member_info(
        cls, user_qq: int, group_id: int
    ) -> "GroupInfoUser":
        """
        说明:
            查询群员信息
        参数:
            :param user_qq: qq号
            :param group_id: 群号
        """
        query = cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        )
        return await query.gino.first()

    @classmethod
    async def delete_member_info(cls, user_qq: int, group_id: int) -> bool:
        """
        说明:
            删除群员信息
        参数:
            :param user_qq: qq号
            :param group_id: 群号
        """
        query = cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        )
        query = query.with_for_update()
        user = await query.gino.first()
        try:
            if user is None:
                return True
            else:
                await cls.delete.where(
                    (cls.user_qq == user_qq) & (cls.group_id == group_id)
                ).gino.status()
                return True
        except Exception:
            return False

    @classmethod
    async def get_group_member_id_list(cls, group_id: int) -> List[int]:
        """
        说明:
            获取该群所有用户qq
        参数:
            :param group_id: 群号
        """
        member_list = []
        query = cls.query.where((cls.group_id == group_id))
        for user in await query.gino.all():
            member_list.append(user.user_qq)
        return member_list

    @classmethod
    async def set_group_member_nickname(
        cls, user_qq: int, group_id: int, nickname: str
    ) -> bool:
        """
        说明:
            设置群员在该群内的昵称
        参数:
            :param user_qq: qq号
            :param group_id: 群号
            :param nickname: 昵称
        """
        query = cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        )
        user = await query.with_for_update().gino.first()
        if user:
            await user.update(nickname=nickname).apply()
            return True
        return False

    @classmethod
    async def get_user_all_group(cls, user_qq: int) -> List[int]:
        """
        说明:
            获取该用户所在的所有群聊
        参数:
            :param user_qq: 用户qq
        """
        query = await cls.query.where(cls.user_qq == user_qq).gino.all()
        if query:
            query = [x.group_id for x in query]
        return query

    @classmethod
    async def get_group_member_nickname(cls, user_qq: int, group_id: int) -> str:
        """
        说明:
            获取用户在该群的昵称
        参数:
            :param user_qq: qq号
            :param group_id: 群号
        """
        query = cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        )
        user = await query.gino.first()
        if user:
            if user.nickname:
                _tmp = ""
                black_word = Config.get_config("nickname", "BLACK_WORD")
                # NOTE(review): when BLACK_WORD is empty/unset this returns ""
                # instead of the raw nickname — looks unintended; confirm
                # against callers before changing.
                if black_word:
                    for x in user.nickname:
                        _tmp += "*" if x in black_word else x
                return _tmp
        return ""

    @classmethod
    async def get_group_member_uid(cls, user_qq: int, group_id: int) -> Optional[str]:
        """
        说明:
            获取用户在该群的唯一 uid,必要时分配一个新的
        参数:
            :param user_qq: qq号
            :param group_id: 群号

        The row with user_qq == group_id == 114514 acts as the uid allocation
        counter; new uids are counter + 1.
        """
        query = cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        )
        user = await query.gino.first()
        _max_uid = cls.query.where(
            (cls.user_qq == 114514) & (cls.group_id == 114514)
        ).with_for_update()
        _max_uid_user = await _max_uid.gino.first()
        if not _max_uid_user:
            # Fixed: without the sentinel counter row the original crashed
            # with AttributeError; fail soft instead.
            return None
        _max_uid = _max_uid_user.uid
        if not user or not user.uid:
            # Reuse a uid this qq already holds in another group, if any.
            all_user = await cls.query.where(cls.user_qq == user_qq).gino.all()
            for x in all_user:
                if x.uid:
                    return x.uid
            if not user:
                await GroupInfoUser.add_member_info(user_qq, group_id, '', datetime.min)
                user = await cls.query.where(
                    (cls.user_qq == user_qq) & (cls.group_id == group_id)
                ).gino.first()
            # Allocate the next uid and bump the counter row.
            await user.update(
                uid=_max_uid + 1,
            ).apply()
            await _max_uid_user.update(
                uid=_max_uid + 1,
            ).apply()
        return user.uid if user and user.uid else None
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/models/group_member_info.py
group_member_info.py
from typing import Dict, List, Optional

from services.db_context import db
from services.log import logger


class BagUser(db.Model):
    """Per-group user bag: gold balance, gold statistics and owned props."""

    __tablename__ = "bag_users"

    id = db.Column(db.Integer(), primary_key=True)
    user_qq = db.Column(db.BigInteger(), nullable=False)
    group_id = db.Column(db.BigInteger(), nullable=False)
    gold = db.Column(db.Integer(), default=100)
    # Legacy prop field (deprecated), kept only for schema compatibility.
    props = db.Column(db.TEXT(), nullable=False, default="")
    spend_total_gold = db.Column(db.Integer(), default=0)
    get_total_gold = db.Column(db.Integer(), default=0)
    get_today_gold = db.Column(db.Integer(), default=0)
    spend_today_gold = db.Column(db.Integer(), default=0)
    # Current prop field: mapping of prop name -> owned count.
    property = db.Column(db.JSON(), nullable=False, default={})

    _idx1 = db.Index("bag_group_users_idx1", "user_qq", "group_id", unique=True)

    @classmethod
    async def get_user_total_gold(cls, user_qq: int, group_id: int) -> str:
        """
        Build a human-readable summary of the user's gold statistics.

        Creates the bag row with default values when it does not exist yet.

        :param user_qq: QQ id
        :param group_id: group id
        """
        user = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).gino.first()
        if not user:
            user = await cls.create(user_qq=user_qq, group_id=group_id)
        return (
            f"当前金币:{user.gold}\n今日获取金币:{user.get_today_gold}\n今日花费金币:{user.spend_today_gold}"
            f"\n今日收益:{user.get_today_gold - user.spend_today_gold}"
            f"\n总赚取金币:{user.get_total_gold}\n总花费金币:{user.spend_total_gold}"
        )

    @classmethod
    async def get_gold(cls, user_qq: int, group_id: int) -> int:
        """
        Get the user's current gold, creating the bag row (100 gold) when missing.

        :param user_qq: QQ id
        :param group_id: group id
        """
        user = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).gino.first()
        if user:
            return user.gold
        await cls.create(user_qq=user_qq, group_id=group_id)
        return 100

    @classmethod
    async def get_property(cls, user_qq: int, group_id: int) -> Dict[str, int]:
        """
        Get the user's props, creating the bag row when missing.

        :param user_qq: QQ id
        :param group_id: group id
        """
        user = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).gino.first()
        if user:
            return user.property
        await cls.create(user_qq=user_qq, group_id=group_id)
        return {}

    @classmethod
    async def add_gold(cls, user_qq: int, group_id: int, num: int):
        """
        Add gold and update the earn statistics.

        :param user_qq: QQ id
        :param group_id: group id
        :param num: amount of gold to add
        """
        user = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).with_for_update().gino.first()
        if user:
            await user.update(
                gold=user.gold + num,
                get_total_gold=user.get_total_gold + num,
                get_today_gold=user.get_today_gold + num,
            ).apply()
        else:
            # New rows start from the default 100 gold.
            await cls.create(
                user_qq=user_qq,
                group_id=group_id,
                gold=100 + num,
                get_total_gold=num,
                get_today_gold=num,
            )

    @classmethod
    async def spend_gold(cls, user_qq: int, group_id: int, num: int):
        """
        Spend gold and update the spend statistics.

        :param user_qq: QQ id
        :param group_id: group id
        :param num: amount of gold to spend
        """
        user = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).with_for_update().gino.first()
        if user:
            await user.update(
                gold=user.gold - num,
                spend_total_gold=user.spend_total_gold + num,
                spend_today_gold=user.spend_today_gold + num,
            ).apply()
        else:
            await cls.create(
                user_qq=user_qq,
                group_id=group_id,
                gold=100 - num,
                spend_total_gold=num,
                spend_today_gold=num,
            )

    @classmethod
    async def add_property(cls, user_qq: int, group_id: int, name: str):
        """
        Add one prop to the user's bag.

        :param user_qq: QQ id
        :param group_id: group id
        :param name: prop name
        """
        user = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).with_for_update().gino.first()
        if user:
            p = user.property
            p[name] = p.get(name, 0) + 1
            await user.update(property=p).apply()
        else:
            await cls.create(user_qq=user_qq, group_id=group_id, property={name: 1})

    @classmethod
    async def delete_property(
        cls, user_qq: int, group_id: int, name: str, num: int = 1
    ) -> bool:
        """
        Use / remove props from the user's bag.

        BUGFIX: the original subtracted unconditionally, which could leave a
        negative count when ``num`` exceeded the owned amount; the call now
        fails (returns False) instead.

        :param user_qq: QQ id
        :param group_id: group id
        :param name: prop name
        :param num: amount to use
        """
        user = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).with_for_update().gino.first()
        if user:
            property_ = user.property
            count = property_.get(name)
            if count is not None and count >= num:
                if count == num:
                    del property_[name]
                else:
                    property_[name] = count - num
                await user.update(property=property_).apply()
                return True
        return False

    @classmethod
    async def buy_property(
        cls, user_qq: int, group_id: int, goods: "GoodsInfo", goods_num: int
    ) -> bool:
        """
        Buy props from the shop: deduct the discounted price, then add the props.

        :param user_qq: QQ id
        :param group_id: group id
        :param goods: goods record
        :param goods_num: amount to buy
        """
        try:
            # BUGFIX: goods_discount is a float, so the total used to be a
            # float written into the integer gold columns; truncate to int.
            spend_gold = int(goods.goods_discount * goods.goods_price * goods_num)
            await cls.spend_gold(user_qq, group_id, spend_gold)
            for _ in range(goods_num):
                await cls.add_property(user_qq, group_id, goods.goods_name)
            return True
        except Exception as e:
            logger.error(f"buy_property 发生错误 {type(e)}:{e}")
            return False

    @classmethod
    async def get_all_users(cls, group_id: Optional[int] = None) -> List["BagUser"]:
        """
        Get all bag rows, optionally restricted to one group.

        :param group_id: group id; falsy means every group
        """
        if not group_id:
            return await cls.query.gino.all()
        return await cls.query.where(cls.group_id == group_id).gino.all()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/models/bag_user.py
bag_user.py
from datetime import datetime
from typing import List, Tuple

from services.db_context import db


class SignGroupUser(db.Model):
    """Per-group sign-in record: check-in count, impression and bonus odds."""

    __tablename__ = "sign_group_users"

    id = db.Column(db.Integer(), primary_key=True)
    user_qq = db.Column(db.BigInteger(), nullable=False)
    group_id = db.Column(db.BigInteger(), nullable=False)
    checkin_count = db.Column(db.Integer(), nullable=False)
    checkin_time_last = db.Column(db.DateTime(timezone=True), nullable=False)
    impression = db.Column(db.Numeric(scale=3, asdecimal=False), nullable=False)
    add_probability = db.Column(
        db.Numeric(scale=3, asdecimal=False), nullable=False, default=0
    )
    specify_probability = db.Column(
        db.Numeric(scale=3, asdecimal=False), nullable=False, default=0
    )

    _idx1 = db.Index("sign_group_users_idx1", "user_qq", "group_id", unique=True)

    @classmethod
    async def ensure(
        cls, user_qq: int, group_id: int, for_update: bool = False
    ) -> "SignGroupUser":
        """
        Fetch the sign-in record, creating a fresh one when missing.

        :param user_qq: QQ id
        :param group_id: group id
        :param for_update: lock the row for a subsequent update
        """
        query = cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        )
        if for_update:
            query = query.with_for_update()
        user = await query.gino.first()
        return user or await cls.create(
            user_qq=user_qq,
            group_id=group_id,
            checkin_count=0,
            checkin_time_last=datetime.min,  # never signed in before
            impression=0,
        )

    @classmethod
    async def get_user_all_data(cls, user_qq: int) -> List["SignGroupUser"]:
        """
        Get all sign-in records of one user across all groups.

        :param user_qq: QQ id
        """
        query = cls.query.where(cls.user_qq == user_qq).with_for_update()
        return await query.gino.all()

    @classmethod
    async def sign(
        cls, user: "SignGroupUser", impression: float, checkin_time_last: datetime
    ):
        """
        Apply one sign-in: bump the counter, add impression, record the time
        and reset the one-shot probability bonuses.

        :param user: sign-in record (should be fetched with ``for_update=True``)
        :param impression: impression to add
        :param checkin_time_last: time of this sign-in
        """
        await user.update(
            checkin_count=user.checkin_count + 1,
            checkin_time_last=checkin_time_last,
            impression=user.impression + impression,
            add_probability=0,
            specify_probability=0,
        ).apply()

    @classmethod
    async def get_all_impression(
        cls, group_id: int
    ) -> Tuple[List[int], List[float], List[int]]:
        """
        Get user ids, impressions and group ids of all sign-in records.

        BUGFIX: the return annotation was the invalid string
        ``"list, list, list"``; it is now a proper ``Tuple`` annotation.

        :param group_id: group id; a falsy value means every group
        """
        impression_list = []
        user_qq_list = []
        user_group = []
        query = cls.query.where(cls.group_id == group_id) if group_id else cls.query
        for user in await query.gino.all():
            impression_list.append(user.impression)
            user_qq_list.append(user.user_qq)
            user_group.append(user.group_id)
        return user_qq_list, impression_list, user_group
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/models/sign_group_user.py
sign_group_user.py
from services.db_context import db
from configs.config import Config


class FriendUser(db.Model):
    """Friend (private chat) user info: name and optional custom nickname."""

    __tablename__ = "friend_users"

    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.BigInteger(), nullable=False)
    user_name = db.Column(db.Unicode(), nullable=False, default="")
    nickname = db.Column(db.Unicode())

    _idx1 = db.Index("friend_users_idx1", "user_id", unique=True)

    @classmethod
    async def get_user_name(cls, user_id: int) -> str:
        """
        Get the friend's user name, or "" when unknown.

        :param user_id: QQ id
        """
        user = await cls.query.where(cls.user_id == user_id).gino.first()
        return user.user_name if user else ""

    @classmethod
    async def add_friend_info(cls, user_id: int, user_name: str) -> bool:
        """
        Insert or refresh a friend's info.

        :param user_id: QQ id
        :param user_name: user name
        """
        try:
            user = await cls.query.where(
                cls.user_id == user_id
            ).with_for_update().gino.first()
            if not user:
                await cls.create(user_id=user_id, user_name=user_name)
            else:
                await user.update(user_name=user_name).apply()
            return True
        except Exception:
            # Best-effort: callers only need a success flag.
            return False

    @classmethod
    async def delete_friend_info(cls, user_id: int) -> bool:
        """
        Delete a friend's info; a missing row counts as success.

        :param user_id: QQ id
        """
        try:
            user = await cls.query.where(
                cls.user_id == user_id
            ).with_for_update().gino.first()
            if user:
                await user.delete()
            return True
        except Exception:
            return False

    @classmethod
    async def get_friend_nickname(cls, user_id: int) -> str:
        """
        Get the friend's nickname with black-listed characters masked by ``*``.

        BUGFIX: when no BLACK_WORD list is configured the original returned ""
        even though a nickname was set (the masked buffer was only filled
        inside the ``if black_word`` branch); the nickname is now returned
        unmasked in that case.

        :param user_id: QQ id
        """
        user = await cls.query.where(cls.user_id == user_id).gino.first()
        if user and user.nickname:
            black_word = Config.get_config("nickname", "BLACK_WORD")
            if not black_word:
                return user.nickname
            return "".join("*" if x in black_word else x for x in user.nickname)
        return ""

    @classmethod
    async def set_friend_nickname(cls, user_id: int, nickname: str) -> bool:
        """
        Set the friend's nickname.

        :param user_id: QQ id
        :param nickname: nickname
        """
        try:
            user = await cls.query.where(
                cls.user_id == user_id
            ).with_for_update().gino.first()
            if not user:
                await cls.create(user_id=user_id, nickname=nickname)
            else:
                await user.update(nickname=nickname).apply()
            return True
        except Exception:
            return False
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/models/friend_user.py
friend_user.py
from services.db_context import db


class LevelUser(db.Model):
    """Per-group user permission level."""

    __tablename__ = "level_users"

    id = db.Column(db.Integer(), primary_key=True)
    user_qq = db.Column(db.BigInteger(), nullable=False)
    group_id = db.Column(db.BigInteger(), nullable=False)
    user_level = db.Column(db.BigInteger(), nullable=False)
    group_flag = db.Column(db.Integer(), nullable=False, default=0)

    _idx1 = db.Index("level_group_users_idx1", "user_qq", "group_id", unique=True)

    @classmethod
    async def get_user_level(cls, user_qq: int, group_id: int) -> int:
        """
        Get the user's permission level inside a group, -1 when no record exists.

        :param user_qq: QQ id
        :param group_id: group id
        """
        record = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).gino.first()
        return record.user_level if record else -1

    @classmethod
    async def set_level(
        cls, user_qq: int, group_id: int, level: int, group_flag: int = 0
    ) -> bool:
        """
        Set the user's permission level inside a group.

        Returns True when a new record was created, False when an existing
        record was updated.

        :param user_qq: QQ id
        :param group_id: group id
        :param level: permission level
        :param group_flag: refreshed by the auto-update task, 0: yes, 1: no
        """
        record = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).with_for_update().gino.first()
        if record is None:
            await cls.create(
                user_qq=user_qq,
                group_id=group_id,
                user_level=level,
                group_flag=group_flag,
            )
            return True
        await record.update(user_level=level, group_flag=group_flag).apply()
        return False

    @classmethod
    async def delete_level(cls, user_qq: int, group_id: int) -> bool:
        """
        Delete the user's permission record; False when there is none.

        :param user_qq: QQ id
        :param group_id: group id
        """
        record = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).with_for_update().gino.first()
        if record is None:
            return False
        await record.delete()
        return True

    @classmethod
    async def check_level(cls, user_qq: int, group_id: int, level: int) -> bool:
        """
        Check whether the user's permission level is at least ``level``.

        :param user_qq: QQ id
        :param group_id: group id; 0 checks the user's highest level across
            all groups
        :param level: required level
        """
        if group_id != 0:
            record = await cls.query.where(
                (cls.user_qq == user_qq) & (cls.group_id == group_id)
            ).gino.first()
            if record is None:
                return False
            user_level = record.user_level
        else:
            records = await cls.query.where(cls.user_qq == user_qq).gino.all()
            user_level = max((r.user_level for r in records), default=0)
        return user_level >= level

    @classmethod
    async def is_group_flag(cls, user_qq: int, group_id: int) -> bool:
        """
        Check whether the record is excluded from auto-update refreshes
        (``group_flag == 1``).

        :param user_qq: QQ id
        :param group_id: group id
        """
        record = await cls.query.where(
            (cls.user_qq == user_qq) & (cls.group_id == group_id)
        ).gino.first()
        return bool(record) and record.group_flag == 1
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/models/level_user.py
level_user.py
from services.db_context import db
from services.log import logger
from typing import List, Optional


class GroupInfo(db.Model):
    """Basic group info: name, member counts and authentication flag."""

    __tablename__ = "group_info"

    group_id = db.Column(db.BigInteger(), nullable=False, primary_key=True)
    group_name = db.Column(db.Unicode(), nullable=False, default="")
    max_member_count = db.Column(db.Integer(), nullable=False, default=0)
    member_count = db.Column(db.Integer(), nullable=False, default=0)
    group_flag = db.Column(db.Integer(), nullable=False, default=0)

    _idx1 = db.Index("group_info_idx1", "group_id", unique=True)

    @classmethod
    async def get_group_info(cls, group_id: int) -> "GroupInfo":
        """
        Get one group's info record (None when unknown).

        :param group_id: group id
        """
        return await cls.query.where(cls.group_id == group_id).gino.first()

    @classmethod
    async def add_group_info(
        cls,
        group_id: int,
        group_name: str,
        max_member_count: int,
        member_count: int,
        group_flag: Optional[int] = None,
    ) -> bool:
        """
        Insert or refresh a group's info.

        BUGFIX: creating with ``group_flag=None`` used to pass NULL explicitly
        into a NOT NULL column (bypassing the column default); 0
        (unauthenticated) is now substituted. The two consecutive updates on
        the existing-row path are also merged into a single one.

        :param group_id: group id
        :param group_name: group name
        :param max_member_count: maximum member count
        :param member_count: member count
        :param group_flag: authentication flag, 0 unauthenticated, 1 authenticated
        """
        try:
            group = (
                await cls.query.where(cls.group_id == group_id)
                .with_for_update()
                .gino.first()
            )
            if group:
                values = {
                    "group_name": group_name,
                    "max_member_count": max_member_count,
                    "member_count": member_count,
                }
                # Only overwrite the flag when the caller supplied one.
                if group_flag is not None:
                    values["group_flag"] = group_flag
                await group.update(**values).apply()
            else:
                await cls.create(
                    group_id=group_id,
                    group_name=group_name,
                    max_member_count=max_member_count,
                    member_count=member_count,
                    group_flag=group_flag if group_flag is not None else 0,
                )
            return True
        except Exception as e:
            # This is a failure path; log at error level, not info.
            logger.error(f"GroupInfo 调用 add_group_info 发生错误 {type(e)}:{e}")
            return False

    @classmethod
    async def delete_group_info(cls, group_id: int):
        """
        Delete a group's info record.

        :param group_id: group id
        """
        await cls.delete.where(cls.group_id == group_id).gino.status()

    @classmethod
    async def get_all_group(cls) -> List["GroupInfo"]:
        """Get every group info record."""
        return await cls.query.gino.all()

    @classmethod
    async def set_group_flag(cls, group_id: int, group_flag: int) -> bool:
        """
        Set a group's authentication flag; False when the group is unknown.

        :param group_id: group id
        :param group_flag: authentication flag, 0 unauthenticated, 1 authenticated
        """
        group = (
            await cls.query.where(cls.group_id == group_id)
            .with_for_update()
            .gino.first()
        )
        if group:
            if group.group_flag != group_flag:
                await group.update(group_flag=group_flag).apply()
            return True
        return False
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/models/group_info.py
group_info.py
from services.db_context import db
from typing import Optional, List
from services.log import logger


class GoodsInfo(db.Model):
    """Shop goods: price, description, discount and time limit."""

    __tablename__ = "goods_info"

    id = db.Column(db.Integer(), primary_key=True)
    goods_name = db.Column(db.TEXT(), nullable=False)  # name
    goods_price = db.Column(db.Integer(), nullable=False)  # price
    goods_description = db.Column(db.TEXT(), nullable=False)  # description
    goods_discount = db.Column(db.Numeric(scale=3, asdecimal=False), default=1)  # discount
    goods_limit_time = db.Column(db.BigInteger(), default=0)  # time limit

    _idx1 = db.Index("goods_group_users_idx1", "goods_name", unique=True)

    @classmethod
    async def add_goods(
        cls,
        goods_name: str,
        goods_price: int,
        goods_description: str,
        goods_discount: float = 1,
        goods_limit_time: int = 0,
    ) -> bool:
        """
        Add a goods entry; False when it already exists or on error.

        :param goods_name: goods name
        :param goods_price: price
        :param goods_description: description
        :param goods_discount: discount factor
        :param goods_limit_time: time limit
        """
        try:
            if not await cls.get_goods_info(goods_name):
                await cls.create(
                    goods_name=goods_name,
                    goods_price=goods_price,
                    goods_description=goods_description,
                    goods_discount=goods_discount,
                    goods_limit_time=goods_limit_time,
                )
                return True
        except Exception as e:
            logger.error(f"GoodsInfo add_goods 发生错误 {type(e)}:{e}")
        return False

    @classmethod
    async def delete_goods(cls, goods_name: str) -> bool:
        """
        Delete a goods entry; False when it does not exist.

        :param goods_name: goods name
        """
        goods = (
            await cls.query.where(cls.goods_name == goods_name)
            .with_for_update()
            .gino.first()
        )
        if not goods:
            return False
        await goods.delete()
        return True

    @classmethod
    async def update_goods(
        cls,
        goods_name: str,
        goods_price: Optional[int] = None,
        goods_description: Optional[str] = None,
        goods_discount: Optional[float] = None,
        goods_limit_time: Optional[int] = None,
    ) -> bool:
        """
        Update the provided fields of a goods entry.

        BUGFIX: fields were tested for truthiness, so legitimate zero values
        (e.g. ``goods_limit_time=0`` to clear a limit, or an empty
        description) were silently ignored; ``is not None`` is used instead.
        The per-field updates are also merged into a single one.

        :param goods_name: goods name
        :param goods_price: price
        :param goods_description: description
        :param goods_discount: discount factor
        :param goods_limit_time: time limit
        """
        try:
            goods = (
                await cls.query.where(cls.goods_name == goods_name)
                .with_for_update()
                .gino.first()
            )
            if not goods:
                return False
            values = {}
            if goods_price is not None:
                values["goods_price"] = goods_price
            if goods_description is not None:
                values["goods_description"] = goods_description
            if goods_discount is not None:
                values["goods_discount"] = goods_discount
            if goods_limit_time is not None:
                values["goods_limit_time"] = goods_limit_time
            if values:
                await goods.update(**values).apply()
            return True
        except Exception as e:
            logger.error(f"GoodsInfo update_goods 发生错误 {type(e)}:{e}")
            return False

    @classmethod
    async def get_goods_info(cls, goods_name: str) -> "GoodsInfo":
        """
        Get one goods record (None when unknown).

        :param goods_name: goods name
        """
        return await cls.query.where(cls.goods_name == goods_name).gino.first()

    @classmethod
    async def get_all_goods(cls) -> List["GoodsInfo"]:
        """
        Get all goods ordered by id.

        The original selection-sorted the list in O(n^2); ``sorted`` with an
        id key yields the same order in O(n log n).
        """
        goods = await cls.query.gino.all()
        return sorted(goods, key=lambda g: g.id)
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/models/goods_info.py
goods_info.py
zh2Hant = { '呆': '獃', "打印机": "印表機", '帮助文件': '說明檔案', "画": "畫", "龙": "竜", "板": "板", "表": "表", "才": "才", "丑": "醜", "出": "出", "淀": "澱", "冬": "冬", "范": "範", "丰": "豐", "刮": "刮", "后": "後", "胡": "胡", "回": "回", "伙": "夥", "姜": "薑", "借": "借", "克": "克", "困": "困", "漓": "漓", "里": "里", "帘": "簾", "霉": "霉", "面": "面", "蔑": "蔑", "千": "千", "秋": "秋", "松": "松", "咸": "咸", "向": "向", "余": "餘", "郁": "鬱", "御": "御", "愿": "願", "云": "雲", "芸": "芸", "沄": "沄", "致": "致", "制": "制", "朱": "朱", "筑": "築", "准": "準", "厂": "廠", "广": "廣", "辟": "闢", "别": "別", "卜": "卜", "沈": "沈", "冲": "沖", "种": "種", "虫": "蟲", "担": "擔", "党": "黨", "斗": "鬥", "儿": "兒", "干": "乾", "谷": "谷", "柜": "櫃", "合": "合", "划": "劃", "坏": "壞", "几": "幾", "系": "系", "家": "家", "价": "價", "据": "據", "卷": "捲", "适": "適", "蜡": "蠟", "腊": "臘", "了": "了", "累": "累", "么": "麽", "蒙": "蒙", "万": "萬", "宁": "寧", "朴": "樸", "苹": "蘋", "仆": "僕", "曲": "曲", "确": "確", "舍": "舍", "胜": "勝", "术": "術", "台": "台", "体": "體", "涂": "塗", "叶": "葉", "吁": "吁", "旋": "旋", "佣": "傭", "与": "與", "折": "折", "征": "徵", "症": "症", "恶": "惡", "发": "發", "复": "復", "汇": "匯", "获": "獲", "饥": "飢", "尽": "盡", "历": "歷", "卤": "滷", "弥": "彌", "签": "簽", "纤": "纖", "苏": "蘇", "坛": "壇", "团": "團", "须": "須", "脏": "臟", "只": "只", "钟": "鐘", "药": "藥", "同": "同", "志": "志", "杯": "杯", "岳": "岳", "布": "布", "当": "當", "吊": "弔", "仇": "仇", "蕴": "蘊", "线": "線", "为": "為", "产": "產", "众": "眾", "伪": "偽", "凫": "鳧", "厕": "廁", "启": "啟", "墙": "牆", "壳": "殼", "奖": "獎", "妫": "媯", "并": "並", "录": "錄", "悫": "愨", "极": "極", "沩": "溈", "瘘": "瘺", "硷": "鹼", "竖": "豎", "绝": "絕", "绣": "繡", "绦": "絛", "绱": "緔", "绷": "綳", "绿": "綠", "缰": "韁", "苧": "苎", "莼": "蒓", "说": "說", "谣": "謠", "谫": "譾", "赃": "贓", "赍": "齎", "赝": "贗", "酝": "醞", "采": "採", "钩": "鉤", "钵": "缽", "锈": "銹", "锐": "銳", "锨": "杴", "镌": "鐫", "镢": "钁", "阅": "閱", "颓": "頹", "颜": "顏", "骂": "罵", "鲇": "鯰", "鲞": "鯗", "鳄": "鱷", "鸡": "雞", "鹚": "鶿", "荡": "盪", "锤": "錘", "㟆": "㠏", "㛟": "𡞵", "专": "專", "业": "業", "丛": "叢", "东": "東", "丝": "絲", "丢": "丟", "两": "兩", "严": "嚴", "丧": "喪", "个": "個", "临": "臨", "丽": "麗", "举": 
"舉", "义": "義", "乌": "烏", "乐": "樂", "乔": "喬", "习": "習", "乡": "鄉", "书": "書", "买": "買", "乱": "亂", "争": "爭", "于": "於", "亏": "虧", "亚": "亞", "亩": "畝", "亲": "親", "亵": "褻", "亸": "嚲", "亿": "億", "仅": "僅", "从": "從", "仑": "侖", "仓": "倉", "仪": "儀", "们": "們", "优": "優", "会": "會", "伛": "傴", "伞": "傘", "伟": "偉", "传": "傳", "伣": "俔", "伤": "傷", "伥": "倀", "伦": "倫", "伧": "傖", "伫": "佇", "佥": "僉", "侠": "俠", "侣": "侶", "侥": "僥", "侦": "偵", "侧": "側", "侨": "僑", "侩": "儈", "侪": "儕", "侬": "儂", "俣": "俁", "俦": "儔", "俨": "儼", "俩": "倆", "俪": "儷", "俫": "倈", "俭": "儉", "债": "債", "倾": "傾", "偬": "傯", "偻": "僂", "偾": "僨", "偿": "償", "傥": "儻", "傧": "儐", "储": "儲", "傩": "儺", "㑩": "儸", "兑": "兌", "兖": "兗", "兰": "蘭", "关": "關", "兴": "興", "兹": "茲", "养": "養", "兽": "獸", "冁": "囅", "内": "內", "冈": "岡", "册": "冊", "写": "寫", "军": "軍", "农": "農", "冯": "馮", "决": "決", "况": "況", "冻": "凍", "净": "凈", "凉": "涼", "减": "減", "凑": "湊", "凛": "凜", "凤": "鳳", "凭": "憑", "凯": "凱", "击": "擊", "凿": "鑿", "刍": "芻", "刘": "劉", "则": "則", "刚": "剛", "创": "創", "删": "刪", "刬": "剗", "刭": "剄", "刹": "剎", "刽": "劊", "刿": "劌", "剀": "剴", "剂": "劑", "剐": "剮", "剑": "劍", "剥": "剝", "剧": "劇", "㓥": "劏", "㔉": "劚", "劝": "勸", "办": "辦", "务": "務", "劢": "勱", "动": "動", "励": "勵", "劲": "勁", "劳": "勞", "势": "勢", "勋": "勛", "勚": "勩", "匀": "勻", "匦": "匭", "匮": "匱", "区": "區", "医": "醫", "华": "華", "协": "協", "单": "單", "卖": "賣", "卢": "盧", "卫": "衛", "却": "卻", "厅": "廳", "厉": "厲", "压": "壓", "厌": "厭", "厍": "厙", "厐": "龎", "厘": "釐", "厢": "廂", "厣": "厴", "厦": "廈", "厨": "廚", "厩": "廄", "厮": "廝", "县": "縣", "叁": "叄", "参": "參", "双": "雙", "变": "變", "叙": "敘", "叠": "疊", "号": "號", "叹": "嘆", "叽": "嘰", "吓": "嚇", "吕": "呂", "吗": "嗎", "吣": "唚", "吨": "噸", "听": "聽", "吴": "吳", "呐": "吶", "呒": "嘸", "呓": "囈", "呕": "嘔", "呖": "嚦", "呗": "唄", "员": "員", "呙": "咼", "呛": "嗆", "呜": "嗚", "咏": "詠", "咙": "嚨", "咛": "嚀", "咝": "噝", "响": "響", "哑": "啞", "哒": "噠", "哓": "嘵", "哔": "嗶", "哕": "噦", "哗": "嘩", "哙": "噲", "哜": "嚌", "哝": "噥", "哟": "喲", "唛": "嘜", "唝": "嗊", "唠": "嘮", "唡": "啢", "唢": "嗩", "唤": "喚", "啧": "嘖", "啬": "嗇", "啭": "囀", "啮": 
"嚙", "啴": "嘽", "啸": "嘯", "㖞": "喎", "喷": "噴", "喽": "嘍", "喾": "嚳", "嗫": "囁", "嗳": "噯", "嘘": "噓", "嘤": "嚶", "嘱": "囑", "㖊": "噚", "噜": "嚕", "嚣": "囂", "园": "園", "囱": "囪", "围": "圍", "囵": "圇", "国": "國", "图": "圖", "圆": "圓", "圣": "聖", "圹": "壙", "场": "場", "坂": "阪", "块": "塊", "坚": "堅", "坜": "壢", "坝": "壩", "坞": "塢", "坟": "墳", "坠": "墜", "垄": "壟", "垅": "壠", "垆": "壚", "垒": "壘", "垦": "墾", "垩": "堊", "垫": "墊", "垭": "埡", "垱": "壋", "垲": "塏", "垴": "堖", "埘": "塒", "埙": "塤", "埚": "堝", "埯": "垵", "堑": "塹", "堕": "墮", "𡒄": "壈", "壮": "壯", "声": "聲", "壶": "壺", "壸": "壼", "处": "處", "备": "備", "够": "夠", "头": "頭", "夸": "誇", "夹": "夾", "夺": "奪", "奁": "奩", "奂": "奐", "奋": "奮", "奥": "奧", "奸": "姦", "妆": "妝", "妇": "婦", "妈": "媽", "妩": "嫵", "妪": "嫗", "姗": "姍", "姹": "奼", "娄": "婁", "娅": "婭", "娆": "嬈", "娇": "嬌", "娈": "孌", "娱": "娛", "娲": "媧", "娴": "嫻", "婳": "嫿", "婴": "嬰", "婵": "嬋", "婶": "嬸", "媪": "媼", "嫒": "嬡", "嫔": "嬪", "嫱": "嬙", "嬷": "嬤", "孙": "孫", "学": "學", "孪": "孿", "宝": "寶", "实": "實", "宠": "寵", "审": "審", "宪": "憲", "宫": "宮", "宽": "寬", "宾": "賓", "寝": "寢", "对": "對", "寻": "尋", "导": "導", "寿": "壽", "将": "將", "尔": "爾", "尘": "塵", "尝": "嘗", "尧": "堯", "尴": "尷", "尸": "屍", "层": "層", "屃": "屓", "屉": "屜", "届": "屆", "属": "屬", "屡": "屢", "屦": "屨", "屿": "嶼", "岁": "歲", "岂": "豈", "岖": "嶇", "岗": "崗", "岘": "峴", "岙": "嶴", "岚": "嵐", "岛": "島", "岭": "嶺", "岽": "崬", "岿": "巋", "峄": "嶧", "峡": "峽", "峣": "嶢", "峤": "嶠", "峥": "崢", "峦": "巒", "崂": "嶗", "崃": "崍", "崄": "嶮", "崭": "嶄", "嵘": "嶸", "嵚": "嶔", "嵝": "嶁", "巅": "巔", "巩": "鞏", "巯": "巰", "币": "幣", "帅": "帥", "师": "師", "帏": "幃", "帐": "帳", "帜": "幟", "带": "帶", "帧": "幀", "帮": "幫", "帱": "幬", "帻": "幘", "帼": "幗", "幂": "冪", "庄": "莊", "庆": "慶", "庐": "廬", "庑": "廡", "库": "庫", "应": "應", "庙": "廟", "庞": "龐", "废": "廢", "廪": "廩", "开": "開", "异": "異", "弃": "棄", "弑": "弒", "张": "張", "弪": "弳", "弯": "彎", "弹": "彈", "强": "強", "归": "歸", "彝": "彞", "彦": "彥", "彻": "徹", "径": "徑", "徕": "徠", "忆": "憶", "忏": "懺", "忧": "憂", "忾": "愾", "怀": "懷", "态": "態", "怂": "慫", "怃": "憮", "怄": "慪", "怅": "悵", "怆": "愴", "怜": "憐", "总": "總", "怼": 
"懟", "怿": "懌", "恋": "戀", "恒": "恆", "恳": "懇", "恸": "慟", "恹": "懨", "恺": "愷", "恻": "惻", "恼": "惱", "恽": "惲", "悦": "悅", "悬": "懸", "悭": "慳", "悮": "悞", "悯": "憫", "惊": "驚", "惧": "懼", "惨": "慘", "惩": "懲", "惫": "憊", "惬": "愜", "惭": "慚", "惮": "憚", "惯": "慣", "愠": "慍", "愤": "憤", "愦": "憒", "慑": "懾", "懑": "懣", "懒": "懶", "懔": "懍", "戆": "戇", "戋": "戔", "戏": "戲", "戗": "戧", "战": "戰", "戬": "戩", "戯": "戱", "户": "戶", "扑": "撲", "执": "執", "扩": "擴", "扪": "捫", "扫": "掃", "扬": "揚", "扰": "擾", "抚": "撫", "抛": "拋", "抟": "摶", "抠": "摳", "抡": "掄", "抢": "搶", "护": "護", "报": "報", "拟": "擬", "拢": "攏", "拣": "揀", "拥": "擁", "拦": "攔", "拧": "擰", "拨": "撥", "择": "擇", "挂": "掛", "挚": "摯", "挛": "攣", "挜": "掗", "挝": "撾", "挞": "撻", "挟": "挾", "挠": "撓", "挡": "擋", "挢": "撟", "挣": "掙", "挤": "擠", "挥": "揮", "挦": "撏", "挽": "輓", "捝": "挩", "捞": "撈", "损": "損", "捡": "撿", "换": "換", "捣": "搗", "掳": "擄", "掴": "摑", "掷": "擲", "掸": "撣", "掺": "摻", "掼": "摜", "揽": "攬", "揾": "搵", "揿": "撳", "搀": "攙", "搁": "擱", "搂": "摟", "搅": "攪", "携": "攜", "摄": "攝", "摅": "攄", "摆": "擺", "摇": "搖", "摈": "擯", "摊": "攤", "撄": "攖", "撑": "撐", "㧑": "撝", "撵": "攆", "撷": "擷", "撸": "擼", "撺": "攛", "㧟": "擓", "擞": "擻", "攒": "攢", "敌": "敵", "敛": "斂", "数": "數", "斋": "齋", "斓": "斕", "斩": "斬", "断": "斷", "无": "無", "旧": "舊", "时": "時", "旷": "曠", "旸": "暘", "昙": "曇", "昼": "晝", "昽": "曨", "显": "顯", "晋": "晉", "晒": "曬", "晓": "曉", "晔": "曄", "晕": "暈", "晖": "暉", "暂": "暫", "暧": "曖", "机": "機", "杀": "殺", "杂": "雜", "权": "權", "杆": "桿", "条": "條", "来": "來", "杨": "楊", "杩": "榪", "杰": "傑", "构": "構", "枞": "樅", "枢": "樞", "枣": "棗", "枥": "櫪", "枧": "梘", "枨": "棖", "枪": "槍", "枫": "楓", "枭": "梟", "柠": "檸", "柽": "檉", "栀": "梔", "栅": "柵", "标": "標", "栈": "棧", "栉": "櫛", "栊": "櫳", "栋": "棟", "栌": "櫨", "栎": "櫟", "栏": "欄", "树": "樹", "栖": "棲", "栗": "慄", "样": "樣", "栾": "欒", "桠": "椏", "桡": "橈", "桢": "楨", "档": "檔", "桤": "榿", "桥": "橋", "桦": "樺", "桧": "檜", "桨": "槳", "桩": "樁", "梦": "夢", "梼": "檮", "梾": "棶", "梿": "槤", "检": "檢", "棁": "梲", "棂": "欞", "椁": "槨", "椟": "櫝", "椠": "槧", "椤": "欏", "椭": "橢", "楼": "樓", "榄": "欖", "榅": "榲", "榇": 
"櫬", "榈": "櫚", "榉": "櫸", "槚": "檟", "槛": "檻", "槟": "檳", "槠": "櫧", "横": "橫", "樯": "檣", "樱": "櫻", "橥": "櫫", "橱": "櫥", "橹": "櫓", "橼": "櫞", "檩": "檁", "欢": "歡", "欤": "歟", "欧": "歐", "歼": "殲", "殁": "歿", "殇": "殤", "残": "殘", "殒": "殞", "殓": "殮", "殚": "殫", "殡": "殯", "㱮": "殨", "㱩": "殰", "殴": "毆", "毁": "毀", "毂": "轂", "毕": "畢", "毙": "斃", "毡": "氈", "毵": "毿", "氇": "氌", "气": "氣", "氢": "氫", "氩": "氬", "氲": "氳", "汉": "漢", "汤": "湯", "汹": "洶", "沟": "溝", "没": "沒", "沣": "灃", "沤": "漚", "沥": "瀝", "沦": "淪", "沧": "滄", "沪": "滬", "泞": "濘", "注": "註", "泪": "淚", "泶": "澩", "泷": "瀧", "泸": "瀘", "泺": "濼", "泻": "瀉", "泼": "潑", "泽": "澤", "泾": "涇", "洁": "潔", "洒": "灑", "洼": "窪", "浃": "浹", "浅": "淺", "浆": "漿", "浇": "澆", "浈": "湞", "浊": "濁", "测": "測", "浍": "澮", "济": "濟", "浏": "瀏", "浐": "滻", "浑": "渾", "浒": "滸", "浓": "濃", "浔": "潯", "涛": "濤", "涝": "澇", "涞": "淶", "涟": "漣", "涠": "潿", "涡": "渦", "涣": "渙", "涤": "滌", "润": "潤", "涧": "澗", "涨": "漲", "涩": "澀", "渊": "淵", "渌": "淥", "渍": "漬", "渎": "瀆", "渐": "漸", "渑": "澠", "渔": "漁", "渖": "瀋", "渗": "滲", "温": "溫", "湾": "灣", "湿": "濕", "溃": "潰", "溅": "濺", "溆": "漵", "滗": "潷", "滚": "滾", "滞": "滯", "滟": "灧", "滠": "灄", "满": "滿", "滢": "瀅", "滤": "濾", "滥": "濫", "滦": "灤", "滨": "濱", "滩": "灘", "滪": "澦", "漤": "灠", "潆": "瀠", "潇": "瀟", "潋": "瀲", "潍": "濰", "潜": "潛", "潴": "瀦", "澜": "瀾", "濑": "瀨", "濒": "瀕", "㲿": "瀇", "灏": "灝", "灭": "滅", "灯": "燈", "灵": "靈", "灶": "竈", "灾": "災", "灿": "燦", "炀": "煬", "炉": "爐", "炖": "燉", "炜": "煒", "炝": "熗", "点": "點", "炼": "煉", "炽": "熾", "烁": "爍", "烂": "爛", "烃": "烴", "烛": "燭", "烟": "煙", "烦": "煩", "烧": "燒", "烨": "燁", "烩": "燴", "烫": "燙", "烬": "燼", "热": "熱", "焕": "煥", "焖": "燜", "焘": "燾", "㶽": "煱", "煴": "熅", "㶶": "燶", "爱": "愛", "爷": "爺", "牍": "牘", "牦": "氂", "牵": "牽", "牺": "犧", "犊": "犢", "状": "狀", "犷": "獷", "犸": "獁", "犹": "猶", "狈": "狽", "狝": "獮", "狞": "獰", "独": "獨", "狭": "狹", "狮": "獅", "狯": "獪", "狰": "猙", "狱": "獄", "狲": "猻", "猃": "獫", "猎": "獵", "猕": "獼", "猡": "玀", "猪": "豬", "猫": "貓", "猬": "蝟", "献": "獻", "獭": "獺", "㺍": "獱", "玑": "璣", "玚": "瑒", "玛": "瑪", "玮": "瑋", "环": "環", "现": 
"現", "玱": "瑲", "玺": "璽", "珐": "琺", "珑": "瓏", "珰": "璫", "珲": "琿", "琏": "璉", "琐": "瑣", "琼": "瓊", "瑶": "瑤", "瑷": "璦", "璎": "瓔", "瓒": "瓚", "瓯": "甌", "电": "電", "画": "畫", "畅": "暢", "畴": "疇", "疖": "癤", "疗": "療", "疟": "瘧", "疠": "癘", "疡": "瘍", "疬": "癧", "疭": "瘲", "疮": "瘡", "疯": "瘋", "疱": "皰", "疴": "痾", "痈": "癰", "痉": "痙", "痒": "癢", "痖": "瘂", "痨": "癆", "痪": "瘓", "痫": "癇", "瘅": "癉", "瘆": "瘮", "瘗": "瘞", "瘪": "癟", "瘫": "癱", "瘾": "癮", "瘿": "癭", "癞": "癩", "癣": "癬", "癫": "癲", "皑": "皚", "皱": "皺", "皲": "皸", "盏": "盞", "盐": "鹽", "监": "監", "盖": "蓋", "盗": "盜", "盘": "盤", "眍": "瞘", "眦": "眥", "眬": "矓", "睁": "睜", "睐": "睞", "睑": "瞼", "瞆": "瞶", "瞒": "瞞", "䁖": "瞜", "瞩": "矚", "矫": "矯", "矶": "磯", "矾": "礬", "矿": "礦", "砀": "碭", "码": "碼", "砖": "磚", "砗": "硨", "砚": "硯", "砜": "碸", "砺": "礪", "砻": "礱", "砾": "礫", "础": "礎", "硁": "硜", "硕": "碩", "硖": "硤", "硗": "磽", "硙": "磑", "碍": "礙", "碛": "磧", "碜": "磣", "碱": "鹼", "礼": "禮", "祃": "禡", "祎": "禕", "祢": "禰", "祯": "禎", "祷": "禱", "祸": "禍", "禀": "稟", "禄": "祿", "禅": "禪", "离": "離", "秃": "禿", "秆": "稈", "积": "積", "称": "稱", "秽": "穢", "秾": "穠", "稆": "穭", "税": "稅", "䅉": "稏", "稣": "穌", "稳": "穩", "穑": "穡", "穷": "窮", "窃": "竊", "窍": "竅", "窎": "窵", "窑": "窯", "窜": "竄", "窝": "窩", "窥": "窺", "窦": "竇", "窭": "窶", "竞": "競", "笃": "篤", "笋": "筍", "笔": "筆", "笕": "筧", "笺": "箋", "笼": "籠", "笾": "籩", "筚": "篳", "筛": "篩", "筜": "簹", "筝": "箏", "䇲": "筴", "筹": "籌", "筼": "篔", "简": "簡", "箓": "籙", "箦": "簀", "箧": "篋", "箨": "籜", "箩": "籮", "箪": "簞", "箫": "簫", "篑": "簣", "篓": "簍", "篮": "籃", "篱": "籬", "簖": "籪", "籁": "籟", "籴": "糴", "类": "類", "籼": "秈", "粜": "糶", "粝": "糲", "粤": "粵", "粪": "糞", "粮": "糧", "糁": "糝", "糇": "餱", "紧": "緊", "䌷": "紬", "䌹": "絅", "絷": "縶", "䌼": "綐", "䌽": "綵", "䌸": "縳", "䍁": "繸", "䍀": "繿", "纟": "糹", "纠": "糾", "纡": "紆", "红": "紅", "纣": "紂", "纥": "紇", "约": "約", "级": "級", "纨": "紈", "纩": "纊", "纪": "紀", "纫": "紉", "纬": "緯", "纭": "紜", "纮": "紘", "纯": "純", "纰": "紕", "纱": "紗", "纲": "綱", "纳": "納", "纴": "紝", "纵": "縱", "纶": "綸", "纷": "紛", "纸": "紙", "纹": "紋", "纺": "紡", "纻": "紵", "纼": "紖", "纽": "紐", "纾": 
"紓", "绀": "紺", "绁": "紲", "绂": "紱", "练": "練", "组": "組", "绅": "紳", "细": "細", "织": "織", "终": "終", "绉": "縐", "绊": "絆", "绋": "紼", "绌": "絀", "绍": "紹", "绎": "繹", "经": "經", "绐": "紿", "绑": "綁", "绒": "絨", "结": "結", "绔": "絝", "绕": "繞", "绖": "絰", "绗": "絎", "绘": "繪", "给": "給", "绚": "絢", "绛": "絳", "络": "絡", "绞": "絞", "统": "統", "绠": "綆", "绡": "綃", "绢": "絹", "绤": "綌", "绥": "綏", "继": "繼", "绨": "綈", "绩": "績", "绪": "緒", "绫": "綾", "绬": "緓", "续": "續", "绮": "綺", "绯": "緋", "绰": "綽", "绲": "緄", "绳": "繩", "维": "維", "绵": "綿", "绶": "綬", "绸": "綢", "绹": "綯", "绺": "綹", "绻": "綣", "综": "綜", "绽": "綻", "绾": "綰", "缀": "綴", "缁": "緇", "缂": "緙", "缃": "緗", "缄": "緘", "缅": "緬", "缆": "纜", "缇": "緹", "缈": "緲", "缉": "緝", "缊": "縕", "缋": "繢", "缌": "緦", "缍": "綞", "缎": "緞", "缏": "緶", "缑": "緱", "缒": "縋", "缓": "緩", "缔": "締", "缕": "縷", "编": "編", "缗": "緡", "缘": "緣", "缙": "縉", "缚": "縛", "缛": "縟", "缜": "縝", "缝": "縫", "缞": "縗", "缟": "縞", "缠": "纏", "缡": "縭", "缢": "縊", "缣": "縑", "缤": "繽", "缥": "縹", "缦": "縵", "缧": "縲", "缨": "纓", "缩": "縮", "缪": "繆", "缫": "繅", "缬": "纈", "缭": "繚", "缮": "繕", "缯": "繒", "缱": "繾", "缲": "繰", "缳": "繯", "缴": "繳", "缵": "纘", "罂": "罌", "网": "網", "罗": "羅", "罚": "罰", "罢": "罷", "罴": "羆", "羁": "羈", "羟": "羥", "翘": "翹", "耢": "耮", "耧": "耬", "耸": "聳", "耻": "恥", "聂": "聶", "聋": "聾", "职": "職", "聍": "聹", "联": "聯", "聩": "聵", "聪": "聰", "肃": "肅", "肠": "腸", "肤": "膚", "肮": "骯", "肴": "餚", "肾": "腎", "肿": "腫", "胀": "脹", "胁": "脅", "胆": "膽", "胧": "朧", "胨": "腖", "胪": "臚", "胫": "脛", "胶": "膠", "脉": "脈", "脍": "膾", "脐": "臍", "脑": "腦", "脓": "膿", "脔": "臠", "脚": "腳", "脱": "脫", "脶": "腡", "脸": "臉", "腭": "齶", "腻": "膩", "腼": "靦", "腽": "膃", "腾": "騰", "膑": "臏", "臜": "臢", "舆": "輿", "舣": "艤", "舰": "艦", "舱": "艙", "舻": "艫", "艰": "艱", "艳": "艷", "艺": "藝", "节": "節", "芈": "羋", "芗": "薌", "芜": "蕪", "芦": "蘆", "苁": "蓯", "苇": "葦", "苈": "藶", "苋": "莧", "苌": "萇", "苍": "蒼", "苎": "苧", "茎": "莖", "茏": "蘢", "茑": "蔦", "茔": "塋", "茕": "煢", "茧": "繭", "荆": "荊", "荐": "薦", "荙": "薘", "荚": "莢", "荛": "蕘", "荜": "蓽", "荞": "蕎", "荟": "薈", "荠": "薺", "荣": "榮", "荤": "葷", "荥": 
"滎", "荦": "犖", "荧": "熒", "荨": "蕁", "荩": "藎", "荪": "蓀", "荫": "蔭", "荬": "蕒", "荭": "葒", "荮": "葤", "莅": "蒞", "莱": "萊", "莲": "蓮", "莳": "蒔", "莴": "萵", "莶": "薟", "莸": "蕕", "莹": "瑩", "莺": "鶯", "萝": "蘿", "萤": "螢", "营": "營", "萦": "縈", "萧": "蕭", "萨": "薩", "葱": "蔥", "蒇": "蕆", "蒉": "蕢", "蒋": "蔣", "蒌": "蔞", "蓝": "藍", "蓟": "薊", "蓠": "蘺", "蓣": "蕷", "蓥": "鎣", "蓦": "驀", "蔂": "虆", "蔷": "薔", "蔹": "蘞", "蔺": "藺", "蔼": "藹", "蕰": "薀", "蕲": "蘄", "薮": "藪", "䓕": "薳", "藓": "蘚", "蘖": "櫱", "虏": "虜", "虑": "慮", "虚": "虛", "虬": "虯", "虮": "蟣", "虽": "雖", "虾": "蝦", "虿": "蠆", "蚀": "蝕", "蚁": "蟻", "蚂": "螞", "蚕": "蠶", "蚬": "蜆", "蛊": "蠱", "蛎": "蠣", "蛏": "蟶", "蛮": "蠻", "蛰": "蟄", "蛱": "蛺", "蛲": "蟯", "蛳": "螄", "蛴": "蠐", "蜕": "蛻", "蜗": "蝸", "蝇": "蠅", "蝈": "蟈", "蝉": "蟬", "蝼": "螻", "蝾": "蠑", "螀": "螿", "螨": "蟎", "䗖": "螮", "蟏": "蠨", "衅": "釁", "衔": "銜", "补": "補", "衬": "襯", "衮": "袞", "袄": "襖", "袅": "裊", "袆": "褘", "袜": "襪", "袭": "襲", "袯": "襏", "装": "裝", "裆": "襠", "裈": "褌", "裢": "褳", "裣": "襝", "裤": "褲", "裥": "襇", "褛": "褸", "褴": "襤", "䙓": "襬", "见": "見", "观": "觀", "觃": "覎", "规": "規", "觅": "覓", "视": "視", "觇": "覘", "览": "覽", "觉": "覺", "觊": "覬", "觋": "覡", "觌": "覿", "觍": "覥", "觎": "覦", "觏": "覯", "觐": "覲", "觑": "覷", "觞": "觴", "触": "觸", "觯": "觶", "訚": "誾", "䜣": "訢", "誉": "譽", "誊": "謄", "䜧": "譅", "讠": "訁", "计": "計", "订": "訂", "讣": "訃", "认": "認", "讥": "譏", "讦": "訐", "讧": "訌", "讨": "討", "让": "讓", "讪": "訕", "讫": "訖", "讬": "託", "训": "訓", "议": "議", "讯": "訊", "记": "記", "讱": "訒", "讲": "講", "讳": "諱", "讴": "謳", "讵": "詎", "讶": "訝", "讷": "訥", "许": "許", "讹": "訛", "论": "論", "讻": "訩", "讼": "訟", "讽": "諷", "设": "設", "访": "訪", "诀": "訣", "证": "證", "诂": "詁", "诃": "訶", "评": "評", "诅": "詛", "识": "識", "诇": "詗", "诈": "詐", "诉": "訴", "诊": "診", "诋": "詆", "诌": "謅", "词": "詞", "诎": "詘", "诏": "詔", "诐": "詖", "译": "譯", "诒": "詒", "诓": "誆", "诔": "誄", "试": "試", "诖": "詿", "诗": "詩", "诘": "詰", "诙": "詼", "诚": "誠", "诛": "誅", "诜": "詵", "话": "話", "诞": "誕", "诟": "詬", "诠": "詮", "诡": "詭", "询": "詢", "诣": "詣", "诤": "諍", "该": "該", "详": "詳", "诧": "詫", "诨": "諢", "诩": "詡", "诪": 
"譸", "诫": "誡", "诬": "誣", "语": "語", "诮": "誚", "误": "誤", "诰": "誥", "诱": "誘", "诲": "誨", "诳": "誑", "诵": "誦", "诶": "誒", "请": "請", "诸": "諸", "诹": "諏", "诺": "諾", "读": "讀", "诼": "諑", "诽": "誹", "课": "課", "诿": "諉", "谀": "諛", "谁": "誰", "谂": "諗", "调": "調", "谄": "諂", "谅": "諒", "谆": "諄", "谇": "誶", "谈": "談", "谊": "誼", "谋": "謀", "谌": "諶", "谍": "諜", "谎": "謊", "谏": "諫", "谐": "諧", "谑": "謔", "谒": "謁", "谓": "謂", "谔": "諤", "谕": "諭", "谖": "諼", "谗": "讒", "谘": "諮", "谙": "諳", "谚": "諺", "谛": "諦", "谜": "謎", "谝": "諞", "谞": "諝", "谟": "謨", "谠": "讜", "谡": "謖", "谢": "謝", "谤": "謗", "谥": "謚", "谦": "謙", "谧": "謐", "谨": "謹", "谩": "謾", "谪": "謫", "谬": "謬", "谭": "譚", "谮": "譖", "谯": "譙", "谰": "讕", "谱": "譜", "谲": "譎", "谳": "讞", "谴": "譴", "谵": "譫", "谶": "讖", "豮": "豶", "䝙": "貙", "䞐": "賰", "贝": "貝", "贞": "貞", "负": "負", "贠": "貟", "贡": "貢", "财": "財", "责": "責", "贤": "賢", "败": "敗", "账": "賬", "货": "貨", "质": "質", "贩": "販", "贪": "貪", "贫": "貧", "贬": "貶", "购": "購", "贮": "貯", "贯": "貫", "贰": "貳", "贱": "賤", "贲": "賁", "贳": "貰", "贴": "貼", "贵": "貴", "贶": "貺", "贷": "貸", "贸": "貿", "费": "費", "贺": "賀", "贻": "貽", "贼": "賊", "贽": "贄", "贾": "賈", "贿": "賄", "赀": "貲", "赁": "賃", "赂": "賂", "资": "資", "赅": "賅", "赆": "贐", "赇": "賕", "赈": "賑", "赉": "賚", "赊": "賒", "赋": "賦", "赌": "賭", "赎": "贖", "赏": "賞", "赐": "賜", "赑": "贔", "赒": "賙", "赓": "賡", "赔": "賠", "赕": "賧", "赖": "賴", "赗": "賵", "赘": "贅", "赙": "賻", "赚": "賺", "赛": "賽", "赜": "賾", "赞": "贊", "赟": "贇", "赠": "贈", "赡": "贍", "赢": "贏", "赣": "贛", "赪": "赬", "赵": "趙", "赶": "趕", "趋": "趨", "趱": "趲", "趸": "躉", "跃": "躍", "跄": "蹌", "跞": "躒", "践": "踐", "跶": "躂", "跷": "蹺", "跸": "蹕", "跹": "躚", "跻": "躋", "踊": "踴", "踌": "躊", "踪": "蹤", "踬": "躓", "踯": "躑", "蹑": "躡", "蹒": "蹣", "蹰": "躕", "蹿": "躥", "躏": "躪", "躜": "躦", "躯": "軀", "车": "車", "轧": "軋", "轨": "軌", "轩": "軒", "轪": "軑", "轫": "軔", "转": "轉", "轭": "軛", "轮": "輪", "软": "軟", "轰": "轟", "轱": "軲", "轲": "軻", "轳": "轤", "轴": "軸", "轵": "軹", "轶": "軼", "轷": "軤", "轸": "軫", "轹": "轢", "轺": "軺", "轻": "輕", "轼": "軾", "载": "載", "轾": "輊", "轿": "轎", "辀": "輈", "辁": "輇", "辂": "輅", "较": 
"較", "辄": "輒", "辅": "輔", "辆": "輛", "辇": "輦", "辈": "輩", "辉": "輝", "辊": "輥", "辋": "輞", "辌": "輬", "辍": "輟", "辎": "輜", "辏": "輳", "辐": "輻", "辑": "輯", "辒": "轀", "输": "輸", "辔": "轡", "辕": "轅", "辖": "轄", "辗": "輾", "辘": "轆", "辙": "轍", "辚": "轔", "辞": "辭", "辩": "辯", "辫": "辮", "边": "邊", "辽": "遼", "达": "達", "迁": "遷", "过": "過", "迈": "邁", "运": "運", "还": "還", "这": "這", "进": "進", "远": "遠", "违": "違", "连": "連", "迟": "遲", "迩": "邇", "迳": "逕", "迹": "跡", "选": "選", "逊": "遜", "递": "遞", "逦": "邐", "逻": "邏", "遗": "遺", "遥": "遙", "邓": "鄧", "邝": "鄺", "邬": "鄔", "邮": "郵", "邹": "鄒", "邺": "鄴", "邻": "鄰", "郏": "郟", "郐": "鄶", "郑": "鄭", "郓": "鄆", "郦": "酈", "郧": "鄖", "郸": "鄲", "酂": "酇", "酦": "醱", "酱": "醬", "酽": "釅", "酾": "釃", "酿": "釀", "释": "釋", "鉴": "鑒", "銮": "鑾", "錾": "鏨", "𨱏": "鎝", "钅": "釒", "钆": "釓", "钇": "釔", "针": "針", "钉": "釘", "钊": "釗", "钋": "釙", "钌": "釕", "钍": "釷", "钎": "釺", "钏": "釧", "钐": "釤", "钑": "鈒", "钒": "釩", "钓": "釣", "钔": "鍆", "钕": "釹", "钖": "鍚", "钗": "釵", "钘": "鈃", "钙": "鈣", "钚": "鈈", "钛": "鈦", "钜": "鉅", "钝": "鈍", "钞": "鈔", "钠": "鈉", "钡": "鋇", "钢": "鋼", "钣": "鈑", "钤": "鈐", "钥": "鑰", "钦": "欽", "钧": "鈞", "钨": "鎢", "钪": "鈧", "钫": "鈁", "钬": "鈥", "钭": "鈄", "钮": "鈕", "钯": "鈀", "钰": "鈺", "钱": "錢", "钲": "鉦", "钳": "鉗", "钴": "鈷", "钶": "鈳", "钷": "鉕", "钸": "鈽", "钹": "鈸", "钺": "鉞", "钻": "鑽", "钼": "鉬", "钽": "鉭", "钾": "鉀", "钿": "鈿", "铀": "鈾", "铁": "鐵", "铂": "鉑", "铃": "鈴", "铄": "鑠", "铅": "鉛", "铆": "鉚", "铇": "鉋", "铈": "鈰", "铉": "鉉", "铊": "鉈", "铋": "鉍", "铌": "鈮", "铍": "鈹", "铎": "鐸", "铏": "鉶", "铐": "銬", "铑": "銠", "铒": "鉺", "铓": "鋩", "铔": "錏", "铕": "銪", "铖": "鋮", "铗": "鋏", "铘": "鋣", "铙": "鐃", "铚": "銍", "铛": "鐺", "铜": "銅", "铝": "鋁", "铞": "銱", "铟": "銦", "铠": "鎧", "铡": "鍘", "铢": "銖", "铣": "銑", "铤": "鋌", "铥": "銩", "铦": "銛", "铧": "鏵", "铨": "銓", "铩": "鎩", "铪": "鉿", "铫": "銚", "铬": "鉻", "铭": "銘", "铮": "錚", "铯": "銫", "铰": "鉸", "铱": "銥", "铲": "鏟", "铳": "銃", "铴": "鐋", "铵": "銨", "银": "銀", "铷": "銣", "铸": "鑄", "铹": "鐒", "铺": "鋪", "铻": "鋙", "铼": "錸", "铽": "鋱", "链": "鏈", "铿": "鏗", "销": "銷", "锁": "鎖", "锂": "鋰", "锃": "鋥", "锄": 
"鋤", "锅": "鍋", "锆": "鋯", "锇": "鋨", "锉": "銼", "锊": "鋝", "锋": "鋒", "锌": "鋅", "锍": "鋶", "锎": "鐦", "锏": "鐧", "锑": "銻", "锒": "鋃", "锓": "鋟", "锔": "鋦", "锕": "錒", "锖": "錆", "锗": "鍺", "锘": "鍩", "错": "錯", "锚": "錨", "锛": "錛", "锜": "錡", "锝": "鍀", "锞": "錁", "锟": "錕", "锠": "錩", "锡": "錫", "锢": "錮", "锣": "鑼", "锥": "錐", "锦": "錦", "锧": "鑕", "锩": "錈", "锪": "鍃", "锫": "錇", "锬": "錟", "锭": "錠", "键": "鍵", "锯": "鋸", "锰": "錳", "锱": "錙", "锲": "鍥", "锳": "鍈", "锴": "鍇", "锵": "鏘", "锶": "鍶", "锷": "鍔", "锸": "鍤", "锹": "鍬", "锺": "鍾", "锻": "鍛", "锼": "鎪", "锽": "鍠", "锾": "鍰", "锿": "鎄", "镀": "鍍", "镁": "鎂", "镂": "鏤", "镃": "鎡", "镄": "鐨", "镅": "鎇", "镆": "鏌", "镇": "鎮", "镈": "鎛", "镉": "鎘", "镊": "鑷", "镋": "鎲", "镍": "鎳", "镎": "鎿", "镏": "鎦", "镐": "鎬", "镑": "鎊", "镒": "鎰", "镓": "鎵", "镔": "鑌", "镕": "鎔", "镖": "鏢", "镗": "鏜", "镘": "鏝", "镙": "鏍", "镚": "鏰", "镛": "鏞", "镜": "鏡", "镝": "鏑", "镞": "鏃", "镟": "鏇", "镠": "鏐", "镡": "鐔", "镣": "鐐", "镤": "鏷", "镥": "鑥", "镦": "鐓", "镧": "鑭", "镨": "鐠", "镩": "鑹", "镪": "鏹", "镫": "鐙", "镬": "鑊", "镭": "鐳", "镮": "鐶", "镯": "鐲", "镰": "鐮", "镱": "鐿", "镲": "鑔", "镳": "鑣", "镴": "鑞", "镵": "鑱", "镶": "鑲", "长": "長", "门": "門", "闩": "閂", "闪": "閃", "闫": "閆", "闬": "閈", "闭": "閉", "问": "問", "闯": "闖", "闰": "閏", "闱": "闈", "闲": "閑", "闳": "閎", "间": "間", "闵": "閔", "闶": "閌", "闷": "悶", "闸": "閘", "闹": "鬧", "闺": "閨", "闻": "聞", "闼": "闥", "闽": "閩", "闾": "閭", "闿": "闓", "阀": "閥", "阁": "閣", "阂": "閡", "阃": "閫", "阄": "鬮", "阆": "閬", "阇": "闍", "阈": "閾", "阉": "閹", "阊": "閶", "阋": "鬩", "阌": "閿", "阍": "閽", "阎": "閻", "阏": "閼", "阐": "闡", "阑": "闌", "阒": "闃", "阓": "闠", "阔": "闊", "阕": "闋", "阖": "闔", "阗": "闐", "阘": "闒", "阙": "闕", "阚": "闞", "阛": "闤", "队": "隊", "阳": "陽", "阴": "陰", "阵": "陣", "阶": "階", "际": "際", "陆": "陸", "陇": "隴", "陈": "陳", "陉": "陘", "陕": "陝", "陧": "隉", "陨": "隕", "险": "險", "随": "隨", "隐": "隱", "隶": "隸", "隽": "雋", "难": "難", "雏": "雛", "雠": "讎", "雳": "靂", "雾": "霧", "霁": "霽", "霡": "霢", "霭": "靄", "靓": "靚", "静": "靜", "靥": "靨", "䩄": "靦", "鞑": "韃", "鞒": "鞽", "鞯": "韉", "韦": "韋", "韧": "韌", "韨": "韍", "韩": "韓", "韪": "韙", "韫": "韞", "韬": 
"韜", "韵": "韻", "页": "頁", "顶": "頂", "顷": "頃", "顸": "頇", "项": "項", "顺": "順", "顼": "頊", "顽": "頑", "顾": "顧", "顿": "頓", "颀": "頎", "颁": "頒", "颂": "頌", "颃": "頏", "预": "預", "颅": "顱", "领": "領", "颇": "頗", "颈": "頸", "颉": "頡", "颊": "頰", "颋": "頲", "颌": "頜", "颍": "潁", "颎": "熲", "颏": "頦", "颐": "頤", "频": "頻", "颒": "頮", "颔": "頷", "颕": "頴", "颖": "穎", "颗": "顆", "题": "題", "颙": "顒", "颚": "顎", "颛": "顓", "额": "額", "颞": "顳", "颟": "顢", "颠": "顛", "颡": "顙", "颢": "顥", "颤": "顫", "颥": "顬", "颦": "顰", "颧": "顴", "风": "風", "飏": "颺", "飐": "颭", "飑": "颮", "飒": "颯", "飓": "颶", "飔": "颸", "飕": "颼", "飖": "颻", "飗": "飀", "飘": "飄", "飙": "飆", "飚": "飈", "飞": "飛", "飨": "饗", "餍": "饜", "饣": "飠", "饤": "飣", "饦": "飥", "饧": "餳", "饨": "飩", "饩": "餼", "饪": "飪", "饫": "飫", "饬": "飭", "饭": "飯", "饮": "飲", "饯": "餞", "饰": "飾", "饱": "飽", "饲": "飼", "饳": "飿", "饴": "飴", "饵": "餌", "饶": "饒", "饷": "餉", "饸": "餄", "饹": "餎", "饺": "餃", "饻": "餏", "饼": "餅", "饽": "餑", "饾": "餖", "饿": "餓", "馀": "餘", "馁": "餒", "馂": "餕", "馃": "餜", "馄": "餛", "馅": "餡", "馆": "館", "馇": "餷", "馈": "饋", "馉": "餶", "馊": "餿", "馋": "饞", "馌": "饁", "馍": "饃", "馎": "餺", "馏": "餾", "馐": "饈", "馑": "饉", "馒": "饅", "馓": "饊", "馔": "饌", "馕": "饢", "䯄": "騧", "马": "馬", "驭": "馭", "驮": "馱", "驯": "馴", "驰": "馳", "驱": "驅", "驲": "馹", "驳": "駁", "驴": "驢", "驵": "駔", "驶": "駛", "驷": "駟", "驸": "駙", "驹": "駒", "驺": "騶", "驻": "駐", "驼": "駝", "驽": "駑", "驾": "駕", "驿": "驛", "骀": "駘", "骁": "驍", "骃": "駰", "骄": "驕", "骅": "驊", "骆": "駱", "骇": "駭", "骈": "駢", "骉": "驫", "骊": "驪", "骋": "騁", "验": "驗", "骍": "騂", "骎": "駸", "骏": "駿", "骐": "騏", "骑": "騎", "骒": "騍", "骓": "騅", "骔": "騌", "骕": "驌", "骖": "驂", "骗": "騙", "骘": "騭", "骙": "騤", "骚": "騷", "骛": "騖", "骜": "驁", "骝": "騮", "骞": "騫", "骟": "騸", "骠": "驃", "骡": "騾", "骢": "驄", "骣": "驏", "骤": "驟", "骥": "驥", "骦": "驦", "骧": "驤", "髅": "髏", "髋": "髖", "髌": "髕", "鬓": "鬢", "魇": "魘", "魉": "魎", "鱼": "魚", "鱽": "魛", "鱾": "魢", "鱿": "魷", "鲀": "魨", "鲁": "魯", "鲂": "魴", "鲃": "䰾", "鲄": "魺", "鲅": "鮁", "鲆": "鮃", "鲈": "鱸", "鲉": "鮋", "鲊": "鮓", "鲋": "鮒", "鲌": "鮊", "鲍": "鮑", "鲎": "鱟", "鲏": "鮍", "鲐": 
"鮐", "鲑": "鮭", "鲒": "鮚", "鲓": "鮳", "鲔": "鮪", "鲕": "鮞", "鲖": "鮦", "鲗": "鰂", "鲘": "鮜", "鲙": "鱠", "鲚": "鱭", "鲛": "鮫", "鲜": "鮮", "鲝": "鮺", "鲟": "鱘", "鲠": "鯁", "鲡": "鱺", "鲢": "鰱", "鲣": "鰹", "鲤": "鯉", "鲥": "鰣", "鲦": "鰷", "鲧": "鯀", "鲨": "鯊", "鲩": "鯇", "鲪": "鮶", "鲫": "鯽", "鲬": "鯒", "鲭": "鯖", "鲮": "鯪", "鲯": "鯕", "鲰": "鯫", "鲱": "鯡", "鲲": "鯤", "鲳": "鯧", "鲴": "鯝", "鲵": "鯢", "鲶": "鯰", "鲷": "鯛", "鲸": "鯨", "鲹": "鰺", "鲺": "鯴", "鲻": "鯔", "鲼": "鱝", "鲽": "鰈", "鲾": "鰏", "鲿": "鱨", "鳀": "鯷", "鳁": "鰮", "鳂": "鰃", "鳃": "鰓", "鳅": "鰍", "鳆": "鰒", "鳇": "鰉", "鳈": "鰁", "鳉": "鱂", "鳊": "鯿", "鳋": "鰠", "鳌": "鰲", "鳍": "鰭", "鳎": "鰨", "鳏": "鰥", "鳐": "鰩", "鳑": "鰟", "鳒": "鰜", "鳓": "鰳", "鳔": "鰾", "鳕": "鱈", "鳖": "鱉", "鳗": "鰻", "鳘": "鰵", "鳙": "鱅", "鳚": "䲁", "鳛": "鰼", "鳜": "鱖", "鳝": "鱔", "鳞": "鱗", "鳟": "鱒", "鳠": "鱯", "鳡": "鱤", "鳢": "鱧", "鳣": "鱣", "䴓": "鳾", "䴕": "鴷", "䴔": "鵁", "䴖": "鶄", "䴗": "鶪", "䴘": "鷈", "䴙": "鷿", "㶉": "鸂", "鸟": "鳥", "鸠": "鳩", "鸢": "鳶", "鸣": "鳴", "鸤": "鳲", "鸥": "鷗", "鸦": "鴉", "鸧": "鶬", "鸨": "鴇", "鸩": "鴆", "鸪": "鴣", "鸫": "鶇", "鸬": "鸕", "鸭": "鴨", "鸮": "鴞", "鸯": "鴦", "鸰": "鴒", "鸱": "鴟", "鸲": "鴝", "鸳": "鴛", "鸴": "鷽", "鸵": "鴕", "鸶": "鷥", "鸷": "鷙", "鸸": "鴯", "鸹": "鴰", "鸺": "鵂", "鸻": "鴴", "鸼": "鵃", "鸽": "鴿", "鸾": "鸞", "鸿": "鴻", "鹀": "鵐", "鹁": "鵓", "鹂": "鸝", "鹃": "鵑", "鹄": "鵠", "鹅": "鵝", "鹆": "鵒", "鹇": "鷳", "鹈": "鵜", "鹉": "鵡", "鹊": "鵲", "鹋": "鶓", "鹌": "鵪", "鹍": "鵾", "鹎": "鵯", "鹏": "鵬", "鹐": "鵮", "鹑": "鶉", "鹒": "鶊", "鹓": "鵷", "鹔": "鷫", "鹕": "鶘", "鹖": "鶡", "鹗": "鶚", "鹘": "鶻", "鹙": "鶖", "鹛": "鶥", "鹜": "鶩", "鹝": "鷊", "鹞": "鷂", "鹟": "鶲", "鹠": "鶹", "鹡": "鶺", "鹢": "鷁", "鹣": "鶼", "鹤": "鶴", "鹥": "鷖", "鹦": "鸚", "鹧": "鷓", "鹨": "鷚", "鹩": "鷯", "鹪": "鷦", "鹫": "鷲", "鹬": "鷸", "鹭": "鷺", "鹯": "鸇", "鹰": "鷹", "鹱": "鸌", "鹲": "鸏", "鹳": "鸛", "鹴": "鸘", "鹾": "鹺", "麦": "麥", "麸": "麩", "黄": "黃", "黉": "黌", "黡": "黶", "黩": "黷", "黪": "黲", "黾": "黽", "鼋": "黿", "鼍": "鼉", "鼗": "鞀", "鼹": "鼴", "齐": "齊", "齑": "齏", "齿": "齒", "龀": "齔", "龁": "齕", "龂": "齗", "龃": "齟", "龄": "齡", "龅": "齙", "龆": "齠", "龇": "齜", "龈": "齦", "龉": "齬", "龊": "齪", "龋": 
"齲", "龌": "齷", "龙": "龍", "龚": "龔", "龛": "龕", "龟": "龜", "一伙": "一伙", "一并": "一併", "一准": "一准", "一划": "一划", "一地里": "一地裡", "一干": "一干", "一树百获": "一樹百穫", "一台": "一臺", "一冲": "一衝", "一只": "一隻", "一发千钧": "一髮千鈞", "一出": "一齣", "七只": "七隻", "三元里": "三元裡", "三国志": "三國誌", "三复": "三複", "三只": "三隻", "上吊": "上吊", "上台": "上臺", "下不了台": "下不了臺", "下台": "下臺", "下面": "下麵", "不准": "不准", "不吊": "不吊", "不知就里": "不知就裡", "不知所云": "不知所云", "不锈钢": "不鏽鋼", "丑剧": "丑劇", "丑旦": "丑旦", "丑角": "丑角", "并存着": "並存著", "中岳": "中嶽", "中台医专": "中臺醫專", "丰南": "丰南", "丰台": "丰台", "丰姿": "丰姿", "丰采": "丰采", "丰韵": "丰韻", "主干": "主幹", "么么唱唱": "么么唱唱", "么儿": "么兒", "么喝": "么喝", "么妹": "么妹", "么弟": "么弟", "么爷": "么爺", "九世之雠": "九世之讎", "九只": "九隻", "干丝": "乾絲", "干着急": "乾著急", "乱发": "亂髮", "云云": "云云", "云尔": "云爾", "五岳": "五嶽", "五斗柜": "五斗櫃", "五斗橱": "五斗櫥", "五谷": "五穀", "五行生克": "五行生剋", "五只": "五隻", "五出": "五齣", "交卷": "交卷", "人云亦云": "人云亦云", "人物志": "人物誌", "什锦面": "什錦麵", "什么": "什麼", "仆倒": "仆倒", "介系词": "介係詞", "介系词": "介繫詞", "仿制": "仿製", "伙伕": "伙伕", "伙伴": "伙伴", "伙同": "伙同", "伙夫": "伙夫", "伙房": "伙房", "伙计": "伙計", "伙食": "伙食", "布下": "佈下", "布告": "佈告", "布哨": "佈哨", "布局": "佈局", "布岗": "佈崗", "布施": "佈施", "布景": "佈景", "布满": "佈滿", "布线": "佈線", "布置": "佈置", "布署": "佈署", "布道": "佈道", "布达": "佈達", "布防": "佈防", "布阵": "佈陣", "布雷": "佈雷", "体育锻鍊": "体育鍛鍊", "何干": "何干", "作准": "作准", "佣人": "佣人", "佣工": "佣工", "佣金": "佣金", "并入": "併入", "并列": "併列", "并到": "併到", "并合": "併合", "并吞": "併吞", "并在": "併在", "并成": "併成", "并排": "併排", "并拢": "併攏", "并案": "併案", "并为": "併為", "并发": "併發", "并科": "併科", "并购": "併購", "并进": "併進", "来复": "來複", "供制": "供製", "依依不舍": "依依不捨", "侵并": "侵併", "便辟": "便辟", "系数": "係數", "系为": "係為", "保险柜": "保險柜", "信号台": "信號臺", "修复": "修複", "修胡刀": "修鬍刀", "俯冲": "俯衝", "个里": "個裡", "借着": "借著", "假发": "假髮", "停制": "停製", "偷鸡不着": "偷雞不著", "家伙": "傢伙", "家俱": "傢俱", "家具": "傢具", "传布": "傳佈", "债台高筑": "債臺高築", "傻里傻气": "傻裡傻氣", "倾家荡产": "傾家蕩產", "倾覆": "傾複", "倾覆": "傾覆", "僱佣": "僱佣", "仪表": "儀錶", "亿只": "億隻", "尽尽": "儘儘", "尽先": "儘先", "尽其所有": "儘其所有", "尽力": "儘力", "尽快": "儘快", "尽早": "儘早", "尽是": "儘是", "尽管": "儘管", "尽速": "儘速", "尽量": "儘量", "允准": "允准", "兄台": "兄臺", "充饥": 
"充饑", "光采": "光采", "克里": "克裡", "克复": "克複", "入伙": "入伙", "内制": "內製", "两只": "兩隻", "八字胡": "八字鬍", "八只": "八隻", "公布": "公佈", "公干": "公幹", "公斗": "公斗", "公历": "公曆", "六只": "六隻", "六出": "六齣", "兼并": "兼併", "冤雠": "冤讎", "准予": "准予", "准假": "准假", "准将": "准將", "准考证": "准考證", "准许": "准許", "几几": "几几", "几案": "几案", "几丝": "几絲", "凹洞里": "凹洞裡", "出征": "出征", "出锤": "出鎚", "刀削面": "刀削麵", "刁斗": "刁斗", "分布": "分佈", "切面": "切麵", "刊布": "刊佈", "划上": "划上", "划下": "划下", "划不来": "划不來", "划了": "划了", "划具": "划具", "划出": "划出", "划到": "划到", "划动": "划動", "划去": "划去", "划子": "划子", "划得来": "划得來", "划拳": "划拳", "划桨": "划槳", "划水": "划水", "划算": "划算", "划船": "划船", "划艇": "划艇", "划着": "划著", "划着走": "划著走", "划行": "划行", "划走": "划走", "划起": "划起", "划进": "划進", "划过": "划過", "初征": "初征", "别致": "別緻", "别着": "別著", "别只": "別隻", "利比里亚": "利比裡亞", "刮着": "刮著", "刮胡刀": "刮鬍刀", "剃发": "剃髮", "剃须": "剃鬚", "削发": "削髮", "克制": "剋制", "克星": "剋星", "克服": "剋服", "克死": "剋死", "克薄": "剋薄", "前仆后继": "前仆後繼", "前台": "前臺", "前车之复": "前車之覆", "刚才": "剛纔", "剪发": "剪髮", "割舍": "割捨", "创制": "創製", "加里宁": "加裡寧", "动荡": "動蕩", "劳力士表": "勞力士錶", "包准": "包准", "包谷": "包穀", "北斗": "北斗", "北回": "北迴", "匡复": "匡複", "匪干": "匪幹", "十卷": "十卷", "十台": "十臺", "十只": "十隻", "十出": "十齣", "千丝万缕": "千絲萬縷", "千回百折": "千迴百折", "千回百转": "千迴百轉", "千钧一发": "千鈞一髮", "千只": "千隻", "升斗小民": "升斗小民", "半只": "半隻", "南岳": "南嶽", "南征": "南征", "南台": "南臺", "南回": "南迴", "卡里": "卡裡", "印制": "印製", "卷入": "卷入", "卷取": "卷取", "卷土重来": "卷土重來", "卷子": "卷子", "卷宗": "卷宗", "卷尺": "卷尺", "卷层云": "卷層雲", "卷帙": "卷帙", "卷扬机": "卷揚機", "卷曲": "卷曲", "卷染": "卷染", "卷烟": "卷煙", "卷筒": "卷筒", "卷纬": "卷緯", "卷绕": "卷繞", "卷装": "卷裝", "卷轴": "卷軸", "卷云": "卷雲", "卷领": "卷領", "卷发": "卷髮", "卷须": "卷鬚", "参与": "參与", "参与者": "參与者", "参合": "參合", "参考价值": "參考價值", "参与": "參與", "参与人员": "參與人員", "参与制": "參與制", "参与感": "參與感", "参与者": "參與者", "参观团": "參觀團", "参观团体": "參觀團體", "参阅": "參閱", "反冲": "反衝", "反复": "反複", "反复": "反覆", "取舍": "取捨", "口里": "口裡", "只准": "只准", "只冲": "只衝", "叮当": "叮噹", "可怜虫": "可憐虫", "可紧可松": "可緊可鬆", "台制": "台製", "司令台": "司令臺", "吃着不尽": "吃著不盡", "吃里扒外": "吃裡扒外", "吃里爬外": "吃裡爬外", "各吊": "各吊", "合伙": "合伙", "合并": "合併", "吊上": "吊上", "吊下": "吊下", "吊了": 
"吊了", "吊个": "吊個", "吊儿郎当": "吊兒郎當", "吊到": "吊到", "吊去": "吊去", "吊取": "吊取", "吊吊": "吊吊", "吊嗓": "吊嗓", "吊好": "吊好", "吊子": "吊子", "吊带": "吊帶", "吊带裤": "吊帶褲", "吊床": "吊床", "吊得": "吊得", "吊挂": "吊掛", "吊挂着": "吊掛著", "吊杆": "吊杆", "吊架": "吊架", "吊桶": "吊桶", "吊杆": "吊桿", "吊桥": "吊橋", "吊死": "吊死", "吊灯": "吊燈", "吊环": "吊環", "吊盘": "吊盤", "吊索": "吊索", "吊着": "吊著", "吊装": "吊裝", "吊裤": "吊褲", "吊裤带": "吊褲帶", "吊袜": "吊襪", "吊走": "吊走", "吊起": "吊起", "吊车": "吊車", "吊钩": "吊鉤", "吊销": "吊銷", "吊钟": "吊鐘", "同伙": "同伙", "名表": "名錶", "后冠": "后冠", "后土": "后土", "后妃": "后妃", "后座": "后座", "后稷": "后稷", "后羿": "后羿", "后里": "后里", "向着": "向著", "吞并": "吞併", "吹发": "吹髮", "吕后": "呂后", "獃里獃气": "呆裡呆氣", "周而复始": "周而複始", "呼吁": "呼籲", "和面": "和麵", "哪里": "哪裡", "哭脏": "哭髒", "问卷": "問卷", "喝采": "喝采", "单干": "單干", "单只": "單隻", "嘴里": "嘴裡", "恶心": "噁心", "当啷": "噹啷", "当当": "噹噹", "噜苏": "嚕囌", "向导": "嚮導", "向往": "嚮往", "向应": "嚮應", "向日": "嚮日", "向迩": "嚮邇", "严丝合缝": "嚴絲合縫", "严复": "嚴複", "四舍五入": "四捨五入", "四只": "四隻", "四出": "四齣", "回丝": "回絲", "回着": "回著", "回荡": "回蕩", "回复": "回覆", "回采": "回采", "圈子里": "圈子裡", "圈里": "圈裡", "国历": "國曆", "国雠": "國讎", "园里": "園裡", "图里": "圖裡", "土里": "土裡", "土制": "土製", "地志": "地誌", "坍台": "坍臺", "坑里": "坑裡", "坦荡": "坦蕩", "垂发": "垂髮", "垮台": "垮臺", "埋布": "埋佈", "城里": "城裡", "基干": "基幹", "报复": "報複", "塌台": "塌臺", "塔台": "塔臺", "涂着": "塗著", "墓志": "墓誌", "墨斗": "墨斗", "墨索里尼": "墨索裡尼", "垦复": "墾複", "垄断价格": "壟斷價格", "垄断资产": "壟斷資產", "垄断集团": "壟斷集團", "壶里": "壺裡", "寿面": "壽麵", "夏天里": "夏天裡", "夏历": "夏曆", "外制": "外製", "多冲": "多衝", "多采多姿": "多采多姿", "多么": "多麼", "夜光表": "夜光錶", "夜里": "夜裡", "梦里": "夢裡", "大伙": "大伙", "大卷": "大卷", "大干": "大干", "大干": "大幹", "大锤": "大鎚", "大只": "大隻", "天后": "天后", "天干": "天干", "天文台": "天文臺", "太后": "太后", "奏折": "奏摺", "女丑": "女丑", "女佣": "女佣", "好家夥": "好傢夥", "好戏连台": "好戲連臺", "如法泡制": "如法泡製", "妆台": "妝臺", "姜太公": "姜太公", "姜子牙": "姜子牙", "姜丝": "姜絲", "字汇": "字彙", "字里行间": "字裡行間", "存折": "存摺", "孟姜女": "孟姜女", "宇宙志": "宇宙誌", "定准": "定准", "定制": "定製", "宣布": "宣佈", "宫里": "宮裡", "家伙": "家伙", "家里": "家裡", "密布": "密佈", "寇雠": "寇讎", "实干": "實幹", "写字台": "寫字檯", "写字台": "寫字臺", "宽松": "寬鬆", "封面里": "封面裡", "射干": "射干", "对表": "對錶", "小丑": "小丑", 
"小伙": "小伙", "小只": "小隻", "少吊": "少吊", "尺布斗粟": "尺布斗粟", "尼克松": "尼克鬆", "尼采": "尼采", "尿斗": "尿斗", "局里": "局裡", "居里": "居裡", "屋子里": "屋子裡", "屋里": "屋裡", "展布": "展佈", "屡仆屡起": "屢仆屢起", "屯里": "屯裡", "山岳": "山嶽", "山里": "山裡", "峰回": "峰迴", "巡回": "巡迴", "巧干": "巧幹", "巴尔干": "巴爾幹", "巴里": "巴裡", "巷里": "巷裡", "市里": "市裡", "布谷": "布穀", "希腊": "希腊", "帘子": "帘子", "帘布": "帘布", "席卷": "席卷", "带团参加": "帶團參加", "带发修行": "帶髮修行", "干休": "干休", "干系": "干係", "干卿何事": "干卿何事", "干将": "干將", "干戈": "干戈", "干挠": "干撓", "干扰": "干擾", "干支": "干支", "干政": "干政", "干时": "干時", "干涉": "干涉", "干犯": "干犯", "干与": "干與", "干着急": "干著急", "干贝": "干貝", "干预": "干預", "平台": "平臺", "年历": "年曆", "年里": "年裡", "干上": "幹上", "干下去": "幹下去", "干了": "幹了", "干事": "幹事", "干些": "幹些", "干个": "幹個", "干劲": "幹勁", "干员": "幹員", "干吗": "幹嗎", "干嘛": "幹嘛", "干坏事": "幹壞事", "干完": "幹完", "干得": "幹得", "干性油": "幹性油", "干才": "幹才", "干掉": "幹掉", "干校": "幹校", "干活": "幹活", "干流": "幹流", "干球温度": "幹球溫度", "干线": "幹線", "干练": "幹練", "干警": "幹警", "干起来": "幹起來", "干路": "幹路", "干道": "幹道", "干部": "幹部", "干么": "幹麼", "几丝": "幾絲", "几只": "幾隻", "几出": "幾齣", "底里": "底裡", "康采恩": "康采恩", "庙里": "廟裡", "建台": "建臺", "弄脏": "弄髒", "弔卷": "弔卷", "弘历": "弘曆", "别扭": "彆扭", "别拗": "彆拗", "别气": "彆氣", "别脚": "彆腳", "别着": "彆著", "弹子台": "彈子檯", "弹药": "彈葯", "汇报": "彙報", "汇整": "彙整", "汇编": "彙編", "汇总": "彙總", "汇纂": "彙纂", "汇辑": "彙輯", "汇集": "彙集", "形单影只": "形單影隻", "影后": "影后", "往里": "往裡", "往复": "往複", "征伐": "征伐", "征兵": "征兵", "征尘": "征塵", "征夫": "征夫", "征战": "征戰", "征收": "征收", "征服": "征服", "征求": "征求", "征发": "征發", "征衣": "征衣", "征讨": "征討", "征途": "征途", "后台": "後臺", "从里到外": "從裡到外", "从里向外": "從裡向外", "复雠": "復讎", "复辟": "復辟", "德干高原": "德干高原", "心愿": "心愿", "心荡神驰": "心蕩神馳", "心里": "心裡", "忙里": "忙裡", "快干": "快幹", "快冲": "快衝", "怎么": "怎麼", "怎么着": "怎麼著", "怒发冲冠": "怒髮衝冠", "急冲而下": "急衝而下", "怪里怪气": "怪裡怪氣", "恩准": "恩准", "情有所钟": "情有所鍾", "意面": "意麵", "慌里慌张": "慌裡慌張", "慰借": "慰藉", "忧郁": "憂郁", "凭吊": "憑吊", "凭借": "憑藉", "凭借着": "憑藉著", "蒙懂": "懞懂", "怀里": "懷裡", "怀表": "懷錶", "悬吊": "懸吊", "恋恋不舍": "戀戀不捨", "戏台": "戲臺", "戴表": "戴錶", "戽斗": "戽斗", "房里": "房裡", "手不释卷": "手不釋卷", "手卷": "手卷", "手折": "手摺", "手里": "手裡", "手表": "手錶", "手松": "手鬆", "才干": 
"才幹", "才高八斗": "才高八斗", "打谷": "打穀", "扞御": "扞禦", "批准": "批准", "批复": "批複", "批复": "批覆", "承制": "承製", "抗御": "抗禦", "折冲": "折衝", "披复": "披覆", "披发": "披髮", "抱朴": "抱朴", "抵御": "抵禦", "拆伙": "拆伙", "拆台": "拆臺", "拈须": "拈鬚", "拉纤": "拉縴", "拉面": "拉麵", "拖吊": "拖吊", "拗别": "拗彆", "拮据": "拮据", "振荡": "振蕩", "捍御": "捍禦", "舍不得": "捨不得", "舍出": "捨出", "舍去": "捨去", "舍命": "捨命", "舍己从人": "捨己從人", "舍己救人": "捨己救人", "舍己为人": "捨己為人", "舍己为公": "捨己為公", "舍己为国": "捨己為國", "舍得": "捨得", "舍我其谁": "捨我其誰", "舍本逐末": "捨本逐末", "舍弃": "捨棄", "舍死忘生": "捨死忘生", "舍生": "捨生", "舍短取长": "捨短取長", "舍身": "捨身", "舍车保帅": "捨車保帥", "舍近求远": "捨近求遠", "捲发": "捲髮", "捵面": "捵麵", "扫荡": "掃蕩", "掌柜": "掌柜", "排骨面": "排骨麵", "挂帘": "掛帘", "挂面": "掛麵", "接着说": "接著說", "提心吊胆": "提心吊膽", "插图卷": "插圖卷", "换吊": "換吊", "换只": "換隻", "换发": "換髮", "摇荡": "搖蕩", "搭伙": "搭伙", "折合": "摺合", "折奏": "摺奏", "折子": "摺子", "折尺": "摺尺", "折扇": "摺扇", "折梯": "摺梯", "折椅": "摺椅", "折叠": "摺疊", "折痕": "摺痕", "折篷": "摺篷", "折纸": "摺紙", "折裙": "摺裙", "撒布": "撒佈", "撚须": "撚鬚", "撞球台": "撞球檯", "擂台": "擂臺", "担仔面": "擔仔麵", "担担面": "擔擔麵", "担着": "擔著", "担负着": "擔負著", "据云": "據云", "擢发难数": "擢髮難數", "摆布": "擺佈", "摄制": "攝製", "支干": "支幹", "收获": "收穫", "改制": "改製", "攻克": "攻剋", "放荡": "放蕩", "放松": "放鬆", "叙说着": "敘說著", "散伙": "散伙", "散布": "散佈", "散荡": "散蕩", "散发": "散髮", "整只": "整隻", "整出": "整齣", "文采": "文采", "斗六": "斗六", "斗南": "斗南", "斗大": "斗大", "斗子": "斗子", "斗室": "斗室", "斗方": "斗方", "斗栱": "斗栱", "斗笠": "斗笠", "斗箕": "斗箕", "斗篷": "斗篷", "斗胆": "斗膽", "斗转参横": "斗轉參橫", "斗量": "斗量", "斗门": "斗門", "料斗": "料斗", "斯里兰卡": "斯裡蘭卡", "新历": "新曆", "断头台": "斷頭臺", "方才": "方纔", "施舍": "施捨", "旋绕着": "旋繞著", "旋回": "旋迴", "族里": "族裡", "日历": "日曆", "日志": "日誌", "日进斗金": "日進斗金", "明了": "明瞭", "明窗净几": "明窗淨几", "明里": "明裡", "星斗": "星斗", "星历": "星曆", "星移斗换": "星移斗換", "星移斗转": "星移斗轉", "星罗棋布": "星羅棋佈", "星辰表": "星辰錶", "春假里": "春假裡", "春天里": "春天裡", "晃荡": "晃蕩", "景致": "景緻", "暗地里": "暗地裡", "暗沟里": "暗溝裡", "暗里": "暗裡", "历数": "曆數", "历书": "曆書", "历法": "曆法", "书卷": "書卷", "会干": "會幹", "会里": "會裡", "月历": "月曆", "月台": "月臺", "有只": "有隻", "木制": "木製", "本台": "本臺", "朴子": "朴子", "朴实": "朴實", "朴硝": "朴硝", "朴素": "朴素", "朴资茅斯": "朴資茅斯", "村里": "村裡", "束发": "束髮", "东岳": "東嶽", 
"东征": "東征", "松赞干布": "松贊干布", "板着脸": "板著臉", "板荡": "板蕩", "枕借": "枕藉", "林宏岳": "林宏嶽", "枝干": "枝幹", "枯干": "枯幹", "某只": "某隻", "染发": "染髮", "柜上": "柜上", "柜台": "柜台", "柜子": "柜子", "查卷": "查卷", "查号台": "查號臺", "校雠学": "校讎學", "核准": "核准", "核复": "核覆", "格里": "格裡", "案卷": "案卷", "条干": "條幹", "棉卷": "棉卷", "棉制": "棉製", "植发": "植髮", "楼台": "樓臺", "标志着": "標志著", "标致": "標緻", "标志": "標誌", "模制": "模製", "树干": "樹幹", "横征暴敛": "橫征暴斂", "横冲": "橫衝", "档卷": "檔卷", "检复": "檢覆", "台子": "檯子", "台布": "檯布", "台灯": "檯燈", "台球": "檯球", "台面": "檯面", "柜台": "櫃檯", "柜台": "櫃臺", "栏干": "欄干", "欺蒙": "欺矇", "歌后": "歌后", "欧几里得": "歐幾裡得", "正当着": "正當著", "武后": "武后", "武松": "武鬆", "归并": "歸併", "死里求生": "死裡求生", "死里逃生": "死裡逃生", "残卷": "殘卷", "杀虫药": "殺虫藥", "壳里": "殼裡", "母后": "母后", "每只": "每隻", "比干": "比干", "毛卷": "毛卷", "毛发": "毛髮", "毫发": "毫髮", "气冲牛斗": "氣沖牛斗", "气象台": "氣象臺", "氯霉素": "氯黴素", "水斗": "水斗", "水里": "水裡", "水表": "水錶", "永历": "永曆", "污蔑": "汙衊", "池里": "池裡", "污蔑": "污衊", "沈着": "沈著", "没事干": "沒事幹", "没精打采": "沒精打采", "冲着": "沖著", "沙里淘金": "沙裡淘金", "河里": "河裡", "油面": "油麵", "泡面": "泡麵", "泰斗": "泰斗", "洗手不干": "洗手不幹", "洗发精": "洗髮精", "派团参加": "派團參加", "流荡": "流蕩", "浩荡": "浩蕩", "浪琴表": "浪琴錶", "浪荡": "浪蕩", "浮荡": "浮蕩", "海里": "海裡", "涂着": "涂著", "液晶表": "液晶錶", "凉面": "涼麵", "淡朱": "淡硃", "淫荡": "淫蕩", "测验卷": "測驗卷", "港制": "港製", "游荡": "游蕩", "凑合着": "湊合著", "湖里": "湖裡", "汤团": "湯糰", "汤面": "湯麵", "卤制": "滷製", "卤面": "滷麵", "满布": "滿佈", "漂荡": "漂蕩", "漏斗": "漏斗", "演奏台": "演奏臺", "潭里": "潭裡", "激荡": "激蕩", "浓郁": "濃郁", "浓发": "濃髮", "湿地松": "濕地鬆", "蒙蒙": "濛濛", "蒙雾": "濛霧", "瀛台": "瀛臺", "弥漫": "瀰漫", "弥漫着": "瀰漫著", "火并": "火併", "灰蒙": "灰濛", "炒面": "炒麵", "炮制": "炮製", "炸药": "炸葯", "炸酱面": "炸醬麵", "为着": "為著", "乌干达": "烏干達", "乌苏里江": "烏蘇裡江", "乌发": "烏髮", "乌龙面": "烏龍麵", "烘制": "烘製", "烽火台": "烽火臺", "无干": "無干", "无精打采": "無精打采", "炼制": "煉製", "烟卷儿": "煙卷兒", "烟斗": "煙斗", "烟斗丝": "煙斗絲", "烟台": "煙臺", "照准": "照准", "熨斗": "熨斗", "灯台": "燈臺", "燎发": "燎髮", "烫发": "燙髮", "烫面": "燙麵", "烛台": "燭臺", "炉台": "爐臺", "爽荡": "爽蕩", "片言只语": "片言隻語", "牛肉面": "牛肉麵", "牛只": "牛隻", "特准": "特准", "特征": "特征", "特里": "特裡", "特制": "特製", "牵系": "牽繫", "狼借": "狼藉", "猛冲": "猛衝", "奖杯": "獎盃", "获准": "獲准", "率团参加": 
"率團參加", "王侯后": "王侯后", "王后": "王后", "班里": "班裡", "理发": "理髮", "瑶台": "瑤臺", "甚么": "甚麼", "甜面酱": "甜麵醬", "生力面": "生力麵", "生锈": "生鏽", "生发": "生髮", "田里": "田裡", "由馀": "由余", "男佣": "男佣", "男用表": "男用錶", "留发": "留髮", "畚斗": "畚斗", "当着": "當著", "疏松": "疏鬆", "疲困": "疲睏", "病症": "病癥", "症候": "癥候", "症状": "癥狀", "症结": "癥結", "登台": "登臺", "发布": "發佈", "发着": "發著", "发面": "發麵", "发霉": "發黴", "白卷": "白卷", "白干儿": "白干兒", "白发": "白髮", "白面": "白麵", "百里": "百裡", "百只": "百隻", "皇后": "皇后", "皇历": "皇曆", "皓发": "皓髮", "皮里阳秋": "皮裏陽秋", "皮里春秋": "皮裡春秋", "皮制": "皮製", "皱折": "皺摺", "盒里": "盒裡", "监制": "監製", "盘里": "盤裡", "盘回": "盤迴", "直接参与": "直接參与", "直冲": "直衝", "相克": "相剋", "相干": "相干", "相冲": "相衝", "看台": "看臺", "眼帘": "眼帘", "眼眶里": "眼眶裡", "眼里": "眼裡", "困乏": "睏乏", "睡着了": "睡著了", "了如": "瞭如", "了望": "瞭望", "了然": "瞭然", "了若指掌": "瞭若指掌", "了解": "瞭解", "蒙住": "矇住", "蒙昧无知": "矇昧無知", "蒙混": "矇混", "蒙蒙": "矇矇", "蒙眬": "矇矓", "蒙蔽": "矇蔽", "蒙骗": "矇騙", "短发": "短髮", "石英表": "石英錶", "研制": "研製", "砰当": "砰噹", "砲台": "砲臺", "朱唇皓齿": "硃唇皓齒", "朱批": "硃批", "朱砂": "硃砂", "朱笔": "硃筆", "朱红色": "硃紅色", "朱色": "硃色", "硬干": "硬幹", "砚台": "硯臺", "碑志": "碑誌", "磁制": "磁製", "磨制": "磨製", "示复": "示覆", "社里": "社裡", "神采": "神采", "御侮": "禦侮", "御寇": "禦寇", "御寒": "禦寒", "御敌": "禦敵", "秃发": "禿髮", "秀发": "秀髮", "私下里": "私下裡", "秋天里": "秋天裡", "秋裤": "秋褲", "秒表": "秒錶", "稀松": "稀鬆", "禀复": "稟覆", "稻谷": "稻穀", "稽征": "稽征", "谷仓": "穀倉", "谷场": "穀場", "谷子": "穀子", "谷壳": "穀殼", "谷物": "穀物", "谷皮": "穀皮", "谷神": "穀神", "谷粒": "穀粒", "谷舱": "穀艙", "谷苗": "穀苗", "谷草": "穀草", "谷贱伤农": "穀賤傷農", "谷道": "穀道", "谷雨": "穀雨", "谷类": "穀類", "积极参与": "積极參与", "积极参加": "積极參加", "空荡": "空蕩", "窗帘": "窗帘", "窗明几净": "窗明几淨", "窗台": "窗檯", "窗台": "窗臺", "窝里": "窩裡", "窝阔台": "窩闊臺", "穷追不舍": "窮追不捨", "笆斗": "笆斗", "笑里藏刀": "笑裡藏刀", "第一卷": "第一卷", "筋斗": "筋斗", "答卷": "答卷", "答复": "答複", "答复": "答覆", "筵几": "筵几", "箕斗": "箕斗", "签着": "簽著", "吁求": "籲求", "吁请": "籲請", "粗制": "粗製", "粗卤": "粗鹵", "精干": "精幹", "精明强干": "精明強幹", "精致": "精緻", "精制": "精製", "精辟": "精辟", "精采": "精采", "糊里糊涂": "糊裡糊塗", "团子": "糰子", "系着": "系著", "纪历": "紀曆", "红发": "紅髮", "红霉素": "紅黴素", "纡回": "紆迴", "纳采": "納采", "素食面": "素食麵", "素面": "素麵", "紫微斗数": "紫微斗數", "细致": "細緻", "组里": 
"組裡", "结发": "結髮", "绝对参照": "絕對參照", "丝来线去": "絲來線去", "丝布": "絲布", "丝板": "絲板", "丝瓜布": "絲瓜布", "丝绒布": "絲絨布", "丝线": "絲線", "丝织厂": "絲織廠", "丝虫": "絲蟲", "綑吊": "綑吊", "经卷": "經卷", "绿霉素": "綠黴素", "维系": "維繫", "绾发": "綰髮", "网里": "網裡", "紧绷": "緊繃", "紧绷着": "緊繃著", "紧追不舍": "緊追不捨", "编制": "編製", "编发": "編髮", "缓冲": "緩衝", "致密": "緻密", "萦回": "縈迴", "县里": "縣裡", "县志": "縣誌", "缝里": "縫裡", "缝制": "縫製", "纤夫": "縴夫", "繁复": "繁複", "绷住": "繃住", "绷子": "繃子", "绷带": "繃帶", "绷紧": "繃緊", "绷脸": "繃臉", "绷着": "繃著", "绷着脸": "繃著臉", "绷着脸儿": "繃著臉兒", "绷开": "繃開", "绘制": "繪製", "系上": "繫上", "系到": "繫到", "系囚": "繫囚", "系心": "繫心", "系念": "繫念", "系怀": "繫懷", "系数": "繫數", "系于": "繫於", "系系": "繫系", "系紧": "繫緊", "系绳": "繫繩", "系着": "繫著", "系辞": "繫辭", "缴卷": "繳卷", "累囚": "纍囚", "累累": "纍纍", "坛子": "罈子", "坛坛罐罐": "罈罈罐罐", "骂着": "罵著", "美制": "美製", "美发": "美髮", "翻来覆去": "翻來覆去", "翻云覆雨": "翻雲覆雨", "老么": "老么", "老板": "老闆", "考卷": "考卷", "耕获": "耕穫", "聊斋志异": "聊齋誌異", "联系": "聯係", "联系": "聯繫", "肉丝面": "肉絲麵", "肉羹面": "肉羹麵", "肉松": "肉鬆", "肢体": "肢体", "背向着": "背向著", "背地里": "背地裡", "胡里胡涂": "胡裡胡塗", "能干": "能幹", "脉冲": "脈衝", "脱发": "脫髮", "腊味": "腊味", "腊笔": "腊筆", "腊肉": "腊肉", "脑子里": "腦子裡", "腰里": "腰裡", "胶卷": "膠卷", "自制": "自製", "自觉自愿": "自覺自愿", "台上": "臺上", "台下": "臺下", "台中": "臺中", "台北": "臺北", "台南": "臺南", "台地": "臺地", "台塑": "臺塑", "台大": "臺大", "台币": "臺幣", "台座": "臺座", "台东": "臺東", "台柱": "臺柱", "台榭": "臺榭", "台汽": "臺汽", "台海": "臺海", "台澎金马": "臺澎金馬", "台湾": "臺灣", "台灯": "臺燈", "台球": "臺球", "台省": "臺省", "台端": "臺端", "台糖": "臺糖", "台肥": "臺肥", "台航": "臺航", "台视": "臺視", "台词": "臺詞", "台车": "臺車", "台铁": "臺鐵", "台阶": "臺階", "台电": "臺電", "台面": "臺面", "舂谷": "舂穀", "兴致": "興緻", "兴高采烈": "興高采烈", "旧历": "舊曆", "舒卷": "舒卷", "舞台": "舞臺", "航海历": "航海曆", "船只": "船隻", "舰只": "艦隻", "芬郁": "芬郁", "花卷": "花卷", "花盆里": "花盆裡", "花采": "花采", "苑里": "苑裡", "若干": "若干", "苦干": "苦幹", "苦里": "苦裏", "苦卤": "苦鹵", "范仲淹": "范仲淹", "范蠡": "范蠡", "范阳": "范陽", "茅台": "茅臺", "茶几": "茶几", "草丛里": "草叢裡", "庄里": "莊裡", "茎干": "莖幹", "莽荡": "莽蕩", "菌丝体": "菌絲体", "菌丝体": "菌絲體", "华里": "華裡", "华发": "華髮", "万卷": "萬卷", "万历": "萬曆", "万只": "萬隻", "落发": "落髮", "着儿": "著兒", "着书立说": "著書立說", "着色软体": "著色軟體", "着重指出": "著重指出", 
"着录": "著錄", "着录规则": "著錄規則", "蓄发": "蓄髮", "蓄须": "蓄鬚", "蓬发": "蓬髮", "蓬松": "蓬鬆", "莲台": "蓮臺", "荡来荡去": "蕩來蕩去", "荡女": "蕩女", "荡妇": "蕩婦", "荡寇": "蕩寇", "荡平": "蕩平", "荡涤": "蕩滌", "荡漾": "蕩漾", "荡然": "蕩然", "荡舟": "蕩舟", "荡船": "蕩船", "荡荡": "蕩蕩", "薑丝": "薑絲", "薙发": "薙髮", "借以": "藉以", "借口": "藉口", "借故": "藉故", "借机": "藉機", "借此": "藉此", "借由": "藉由", "借端": "藉端", "借着": "藉著", "借借": "藉藉", "借词": "藉詞", "借资": "藉資", "借酒浇愁": "藉酒澆愁", "藤制": "藤製", "蕴含着": "蘊含著", "蕴涵着": "蘊涵著", "蕴借": "蘊藉", "萝卜": "蘿蔔", "虎须": "虎鬚", "号志": "號誌", "蜂后": "蜂后", "蛮干": "蠻幹", "行事历": "行事曆", "胡同": "衚衕", "冲上": "衝上", "冲下": "衝下", "冲来": "衝來", "冲倒": "衝倒", "冲出": "衝出", "冲到": "衝到", "冲刺": "衝刺", "冲克": "衝剋", "冲力": "衝力", "冲劲": "衝勁", "冲动": "衝動", "冲去": "衝去", "冲口": "衝口", "冲垮": "衝垮", "冲堂": "衝堂", "冲压": "衝壓", "冲天": "衝天", "冲掉": "衝掉", "冲撞": "衝撞", "冲击": "衝擊", "冲散": "衝散", "冲决": "衝決", "冲浪": "衝浪", "冲激": "衝激", "冲破": "衝破", "冲程": "衝程", "冲突": "衝突", "冲线": "衝線", "冲着": "衝著", "冲冲": "衝衝", "冲要": "衝要", "冲起": "衝起", "冲进": "衝進", "冲过": "衝過", "冲锋": "衝鋒", "表里": "表裡", "袖里": "袖裡", "被里": "被裡", "被复": "被複", "被复": "被覆", "被复着": "被覆著", "被发": "被髮", "裁并": "裁併", "裁制": "裁製", "里面": "裏面", "里人": "裡人", "里加": "裡加", "里外": "裡外", "里子": "裡子", "里屋": "裡屋", "里层": "裡層", "里布": "裡布", "里带": "裡帶", "里弦": "裡弦", "里应外合": "裡應外合", "里拉": "裡拉", "里斯": "裡斯", "里海": "裡海", "里脊": "裡脊", "里衣": "裡衣", "里里": "裡裡", "里通外国": "裡通外國", "里通外敌": "裡通外敵", "里边": "裡邊", "里间": "裡間", "里面": "裡面", "里头": "裡頭", "制件": "製件", "制作": "製作", "制做": "製做", "制备": "製備", "制冰": "製冰", "制冷": "製冷", "制剂": "製劑", "制品": "製品", "制图": "製圖", "制成": "製成", "制法": "製法", "制为": "製為", "制片": "製片", "制版": "製版", "制程": "製程", "制糖": "製糖", "制纸": "製紙", "制药": "製藥", "制表": "製表", "制裁": "製裁", "制造": "製造", "制革": "製革", "制鞋": "製鞋", "制盐": "製鹽", "复仞年如": "複仞年如", "复以百万": "複以百萬", "复位": "複位", "复信": "複信", "复分数": "複分數", "复列": "複列", "复利": "複利", "复印": "複印", "复原": "複原", "复句": "複句", "复合": "複合", "复名": "複名", "复员": "複員", "复壁": "複壁", "复壮": "複壯", "复姓": "複姓", "复字键": "複字鍵", "复审": "複審", "复写": "複寫", "复式": "複式", "复复": "複復", "复数": "複數", "复本": "複本", "复查": "複查", "复核": "複核", "复检": "複檢", "复次": "複次", "复比": "複比", "复决": "複決", 
"复活": "複活", "复测": "複測", "复亩珍": "複畝珍", "复发": "複發", "复目": "複目", "复眼": "複眼", "复种": "複種", "复线": "複線", "复习": "複習", "复兴社": "複興社", "复旧": "複舊", "复色": "複色", "复叶": "複葉", "复盖": "複蓋", "复苏": "複蘇", "复制": "複製", "复诊": "複診", "复词": "複詞", "复试": "複試", "复课": "複課", "复议": "複議", "复变函数": "複變函數", "复赛": "複賽", "复述": "複述", "复选": "複選", "复钱": "複錢", "复杂": "複雜", "复电": "複電", "复音": "複音", "复韵": "複韻", "衬里": "襯裡", "西岳": "西嶽", "西征": "西征", "西历": "西曆", "要冲": "要衝", "要么": "要麼", "复上": "覆上", "复亡": "覆亡", "复住": "覆住", "复信": "覆信", "复命": "覆命", "复在": "覆在", "复审": "覆審", "复成": "覆成", "复败": "覆敗", "复文": "覆文", "复校": "覆校", "复核": "覆核", "覆水难收": "覆水難收", "复没": "覆沒", "复灭": "覆滅", "复盆": "覆盆", "复舟": "覆舟", "复着": "覆著", "复盖": "覆蓋", "复盖着": "覆蓋著", "复试": "覆試", "复议": "覆議", "复车": "覆車", "复载": "覆載", "复辙": "覆轍", "复电": "覆電", "见复": "見覆", "亲征": "親征", "观众台": "觀眾臺", "观台": "觀臺", "观象台": "觀象臺", "角落里": "角落裡", "觔斗": "觔斗", "触须": "觸鬚", "订制": "訂製", "诉说着": "訴說著", "词汇": "詞彙", "试卷": "試卷", "诗卷": "詩卷", "话里有话": "話裡有話", "志哀": "誌哀", "志喜": "誌喜", "志庆": "誌慶", "语云": "語云", "语汇": "語彙", "诬蔑": "誣衊", "诵经台": "誦經臺", "说着": "說著", "课征": "課征", "调制": "調製", "调频台": "調頻臺", "请参阅": "請參閱", "讲台": "講臺", "谢绝参观": "謝絕參觀", "护发": "護髮", "雠隙": "讎隙", "豆腐干": "豆腐干", "竖着": "豎著", "丰富多采": "豐富多采", "丰滨": "豐濱", "丰滨乡": "豐濱鄉", "丰采": "豐采", "象征着": "象徵著", "贵干": "貴幹", "贾后": "賈后", "赈饥": "賑饑", "贤后": "賢后", "质朴": "質朴", "赌台": "賭檯", "购并": "購併", "赤松": "赤鬆", "起吊": "起吊", "起复": "起複", "赶制": "趕製", "跌荡": "跌蕩", "跟斗": "跟斗", "跳荡": "跳蕩", "跳表": "跳錶", "踬仆": "躓仆", "躯干": "軀幹", "车库里": "車庫裡", "车站里": "車站裡", "车里": "車裡", "轻松": "輕鬆", "轮回": "輪迴", "转台": "轉檯", "辛丑": "辛丑", "辟邪": "辟邪", "办伙": "辦伙", "办公台": "辦公檯", "辞汇": "辭彙", "农历": "農曆", "迂回": "迂迴", "近日里": "近日裡", "迥然回异": "迥然迴異", "回光返照": "迴光返照", "回向": "迴向", "回圈": "迴圈", "回廊": "迴廊", "回形夹": "迴形夾", "回文": "迴文", "回旋": "迴旋", "回流": "迴流", "回环": "迴環", "回荡": "迴盪", "回纹针": "迴紋針", "回绕": "迴繞", "回肠": "迴腸", "回荡": "迴蕩", "回诵": "迴誦", "回路": "迴路", "回转": "迴轉", "回递性": "迴遞性", "回避": "迴避", "回响": "迴響", "回风": "迴風", "回首": "迴首", "迷蒙": "迷濛", "退伙": "退伙", "这么着": "這么著", "这里": "這裏", "这里": "這裡", "这只": "這隻", "这么": "這麼", "这么着": 
"這麼著", "通心面": "通心麵", "速食面": "速食麵", "连系": "連繫", "连台好戏": "連臺好戲", "游荡": "遊蕩", "遍布": "遍佈", "递回": "遞迴", "远征": "遠征", "适才": "適纔", "遮复": "遮覆", "还冲": "還衝", "邋里邋遢": "邋裡邋遢", "那里": "那裡", "那只": "那隻", "那么": "那麼", "那么着": "那麼著", "邪辟": "邪辟", "郁烈": "郁烈", "郁穆": "郁穆", "郁郁": "郁郁", "郁闭": "郁閉", "郁馥": "郁馥", "乡愿": "鄉愿", "乡里": "鄉裡", "邻里": "鄰裡", "配合着": "配合著", "配制": "配製", "酒杯": "酒盃", "酒坛": "酒罈", "酥松": "酥鬆", "醋坛": "醋罈", "酝借": "醞藉", "酝酿着": "醞釀著", "医药": "醫葯", "醲郁": "醲郁", "酿制": "釀製", "采地": "采地", "采女": "采女", "采声": "采聲", "采色": "采色", "采邑": "采邑", "里程表": "里程錶", "重折": "重摺", "重复": "重複", "重复": "重覆", "重锤": "重鎚", "野台戏": "野臺戲", "金斗": "金斗", "金表": "金錶", "金发": "金髮", "金霉素": "金黴素", "钉锤": "釘鎚", "银朱": "銀硃", "银发": "銀髮", "铜制": "銅製", "铝制": "鋁製", "钢制": "鋼製", "录着": "錄著", "录制": "錄製", "表带": "錶帶", "表店": "錶店", "表厂": "錶廠", "表壳": "錶殼", "表链": "錶鏈", "表面": "錶面", "锅台": "鍋臺", "锻鍊出": "鍛鍊出", "锻鍊身体": "鍛鍊身体", "锲而不舍": "鍥而不捨", "锤儿": "鎚兒", "锤子": "鎚子", "锤头": "鎚頭", "链霉素": "鏈黴素", "镜台": "鏡臺", "锈病": "鏽病", "锈菌": "鏽菌", "锈蚀": "鏽蝕", "钟表": "鐘錶", "铁锤": "鐵鎚", "铁锈": "鐵鏽", "长征": "長征", "长发": "長髮", "长须鲸": "長鬚鯨", "门帘": "門帘", "门斗": "門斗", "门里": "門裡", "开伙": "開伙", "开卷": "開卷", "开诚布公": "開誠佈公", "开采": "開采", "閒情逸致": "閒情逸緻", "閒荡": "閒蕩", "间不容发": "間不容髮", "闵采尔": "閔采爾", "阅卷": "閱卷", "阑干": "闌干", "关系": "關係", "关系着": "關係著", "防御": "防禦", "防锈": "防鏽", "防台": "防颱", "阿斗": "阿斗", "阿里": "阿裡", "除旧布新": "除舊佈新", "阴干": "陰干", "阴历": "陰曆", "阴郁": "陰郁", "陆征祥": "陸征祥", "阳春面": "陽春麵", "阳历": "陽曆", "阳台": "陽臺", "只字": "隻字", "只影": "隻影", "只手遮天": "隻手遮天", "只眼": "隻眼", "只言片语": "隻言片語", "只身": "隻身", "雅致": "雅緻", "雇佣": "雇佣", "双折": "雙摺", "杂志": "雜誌", "鸡丝": "雞絲", "鸡丝面": "雞絲麵", "鸡腿面": "雞腿麵", "鸡只": "雞隻", "难舍": "難捨", "雪里": "雪裡", "云须": "雲鬚", "电子表": "電子錶", "电台": "電臺", "电冲": "電衝", "电复": "電覆", "电视台": "電視臺", "电表": "電錶", "震荡": "震蕩", "雾里": "霧裡", "露台": "露臺", "灵台": "靈臺", "青瓦台": "青瓦臺", "青霉": "青黴", "面朝着": "面朝著", "面临着": "面臨著", "鞋里": "鞋裡", "鞣制": "鞣製", "秋千": "鞦韆", "鞭辟入里": "鞭辟入裡", "韩国制": "韓國製", "韩制": "韓製", "预制": "預製", "颁布": "頒佈", "头里": "頭裡", "头发": "頭髮", "颊须": "頰鬚", "颠仆": "顛仆", "颠复": "顛複", "颠复": "顛覆", "显着标志": "顯著標志", "风土志": "風土誌", 
"风斗": "風斗", "风物志": "風物誌", "风里": "風裡", "风采": "風采", "台风": "颱風", "刮了": "颳了", "刮倒": "颳倒", "刮去": "颳去", "刮得": "颳得", "刮着": "颳著", "刮走": "颳走", "刮起": "颳起", "刮风": "颳風", "飘荡": "飄蕩", "饭团": "飯糰", "饼干": "餅干", "馄饨面": "餛飩麵", "饥不择食": "饑不擇食", "饥寒": "饑寒", "饥民": "饑民", "饥渴": "饑渴", "饥溺": "饑溺", "饥荒": "饑荒", "饥饱": "饑飽", "饥饿": "饑餓", "饥馑": "饑饉", "首当其冲": "首當其衝", "香郁": "香郁", "馥郁": "馥郁", "马里": "馬裡", "马表": "馬錶", "骀荡": "駘蕩", "腾冲": "騰衝", "骨子里": "骨子裡", "骨干": "骨幹", "骨灰坛": "骨灰罈", "肮脏": "骯髒", "脏乱": "髒亂", "脏兮兮": "髒兮兮", "脏字": "髒字", "脏得": "髒得", "脏东西": "髒東西", "脏水": "髒水", "脏的": "髒的", "脏话": "髒話", "脏钱": "髒錢", "高干": "高幹", "高台": "高臺", "髭须": "髭鬚", "发型": "髮型", "发夹": "髮夾", "发妻": "髮妻", "发姐": "髮姐", "发带": "髮帶", "发廊": "髮廊", "发式": "髮式", "发指": "髮指", "发捲": "髮捲", "发根": "髮根", "发毛": "髮毛", "发油": "髮油", "发状": "髮狀", "发短心长": "髮短心長", "发端": "髮端", "发结": "髮結", "发丝": "髮絲", "发网": "髮網", "发肤": "髮膚", "发胶": "髮膠", "发菜": "髮菜", "发蜡": "髮蠟", "发辫": "髮辮", "发针": "髮針", "发长": "髮長", "发际": "髮際", "发霜": "髮霜", "发髻": "髮髻", "发鬓": "髮鬢", "鬅松": "鬅鬆", "松了": "鬆了", "松些": "鬆些", "松劲": "鬆勁", "松动": "鬆動", "松口": "鬆口", "松土": "鬆土", "松弛": "鬆弛", "松快": "鬆快", "松懈": "鬆懈", "松手": "鬆手", "松散": "鬆散", "松林": "鬆林", "松柔": "鬆柔", "松毛虫": "鬆毛蟲", "松浮": "鬆浮", "松涛": "鬆濤", "松科": "鬆科", "松节油": "鬆節油", "松绑": "鬆綁", "松紧": "鬆緊", "松缓": "鬆緩", "松脆": "鬆脆", "松脱": "鬆脫", "松起": "鬆起", "松软": "鬆軟", "松通": "鬆通", "松开": "鬆開", "松饼": "鬆餅", "松松": "鬆鬆", "鬈发": "鬈髮", "胡子": "鬍子", "胡梢": "鬍梢", "胡渣": "鬍渣", "胡髭": "鬍髭", "胡须": "鬍鬚", "须根": "鬚根", "须毛": "鬚毛", "须生": "鬚生", "须眉": "鬚眉", "须发": "鬚髮", "须须": "鬚鬚", "鬓发": "鬢髮", "斗着": "鬥著", "闹着玩儿": "鬧著玩儿", "闹着玩儿": "鬧著玩兒", "郁郁": "鬱郁", "鱼松": "魚鬆", "鲸须": "鯨鬚", "鲇鱼": "鯰魚", "鹤发": "鶴髮", "卤化": "鹵化", "卤味": "鹵味", "卤族": "鹵族", "卤水": "鹵水", "卤汁": "鹵汁", "卤簿": "鹵簿", "卤素": "鹵素", "卤莽": "鹵莽", "卤钝": "鹵鈍", "咸味": "鹹味", "咸土": "鹹土", "咸度": "鹹度", "咸得": "鹹得", "咸水": "鹹水", "咸海": "鹹海", "咸淡": "鹹淡", "咸湖": "鹹湖", "咸汤": "鹹湯", "咸的": "鹹的", "咸肉": "鹹肉", "咸菜": "鹹菜", "咸蛋": "鹹蛋", "咸猪肉": "鹹豬肉", "咸类": "鹹類", "咸鱼": "鹹魚", "咸鸭蛋": "鹹鴨蛋", "咸卤": "鹹鹵", "咸咸": "鹹鹹", "盐卤": "鹽鹵", "面价": "麵價", "面包": "麵包", "面团": "麵團", "面店": "麵店", "面厂": "麵廠", 
"面杖": "麵杖", "面条": "麵條", "面灰": "麵灰", "面皮": "麵皮", "面筋": "麵筋", "面粉": "麵粉", "面糊": "麵糊", "面线": "麵線", "面茶": "麵茶", "面食": "麵食", "面饺": "麵餃", "面饼": "麵餅", "麻酱面": "麻醬麵", "黄历": "黃曆", "黄发垂髫": "黃髮垂髫", "黑发": "黑髮", "黑松": "黑鬆", "霉毒": "黴毒", "霉菌": "黴菌", "鼓里": "鼓裡", "冬冬": "鼕鼕", "龙卷": "龍卷", "龙须": "龍鬚", } zh2Hans = { '顯著': '显著', '土著': '土著', '印表機': '打印机', '說明檔案': '帮助文件', "瀋": "沈", "畫": "划", "鍾": "钟", "靦": "腼", "餘": "余", "鯰": "鲇", "鹼": "碱", "㠏": "㟆", "𡞵": "㛟", "万": "万", "与": "与", "丑": "丑", "丟": "丢", "並": "并", "丰": "丰", "么": "么", "乾": "干", "乾坤": "乾坤", "乾隆": "乾隆", "亂": "乱", "云": "云", "亙": "亘", "亞": "亚", "仆": "仆", "价": "价", "伙": "伙", "佇": "伫", "佈": "布", "体": "体", "余": "余", "佣": "佣", "併": "并", "來": "来", "侖": "仑", "侶": "侣", "俁": "俣", "係": "系", "俔": "伣", "俠": "侠", "倀": "伥", "倆": "俩", "倈": "俫", "倉": "仓", "個": "个", "們": "们", "倫": "伦", "偉": "伟", "側": "侧", "偵": "侦", "偽": "伪", "傑": "杰", "傖": "伧", "傘": "伞", "備": "备", "傢": "家", "傭": "佣", "傯": "偬", "傳": "传", "傴": "伛", "債": "债", "傷": "伤", "傾": "倾", "僂": "偻", "僅": "仅", "僉": "佥", "僑": "侨", "僕": "仆", "僞": "伪", "僥": "侥", "僨": "偾", "價": "价", "儀": "仪", "儂": "侬", "億": "亿", "儈": "侩", "儉": "俭", "儐": "傧", "儔": "俦", "儕": "侪", "儘": "尽", "償": "偿", "優": "优", "儲": "储", "儷": "俪", "儸": "㑩", "儺": "傩", "儻": "傥", "儼": "俨", "儿": "儿", "兇": "凶", "兌": "兑", "兒": "儿", "兗": "兖", "党": "党", "內": "内", "兩": "两", "冊": "册", "冪": "幂", "准": "准", "凈": "净", "凍": "冻", "凜": "凛", "几": "几", "凱": "凯", "划": "划", "別": "别", "刪": "删", "剄": "刭", "則": "则", "剋": "克", "剎": "刹", "剗": "刬", "剛": "刚", "剝": "剥", "剮": "剐", "剴": "剀", "創": "创", "劃": "划", "劇": "剧", "劉": "刘", "劊": "刽", "劌": "刿", "劍": "剑", "劏": "㓥", "劑": "剂", "劚": "㔉", "勁": "劲", "動": "动", "務": "务", "勛": "勋", "勝": "胜", "勞": "劳", "勢": "势", "勩": "勚", "勱": "劢", "勵": "励", "勸": "劝", "勻": "匀", "匭": "匦", "匯": "汇", "匱": "匮", "區": "区", "協": "协", "卷": "卷", "卻": "却", "厂": "厂", "厙": "厍", "厠": "厕", "厭": "厌", "厲": "厉", "厴": "厣", "參": "参", "叄": "叁", "叢": "丛", "台": "台", "叶": "叶", "吊": "吊", "后": "后", "吳": "吴", "吶": "呐", "呂": "吕", "獃": "呆", "咼": "呙", "員": "员", "唄": 
"呗", "唚": "吣", "問": "问", "啓": "启", "啞": "哑", "啟": "启", "啢": "唡", "喎": "㖞", "喚": "唤", "喪": "丧", "喬": "乔", "單": "单", "喲": "哟", "嗆": "呛", "嗇": "啬", "嗊": "唝", "嗎": "吗", "嗚": "呜", "嗩": "唢", "嗶": "哔", "嘆": "叹", "嘍": "喽", "嘔": "呕", "嘖": "啧", "嘗": "尝", "嘜": "唛", "嘩": "哗", "嘮": "唠", "嘯": "啸", "嘰": "叽", "嘵": "哓", "嘸": "呒", "嘽": "啴", "噁": "恶", "噓": "嘘", "噚": "㖊", "噝": "咝", "噠": "哒", "噥": "哝", "噦": "哕", "噯": "嗳", "噲": "哙", "噴": "喷", "噸": "吨", "噹": "当", "嚀": "咛", "嚇": "吓", "嚌": "哜", "嚕": "噜", "嚙": "啮", "嚥": "咽", "嚦": "呖", "嚨": "咙", "嚮": "向", "嚲": "亸", "嚳": "喾", "嚴": "严", "嚶": "嘤", "囀": "啭", "囁": "嗫", "囂": "嚣", "囅": "冁", "囈": "呓", "囌": "苏", "囑": "嘱", "囪": "囱", "圇": "囵", "國": "国", "圍": "围", "園": "园", "圓": "圆", "圖": "图", "團": "团", "坏": "坏", "垵": "埯", "埡": "垭", "埰": "采", "執": "执", "堅": "坚", "堊": "垩", "堖": "垴", "堝": "埚", "堯": "尧", "報": "报", "場": "场", "塊": "块", "塋": "茔", "塏": "垲", "塒": "埘", "塗": "涂", "塚": "冢", "塢": "坞", "塤": "埙", "塵": "尘", "塹": "堑", "墊": "垫", "墜": "坠", "墮": "堕", "墳": "坟", "墻": "墙", "墾": "垦", "壇": "坛", "壈": "𡒄", "壋": "垱", "壓": "压", "壘": "垒", "壙": "圹", "壚": "垆", "壞": "坏", "壟": "垄", "壠": "垅", "壢": "坜", "壩": "坝", "壯": "壮", "壺": "壶", "壼": "壸", "壽": "寿", "夠": "够", "夢": "梦", "夾": "夹", "奐": "奂", "奧": "奥", "奩": "奁", "奪": "夺", "奬": "奖", "奮": "奋", "奼": "姹", "妝": "妆", "姍": "姗", "姜": "姜", "姦": "奸", "娛": "娱", "婁": "娄", "婦": "妇", "婭": "娅", "媧": "娲", "媯": "妫", "媼": "媪", "媽": "妈", "嫗": "妪", "嫵": "妩", "嫻": "娴", "嫿": "婳", "嬀": "妫", "嬈": "娆", "嬋": "婵", "嬌": "娇", "嬙": "嫱", "嬡": "嫒", "嬤": "嬷", "嬪": "嫔", "嬰": "婴", "嬸": "婶", "孌": "娈", "孫": "孙", "學": "学", "孿": "孪", "宁": "宁", "宮": "宫", "寢": "寝", "實": "实", "寧": "宁", "審": "审", "寫": "写", "寬": "宽", "寵": "宠", "寶": "宝", "將": "将", "專": "专", "尋": "寻", "對": "对", "導": "导", "尷": "尴", "屆": "届", "屍": "尸", "屓": "屃", "屜": "屉", "屢": "屡", "層": "层", "屨": "屦", "屬": "属", "岡": "冈", "峴": "岘", "島": "岛", "峽": "峡", "崍": "崃", "崗": "岗", "崢": "峥", "崬": "岽", "嵐": "岚", "嶁": "嵝", "嶄": "崭", "嶇": "岖", "嶔": "嵚", "嶗": "崂", "嶠": "峤", "嶢": "峣", "嶧": "峄", "嶮": "崄", "嶴": "岙", "嶸": 
"嵘", "嶺": "岭", "嶼": "屿", "嶽": "岳", "巋": "岿", "巒": "峦", "巔": "巅", "巰": "巯", "帘": "帘", "帥": "帅", "師": "师", "帳": "帐", "帶": "带", "幀": "帧", "幃": "帏", "幗": "帼", "幘": "帻", "幟": "帜", "幣": "币", "幫": "帮", "幬": "帱", "幹": "干", "幺": "么", "幾": "几", "广": "广", "庫": "库", "廁": "厕", "廂": "厢", "廄": "厩", "廈": "厦", "廚": "厨", "廝": "厮", "廟": "庙", "廠": "厂", "廡": "庑", "廢": "废", "廣": "广", "廩": "廪", "廬": "庐", "廳": "厅", "弒": "弑", "弳": "弪", "張": "张", "強": "强", "彆": "别", "彈": "弹", "彌": "弥", "彎": "弯", "彙": "汇", "彞": "彝", "彥": "彦", "征": "征", "後": "后", "徑": "径", "從": "从", "徠": "徕", "復": "复", "徵": "征", "徹": "彻", "志": "志", "恆": "恒", "恥": "耻", "悅": "悦", "悞": "悮", "悵": "怅", "悶": "闷", "惡": "恶", "惱": "恼", "惲": "恽", "惻": "恻", "愛": "爱", "愜": "惬", "愨": "悫", "愴": "怆", "愷": "恺", "愾": "忾", "愿": "愿", "慄": "栗", "態": "态", "慍": "愠", "慘": "惨", "慚": "惭", "慟": "恸", "慣": "惯", "慤": "悫", "慪": "怄", "慫": "怂", "慮": "虑", "慳": "悭", "慶": "庆", "憂": "忧", "憊": "惫", "憐": "怜", "憑": "凭", "憒": "愦", "憚": "惮", "憤": "愤", "憫": "悯", "憮": "怃", "憲": "宪", "憶": "忆", "懇": "恳", "應": "应", "懌": "怿", "懍": "懔", "懞": "蒙", "懟": "怼", "懣": "懑", "懨": "恹", "懲": "惩", "懶": "懒", "懷": "怀", "懸": "悬", "懺": "忏", "懼": "惧", "懾": "慑", "戀": "恋", "戇": "戆", "戔": "戋", "戧": "戗", "戩": "戬", "戰": "战", "戱": "戯", "戲": "戏", "戶": "户", "担": "担", "拋": "抛", "挩": "捝", "挾": "挟", "捨": "舍", "捫": "扪", "据": "据", "掃": "扫", "掄": "抡", "掗": "挜", "掙": "挣", "掛": "挂", "採": "采", "揀": "拣", "揚": "扬", "換": "换", "揮": "挥", "損": "损", "搖": "摇", "搗": "捣", "搵": "揾", "搶": "抢", "摑": "掴", "摜": "掼", "摟": "搂", "摯": "挚", "摳": "抠", "摶": "抟", "摺": "折", "摻": "掺", "撈": "捞", "撏": "挦", "撐": "撑", "撓": "挠", "撝": "㧑", "撟": "挢", "撣": "掸", "撥": "拨", "撫": "抚", "撲": "扑", "撳": "揿", "撻": "挞", "撾": "挝", "撿": "捡", "擁": "拥", "擄": "掳", "擇": "择", "擊": "击", "擋": "挡", "擓": "㧟", "擔": "担", "據": "据", "擠": "挤", "擬": "拟", "擯": "摈", "擰": "拧", "擱": "搁", "擲": "掷", "擴": "扩", "擷": "撷", "擺": "摆", "擻": "擞", "擼": "撸", "擾": "扰", "攄": "摅", "攆": "撵", "攏": "拢", "攔": "拦", "攖": "撄", "攙": "搀", "攛": "撺", "攜": "携", "攝": "摄", "攢": "攒", "攣": "挛", "攤": 
"摊", "攪": "搅", "攬": "揽", "敗": "败", "敘": "叙", "敵": "敌", "數": "数", "斂": "敛", "斃": "毙", "斕": "斓", "斗": "斗", "斬": "斩", "斷": "断", "於": "于", "時": "时", "晉": "晋", "晝": "昼", "暈": "晕", "暉": "晖", "暘": "旸", "暢": "畅", "暫": "暂", "曄": "晔", "曆": "历", "曇": "昙", "曉": "晓", "曏": "向", "曖": "暧", "曠": "旷", "曨": "昽", "曬": "晒", "書": "书", "會": "会", "朧": "胧", "朮": "术", "术": "术", "朴": "朴", "東": "东", "杴": "锨", "极": "极", "柜": "柜", "柵": "栅", "桿": "杆", "梔": "栀", "梘": "枧", "條": "条", "梟": "枭", "梲": "棁", "棄": "弃", "棖": "枨", "棗": "枣", "棟": "栋", "棧": "栈", "棲": "栖", "棶": "梾", "椏": "桠", "楊": "杨", "楓": "枫", "楨": "桢", "業": "业", "極": "极", "榪": "杩", "榮": "荣", "榲": "榅", "榿": "桤", "構": "构", "槍": "枪", "槤": "梿", "槧": "椠", "槨": "椁", "槳": "桨", "樁": "桩", "樂": "乐", "樅": "枞", "樓": "楼", "標": "标", "樞": "枢", "樣": "样", "樸": "朴", "樹": "树", "樺": "桦", "橈": "桡", "橋": "桥", "機": "机", "橢": "椭", "橫": "横", "檁": "檩", "檉": "柽", "檔": "档", "檜": "桧", "檟": "槚", "檢": "检", "檣": "樯", "檮": "梼", "檯": "台", "檳": "槟", "檸": "柠", "檻": "槛", "櫃": "柜", "櫓": "橹", "櫚": "榈", "櫛": "栉", "櫝": "椟", "櫞": "橼", "櫟": "栎", "櫥": "橱", "櫧": "槠", "櫨": "栌", "櫪": "枥", "櫫": "橥", "櫬": "榇", "櫱": "蘖", "櫳": "栊", "櫸": "榉", "櫻": "樱", "欄": "栏", "權": "权", "欏": "椤", "欒": "栾", "欖": "榄", "欞": "棂", "欽": "钦", "歐": "欧", "歟": "欤", "歡": "欢", "歲": "岁", "歷": "历", "歸": "归", "歿": "殁", "殘": "残", "殞": "殒", "殤": "殇", "殨": "㱮", "殫": "殚", "殮": "殓", "殯": "殡", "殰": "㱩", "殲": "歼", "殺": "杀", "殻": "壳", "殼": "壳", "毀": "毁", "毆": "殴", "毿": "毵", "氂": "牦", "氈": "毡", "氌": "氇", "氣": "气", "氫": "氢", "氬": "氩", "氳": "氲", "汙": "污", "決": "决", "沒": "没", "沖": "冲", "況": "况", "洶": "汹", "浹": "浃", "涂": "涂", "涇": "泾", "涼": "凉", "淀": "淀", "淒": "凄", "淚": "泪", "淥": "渌", "淨": "净", "淩": "凌", "淪": "沦", "淵": "渊", "淶": "涞", "淺": "浅", "渙": "涣", "減": "减", "渦": "涡", "測": "测", "渾": "浑", "湊": "凑", "湞": "浈", "湯": "汤", "溈": "沩", "準": "准", "溝": "沟", "溫": "温", "滄": "沧", "滅": "灭", "滌": "涤", "滎": "荥", "滬": "沪", "滯": "滞", "滲": "渗", "滷": "卤", "滸": "浒", "滻": "浐", "滾": "滚", "滿": "满", "漁": "渔", "漚": "沤", "漢": "汉", "漣": "涟", "漬": "渍", "漲": 
"涨", "漵": "溆", "漸": "渐", "漿": "浆", "潁": "颍", "潑": "泼", "潔": "洁", "潙": "沩", "潛": "潜", "潤": "润", "潯": "浔", "潰": "溃", "潷": "滗", "潿": "涠", "澀": "涩", "澆": "浇", "澇": "涝", "澐": "沄", "澗": "涧", "澠": "渑", "澤": "泽", "澦": "滪", "澩": "泶", "澮": "浍", "澱": "淀", "濁": "浊", "濃": "浓", "濕": "湿", "濘": "泞", "濛": "蒙", "濟": "济", "濤": "涛", "濫": "滥", "濰": "潍", "濱": "滨", "濺": "溅", "濼": "泺", "濾": "滤", "瀅": "滢", "瀆": "渎", "瀇": "㲿", "瀉": "泻", "瀋": "沈", "瀏": "浏", "瀕": "濒", "瀘": "泸", "瀝": "沥", "瀟": "潇", "瀠": "潆", "瀦": "潴", "瀧": "泷", "瀨": "濑", "瀰": "弥", "瀲": "潋", "瀾": "澜", "灃": "沣", "灄": "滠", "灑": "洒", "灕": "漓", "灘": "滩", "灝": "灏", "灠": "漤", "灣": "湾", "灤": "滦", "灧": "滟", "災": "灾", "為": "为", "烏": "乌", "烴": "烃", "無": "无", "煉": "炼", "煒": "炜", "煙": "烟", "煢": "茕", "煥": "焕", "煩": "烦", "煬": "炀", "煱": "㶽", "熅": "煴", "熒": "荧", "熗": "炝", "熱": "热", "熲": "颎", "熾": "炽", "燁": "烨", "燈": "灯", "燉": "炖", "燒": "烧", "燙": "烫", "燜": "焖", "營": "营", "燦": "灿", "燭": "烛", "燴": "烩", "燶": "㶶", "燼": "烬", "燾": "焘", "爍": "烁", "爐": "炉", "爛": "烂", "爭": "争", "爲": "为", "爺": "爷", "爾": "尔", "牆": "墙", "牘": "牍", "牽": "牵", "犖": "荦", "犢": "犊", "犧": "牺", "狀": "状", "狹": "狭", "狽": "狈", "猙": "狰", "猶": "犹", "猻": "狲", "獁": "犸", "獄": "狱", "獅": "狮", "獎": "奖", "獨": "独", "獪": "狯", "獫": "猃", "獮": "狝", "獰": "狞", "獱": "㺍", "獲": "获", "獵": "猎", "獷": "犷", "獸": "兽", "獺": "獭", "獻": "献", "獼": "猕", "玀": "猡", "現": "现", "琺": "珐", "琿": "珲", "瑋": "玮", "瑒": "玚", "瑣": "琐", "瑤": "瑶", "瑩": "莹", "瑪": "玛", "瑲": "玱", "璉": "琏", "璣": "玑", "璦": "瑷", "璫": "珰", "環": "环", "璽": "玺", "瓊": "琼", "瓏": "珑", "瓔": "璎", "瓚": "瓒", "甌": "瓯", "產": "产", "産": "产", "畝": "亩", "畢": "毕", "異": "异", "畵": "画", "當": "当", "疇": "畴", "疊": "叠", "痙": "痉", "痾": "疴", "瘂": "痖", "瘋": "疯", "瘍": "疡", "瘓": "痪", "瘞": "瘗", "瘡": "疮", "瘧": "疟", "瘮": "瘆", "瘲": "疭", "瘺": "瘘", "瘻": "瘘", "療": "疗", "癆": "痨", "癇": "痫", "癉": "瘅", "癘": "疠", "癟": "瘪", "癢": "痒", "癤": "疖", "癥": "症", "癧": "疬", "癩": "癞", "癬": "癣", "癭": "瘿", "癮": "瘾", "癰": "痈", "癱": "瘫", "癲": "癫", "發": "发", "皚": "皑", "皰": "疱", "皸": "皲", "皺": "皱", "盃": "杯", "盜": 
"盗", "盞": "盏", "盡": "尽", "監": "监", "盤": "盘", "盧": "卢", "盪": "荡", "眥": "眦", "眾": "众", "睏": "困", "睜": "睁", "睞": "睐", "瞘": "眍", "瞜": "䁖", "瞞": "瞒", "瞭": "了", "瞶": "瞆", "瞼": "睑", "矇": "蒙", "矓": "眬", "矚": "瞩", "矯": "矫", "硃": "朱", "硜": "硁", "硤": "硖", "硨": "砗", "确": "确", "硯": "砚", "碩": "硕", "碭": "砀", "碸": "砜", "確": "确", "碼": "码", "磑": "硙", "磚": "砖", "磣": "碜", "磧": "碛", "磯": "矶", "磽": "硗", "礆": "硷", "礎": "础", "礙": "碍", "礦": "矿", "礪": "砺", "礫": "砾", "礬": "矾", "礱": "砻", "祿": "禄", "禍": "祸", "禎": "祯", "禕": "祎", "禡": "祃", "禦": "御", "禪": "禅", "禮": "礼", "禰": "祢", "禱": "祷", "禿": "秃", "秈": "籼", "种": "种", "稅": "税", "稈": "秆", "稏": "䅉", "稟": "禀", "種": "种", "稱": "称", "穀": "谷", "穌": "稣", "積": "积", "穎": "颖", "穠": "秾", "穡": "穑", "穢": "秽", "穩": "稳", "穫": "获", "穭": "稆", "窩": "窝", "窪": "洼", "窮": "穷", "窯": "窑", "窵": "窎", "窶": "窭", "窺": "窥", "竄": "窜", "竅": "窍", "竇": "窦", "竈": "灶", "竊": "窃", "竪": "竖", "競": "竞", "筆": "笔", "筍": "笋", "筑": "筑", "筧": "笕", "筴": "䇲", "箋": "笺", "箏": "筝", "節": "节", "範": "范", "築": "筑", "篋": "箧", "篔": "筼", "篤": "笃", "篩": "筛", "篳": "筚", "簀": "箦", "簍": "篓", "簞": "箪", "簡": "简", "簣": "篑", "簫": "箫", "簹": "筜", "簽": "签", "簾": "帘", "籃": "篮", "籌": "筹", "籖": "签", "籙": "箓", "籜": "箨", "籟": "籁", "籠": "笼", "籩": "笾", "籪": "簖", "籬": "篱", "籮": "箩", "籲": "吁", "粵": "粤", "糝": "糁", "糞": "粪", "糧": "粮", "糰": "团", "糲": "粝", "糴": "籴", "糶": "粜", "糹": "纟", "糾": "纠", "紀": "纪", "紂": "纣", "約": "约", "紅": "红", "紆": "纡", "紇": "纥", "紈": "纨", "紉": "纫", "紋": "纹", "納": "纳", "紐": "纽", "紓": "纾", "純": "纯", "紕": "纰", "紖": "纼", "紗": "纱", "紘": "纮", "紙": "纸", "級": "级", "紛": "纷", "紜": "纭", "紝": "纴", "紡": "纺", "紬": "䌷", "細": "细", "紱": "绂", "紲": "绁", "紳": "绅", "紵": "纻", "紹": "绍", "紺": "绀", "紼": "绋", "紿": "绐", "絀": "绌", "終": "终", "組": "组", "絅": "䌹", "絆": "绊", "絎": "绗", "結": "结", "絕": "绝", "絛": "绦", "絝": "绔", "絞": "绞", "絡": "络", "絢": "绚", "給": "给", "絨": "绒", "絰": "绖", "統": "统", "絲": "丝", "絳": "绛", "絶": "绝", "絹": "绢", "綁": "绑", "綃": "绡", "綆": "绠", "綈": "绨", "綉": "绣", "綌": "绤", "綏": "绥", "綐": "䌼", "經": "经", "綜": "综", "綞": 
"缍", "綠": "绿", "綢": "绸", "綣": "绻", "綫": "线", "綬": "绶", "維": "维", "綯": "绹", "綰": "绾", "綱": "纲", "網": "网", "綳": "绷", "綴": "缀", "綵": "䌽", "綸": "纶", "綹": "绺", "綺": "绮", "綻": "绽", "綽": "绰", "綾": "绫", "綿": "绵", "緄": "绲", "緇": "缁", "緊": "紧", "緋": "绯", "緑": "绿", "緒": "绪", "緓": "绬", "緔": "绱", "緗": "缃", "緘": "缄", "緙": "缂", "線": "线", "緝": "缉", "緞": "缎", "締": "缔", "緡": "缗", "緣": "缘", "緦": "缌", "編": "编", "緩": "缓", "緬": "缅", "緯": "纬", "緱": "缑", "緲": "缈", "練": "练", "緶": "缏", "緹": "缇", "緻": "致", "縈": "萦", "縉": "缙", "縊": "缢", "縋": "缒", "縐": "绉", "縑": "缣", "縕": "缊", "縗": "缞", "縛": "缚", "縝": "缜", "縞": "缟", "縟": "缛", "縣": "县", "縧": "绦", "縫": "缝", "縭": "缡", "縮": "缩", "縱": "纵", "縲": "缧", "縳": "䌸", "縴": "纤", "縵": "缦", "縶": "絷", "縷": "缕", "縹": "缥", "總": "总", "績": "绩", "繃": "绷", "繅": "缫", "繆": "缪", "繒": "缯", "織": "织", "繕": "缮", "繚": "缭", "繞": "绕", "繡": "绣", "繢": "缋", "繩": "绳", "繪": "绘", "繫": "系", "繭": "茧", "繮": "缰", "繯": "缳", "繰": "缲", "繳": "缴", "繸": "䍁", "繹": "绎", "繼": "继", "繽": "缤", "繾": "缱", "繿": "䍀", "纈": "缬", "纊": "纩", "續": "续", "纍": "累", "纏": "缠", "纓": "缨", "纔": "才", "纖": "纤", "纘": "缵", "纜": "缆", "缽": "钵", "罈": "坛", "罌": "罂", "罰": "罚", "罵": "骂", "罷": "罢", "羅": "罗", "羆": "罴", "羈": "羁", "羋": "芈", "羥": "羟", "義": "义", "習": "习", "翹": "翘", "耬": "耧", "耮": "耢", "聖": "圣", "聞": "闻", "聯": "联", "聰": "聪", "聲": "声", "聳": "耸", "聵": "聩", "聶": "聂", "職": "职", "聹": "聍", "聽": "听", "聾": "聋", "肅": "肃", "胜": "胜", "脅": "胁", "脈": "脉", "脛": "胫", "脫": "脱", "脹": "胀", "腊": "腊", "腎": "肾", "腖": "胨", "腡": "脶", "腦": "脑", "腫": "肿", "腳": "脚", "腸": "肠", "膃": "腽", "膚": "肤", "膠": "胶", "膩": "腻", "膽": "胆", "膾": "脍", "膿": "脓", "臉": "脸", "臍": "脐", "臏": "膑", "臘": "腊", "臚": "胪", "臟": "脏", "臠": "脔", "臢": "臜", "臥": "卧", "臨": "临", "臺": "台", "與": "与", "興": "兴", "舉": "举", "舊": "旧", "艙": "舱", "艤": "舣", "艦": "舰", "艫": "舻", "艱": "艰", "艷": "艳", "芻": "刍", "苧": "苎", "苹": "苹", "范": "范", "茲": "兹", "荊": "荆", "莊": "庄", "莖": "茎", "莢": "荚", "莧": "苋", "華": "华", "萇": "苌", "萊": "莱", "萬": "万", "萵": "莴", "葉": "叶", "葒": "荭", "著名": "著名", "葤": "荮", 
"葦": "苇", "葯": "药", "葷": "荤", "蒓": "莼", "蒔": "莳", "蒞": "莅", "蒼": "苍", "蓀": "荪", "蓋": "盖", "蓮": "莲", "蓯": "苁", "蓴": "莼", "蓽": "荜", "蔔": "卜", "蔞": "蒌", "蔣": "蒋", "蔥": "葱", "蔦": "茑", "蔭": "荫", "蕁": "荨", "蕆": "蒇", "蕎": "荞", "蕒": "荬", "蕓": "芸", "蕕": "莸", "蕘": "荛", "蕢": "蒉", "蕩": "荡", "蕪": "芜", "蕭": "萧", "蕷": "蓣", "薀": "蕰", "薈": "荟", "薊": "蓟", "薌": "芗", "薔": "蔷", "薘": "荙", "薟": "莶", "薦": "荐", "薩": "萨", "薳": "䓕", "薴": "苧", "薺": "荠", "藉": "借", "藍": "蓝", "藎": "荩", "藝": "艺", "藥": "药", "藪": "薮", "藴": "蕴", "藶": "苈", "藹": "蔼", "藺": "蔺", "蘄": "蕲", "蘆": "芦", "蘇": "苏", "蘊": "蕴", "蘋": "苹", "蘚": "藓", "蘞": "蔹", "蘢": "茏", "蘭": "兰", "蘺": "蓠", "蘿": "萝", "虆": "蔂", "處": "处", "虛": "虚", "虜": "虏", "號": "号", "虧": "亏", "虫": "虫", "虯": "虬", "蛺": "蛱", "蛻": "蜕", "蜆": "蚬", "蜡": "蜡", "蝕": "蚀", "蝟": "猬", "蝦": "虾", "蝸": "蜗", "螄": "蛳", "螞": "蚂", "螢": "萤", "螮": "䗖", "螻": "蝼", "螿": "螀", "蟄": "蛰", "蟈": "蝈", "蟎": "螨", "蟣": "虮", "蟬": "蝉", "蟯": "蛲", "蟲": "虫", "蟶": "蛏", "蟻": "蚁", "蠅": "蝇", "蠆": "虿", "蠐": "蛴", "蠑": "蝾", "蠟": "蜡", "蠣": "蛎", "蠨": "蟏", "蠱": "蛊", "蠶": "蚕", "蠻": "蛮", "衆": "众", "衊": "蔑", "術": "术", "衕": "同", "衚": "胡", "衛": "卫", "衝": "冲", "衹": "只", "袞": "衮", "裊": "袅", "裏": "里", "補": "补", "裝": "装", "裡": "里", "製": "制", "複": "复", "褌": "裈", "褘": "袆", "褲": "裤", "褳": "裢", "褸": "褛", "褻": "亵", "襇": "裥", "襏": "袯", "襖": "袄", "襝": "裣", "襠": "裆", "襤": "褴", "襪": "袜", "襬": "䙓", "襯": "衬", "襲": "袭", "覆蓋": "覆盖", "翻來覆去": "翻来覆去", "見": "见", "覎": "觃", "規": "规", "覓": "觅", "視": "视", "覘": "觇", "覡": "觋", "覥": "觍", "覦": "觎", "親": "亲", "覬": "觊", "覯": "觏", "覲": "觐", "覷": "觑", "覺": "觉", "覽": "览", "覿": "觌", "觀": "观", "觴": "觞", "觶": "觯", "觸": "触", "訁": "讠", "訂": "订", "訃": "讣", "計": "计", "訊": "讯", "訌": "讧", "討": "讨", "訐": "讦", "訒": "讱", "訓": "训", "訕": "讪", "訖": "讫", "託": "讬", "記": "记", "訛": "讹", "訝": "讶", "訟": "讼", "訢": "䜣", "訣": "诀", "訥": "讷", "訩": "讻", "訪": "访", "設": "设", "許": "许", "訴": "诉", "訶": "诃", "診": "诊", "註": "注", "詁": "诂", "詆": "诋", "詎": "讵", "詐": "诈", "詒": "诒", "詔": "诏", "評": "评", "詖": "诐", "詗": "诇", "詘": "诎", "詛": "诅", 
"詞": "词", "詠": "咏", "詡": "诩", "詢": "询", "詣": "诣", "試": "试", "詩": "诗", "詫": "诧", "詬": "诟", "詭": "诡", "詮": "诠", "詰": "诘", "話": "话", "該": "该", "詳": "详", "詵": "诜", "詼": "诙", "詿": "诖", "誄": "诔", "誅": "诛", "誆": "诓", "誇": "夸", "誌": "志", "認": "认", "誑": "诳", "誒": "诶", "誕": "诞", "誘": "诱", "誚": "诮", "語": "语", "誠": "诚", "誡": "诫", "誣": "诬", "誤": "误", "誥": "诰", "誦": "诵", "誨": "诲", "說": "说", "説": "说", "誰": "谁", "課": "课", "誶": "谇", "誹": "诽", "誼": "谊", "誾": "訚", "調": "调", "諂": "谄", "諄": "谆", "談": "谈", "諉": "诿", "請": "请", "諍": "诤", "諏": "诹", "諑": "诼", "諒": "谅", "論": "论", "諗": "谂", "諛": "谀", "諜": "谍", "諝": "谞", "諞": "谝", "諢": "诨", "諤": "谔", "諦": "谛", "諧": "谐", "諫": "谏", "諭": "谕", "諮": "谘", "諱": "讳", "諳": "谙", "諶": "谌", "諷": "讽", "諸": "诸", "諺": "谚", "諼": "谖", "諾": "诺", "謀": "谋", "謁": "谒", "謂": "谓", "謄": "誊", "謅": "诌", "謊": "谎", "謎": "谜", "謐": "谧", "謔": "谑", "謖": "谡", "謗": "谤", "謙": "谦", "謚": "谥", "講": "讲", "謝": "谢", "謠": "谣", "謡": "谣", "謨": "谟", "謫": "谪", "謬": "谬", "謭": "谫", "謳": "讴", "謹": "谨", "謾": "谩", "譅": "䜧", "證": "证", "譎": "谲", "譏": "讥", "譖": "谮", "識": "识", "譙": "谯", "譚": "谭", "譜": "谱", "譫": "谵", "譯": "译", "議": "议", "譴": "谴", "護": "护", "譸": "诪", "譽": "誉", "譾": "谫", "讀": "读", "變": "变", "讎": "仇", "讎": "雠", "讒": "谗", "讓": "让", "讕": "谰", "讖": "谶", "讜": "谠", "讞": "谳", "豈": "岂", "豎": "竖", "豐": "丰", "豬": "猪", "豶": "豮", "貓": "猫", "貙": "䝙", "貝": "贝", "貞": "贞", "貟": "贠", "負": "负", "財": "财", "貢": "贡", "貧": "贫", "貨": "货", "販": "贩", "貪": "贪", "貫": "贯", "責": "责", "貯": "贮", "貰": "贳", "貲": "赀", "貳": "贰", "貴": "贵", "貶": "贬", "買": "买", "貸": "贷", "貺": "贶", "費": "费", "貼": "贴", "貽": "贻", "貿": "贸", "賀": "贺", "賁": "贲", "賂": "赂", "賃": "赁", "賄": "贿", "賅": "赅", "資": "资", "賈": "贾", "賊": "贼", "賑": "赈", "賒": "赊", "賓": "宾", "賕": "赇", "賙": "赒", "賚": "赉", "賜": "赐", "賞": "赏", "賠": "赔", "賡": "赓", "賢": "贤", "賣": "卖", "賤": "贱", "賦": "赋", "賧": "赕", "質": "质", "賫": "赍", "賬": "账", "賭": "赌", "賰": "䞐", "賴": "赖", "賵": "赗", "賺": "赚", "賻": "赙", "購": "购", "賽": "赛", "賾": "赜", "贄": "贽", "贅": "赘", "贇": "赟", "贈": "赠", "贊": "赞", 
"贋": "赝", "贍": "赡", "贏": "赢", "贐": "赆", "贓": "赃", "贔": "赑", "贖": "赎", "贗": "赝", "贛": "赣", "贜": "赃", "赬": "赪", "趕": "赶", "趙": "赵", "趨": "趋", "趲": "趱", "跡": "迹", "踐": "践", "踴": "踊", "蹌": "跄", "蹕": "跸", "蹣": "蹒", "蹤": "踪", "蹺": "跷", "躂": "跶", "躉": "趸", "躊": "踌", "躋": "跻", "躍": "跃", "躑": "踯", "躒": "跞", "躓": "踬", "躕": "蹰", "躚": "跹", "躡": "蹑", "躥": "蹿", "躦": "躜", "躪": "躏", "軀": "躯", "車": "车", "軋": "轧", "軌": "轨", "軍": "军", "軑": "轪", "軒": "轩", "軔": "轫", "軛": "轭", "軟": "软", "軤": "轷", "軫": "轸", "軲": "轱", "軸": "轴", "軹": "轵", "軺": "轺", "軻": "轲", "軼": "轶", "軾": "轼", "較": "较", "輅": "辂", "輇": "辁", "輈": "辀", "載": "载", "輊": "轾", "輒": "辄", "輓": "挽", "輔": "辅", "輕": "轻", "輛": "辆", "輜": "辎", "輝": "辉", "輞": "辋", "輟": "辍", "輥": "辊", "輦": "辇", "輩": "辈", "輪": "轮", "輬": "辌", "輯": "辑", "輳": "辏", "輸": "输", "輻": "辐", "輾": "辗", "輿": "舆", "轀": "辒", "轂": "毂", "轄": "辖", "轅": "辕", "轆": "辘", "轉": "转", "轍": "辙", "轎": "轿", "轔": "辚", "轟": "轰", "轡": "辔", "轢": "轹", "轤": "轳", "辟": "辟", "辦": "办", "辭": "辞", "辮": "辫", "辯": "辩", "農": "农", "迴": "回", "适": "适", "逕": "迳", "這": "这", "連": "连", "週": "周", "進": "进", "遊": "游", "運": "运", "過": "过", "達": "达", "違": "违", "遙": "遥", "遜": "逊", "遞": "递", "遠": "远", "適": "适", "遲": "迟", "遷": "迁", "選": "选", "遺": "遗", "遼": "辽", "邁": "迈", "還": "还", "邇": "迩", "邊": "边", "邏": "逻", "邐": "逦", "郁": "郁", "郟": "郏", "郵": "邮", "鄆": "郓", "鄉": "乡", "鄒": "邹", "鄔": "邬", "鄖": "郧", "鄧": "邓", "鄭": "郑", "鄰": "邻", "鄲": "郸", "鄴": "邺", "鄶": "郐", "鄺": "邝", "酇": "酂", "酈": "郦", "醖": "酝", "醜": "丑", "醞": "酝", "醫": "医", "醬": "酱", "醱": "酦", "釀": "酿", "釁": "衅", "釃": "酾", "釅": "酽", "采": "采", "釋": "释", "釐": "厘", "釒": "钅", "釓": "钆", "釔": "钇", "釕": "钌", "釗": "钊", "釘": "钉", "釙": "钋", "針": "针", "釣": "钓", "釤": "钐", "釧": "钏", "釩": "钒", "釵": "钗", "釷": "钍", "釹": "钕", "釺": "钎", "鈀": "钯", "鈁": "钫", "鈃": "钘", "鈄": "钭", "鈈": "钚", "鈉": "钠", "鈍": "钝", "鈎": "钩", "鈐": "钤", "鈑": "钣", "鈒": "钑", "鈔": "钞", "鈕": "钮", "鈞": "钧", "鈣": "钙", "鈥": "钬", "鈦": "钛", "鈧": "钪", "鈮": "铌", "鈰": "铈", "鈳": "钶", "鈴": "铃", "鈷": "钴", "鈸": "钹", "鈹": "铍", 
"鈺": "钰", "鈽": "钸", "鈾": "铀", "鈿": "钿", "鉀": "钾", "鉅": "钜", "鉈": "铊", "鉉": "铉", "鉋": "铇", "鉍": "铋", "鉑": "铂", "鉕": "钷", "鉗": "钳", "鉚": "铆", "鉛": "铅", "鉞": "钺", "鉢": "钵", "鉤": "钩", "鉦": "钲", "鉬": "钼", "鉭": "钽", "鉶": "铏", "鉸": "铰", "鉺": "铒", "鉻": "铬", "鉿": "铪", "銀": "银", "銃": "铳", "銅": "铜", "銍": "铚", "銑": "铣", "銓": "铨", "銖": "铢", "銘": "铭", "銚": "铫", "銛": "铦", "銜": "衔", "銠": "铑", "銣": "铷", "銥": "铱", "銦": "铟", "銨": "铵", "銩": "铥", "銪": "铕", "銫": "铯", "銬": "铐", "銱": "铞", "銳": "锐", "銷": "销", "銹": "锈", "銻": "锑", "銼": "锉", "鋁": "铝", "鋃": "锒", "鋅": "锌", "鋇": "钡", "鋌": "铤", "鋏": "铗", "鋒": "锋", "鋙": "铻", "鋝": "锊", "鋟": "锓", "鋣": "铘", "鋤": "锄", "鋥": "锃", "鋦": "锔", "鋨": "锇", "鋩": "铓", "鋪": "铺", "鋭": "锐", "鋮": "铖", "鋯": "锆", "鋰": "锂", "鋱": "铽", "鋶": "锍", "鋸": "锯", "鋼": "钢", "錁": "锞", "錄": "录", "錆": "锖", "錇": "锫", "錈": "锩", "錏": "铔", "錐": "锥", "錒": "锕", "錕": "锟", "錘": "锤", "錙": "锱", "錚": "铮", "錛": "锛", "錟": "锬", "錠": "锭", "錡": "锜", "錢": "钱", "錦": "锦", "錨": "锚", "錩": "锠", "錫": "锡", "錮": "锢", "錯": "错", "録": "录", "錳": "锰", "錶": "表", "錸": "铼", "鍀": "锝", "鍁": "锨", "鍃": "锪", "鍆": "钔", "鍇": "锴", "鍈": "锳", "鍋": "锅", "鍍": "镀", "鍔": "锷", "鍘": "铡", "鍚": "钖", "鍛": "锻", "鍠": "锽", "鍤": "锸", "鍥": "锲", "鍩": "锘", "鍬": "锹", "鍰": "锾", "鍵": "键", "鍶": "锶", "鍺": "锗", "鍾": "钟", "鎂": "镁", "鎄": "锿", "鎇": "镅", "鎊": "镑", "鎔": "镕", "鎖": "锁", "鎘": "镉", "鎚": "锤", "鎛": "镈", "鎝": "𨱏", "鎡": "镃", "鎢": "钨", "鎣": "蓥", "鎦": "镏", "鎧": "铠", "鎩": "铩", "鎪": "锼", "鎬": "镐", "鎮": "镇", "鎰": "镒", "鎲": "镋", "鎳": "镍", "鎵": "镓", "鎸": "镌", "鎿": "镎", "鏃": "镞", "鏇": "镟", "鏈": "链", "鏌": "镆", "鏍": "镙", "鏐": "镠", "鏑": "镝", "鏗": "铿", "鏘": "锵", "鏜": "镗", "鏝": "镘", "鏞": "镛", "鏟": "铲", "鏡": "镜", "鏢": "镖", "鏤": "镂", "鏨": "錾", "鏰": "镚", "鏵": "铧", "鏷": "镤", "鏹": "镪", "鏽": "锈", "鐃": "铙", "鐋": "铴", "鐐": "镣", "鐒": "铹", "鐓": "镦", "鐔": "镡", "鐘": "钟", "鐙": "镫", "鐝": "镢", "鐠": "镨", "鐦": "锎", "鐧": "锏", "鐨": "镄", "鐫": "镌", "鐮": "镰", "鐲": "镯", "鐳": "镭", "鐵": "铁", "鐶": "镮", "鐸": "铎", "鐺": "铛", "鐿": "镱", "鑄": "铸", "鑊": "镬", "鑌": "镔", "鑒": "鉴", "鑔": "镲", 
"鑕": "锧", "鑞": "镴", "鑠": "铄", "鑣": "镳", "鑥": "镥", "鑭": "镧", "鑰": "钥", "鑱": "镵", "鑲": "镶", "鑷": "镊", "鑹": "镩", "鑼": "锣", "鑽": "钻", "鑾": "銮", "鑿": "凿", "钁": "镢", "镟": "旋", "長": "长", "門": "门", "閂": "闩", "閃": "闪", "閆": "闫", "閈": "闬", "閉": "闭", "開": "开", "閌": "闶", "閎": "闳", "閏": "闰", "閑": "闲", "間": "间", "閔": "闵", "閘": "闸", "閡": "阂", "閣": "阁", "閤": "合", "閥": "阀", "閨": "闺", "閩": "闽", "閫": "阃", "閬": "阆", "閭": "闾", "閱": "阅", "閲": "阅", "閶": "阊", "閹": "阉", "閻": "阎", "閼": "阏", "閽": "阍", "閾": "阈", "閿": "阌", "闃": "阒", "闆": "板", "闈": "闱", "闊": "阔", "闋": "阕", "闌": "阑", "闍": "阇", "闐": "阗", "闒": "阘", "闓": "闿", "闔": "阖", "闕": "阙", "闖": "闯", "關": "关", "闞": "阚", "闠": "阓", "闡": "阐", "闤": "阛", "闥": "闼", "阪": "坂", "陘": "陉", "陝": "陕", "陣": "阵", "陰": "阴", "陳": "陈", "陸": "陆", "陽": "阳", "隉": "陧", "隊": "队", "階": "阶", "隕": "陨", "際": "际", "隨": "随", "險": "险", "隱": "隐", "隴": "陇", "隸": "隶", "隻": "只", "雋": "隽", "雖": "虽", "雙": "双", "雛": "雏", "雜": "杂", "雞": "鸡", "離": "离", "難": "难", "雲": "云", "電": "电", "霢": "霡", "霧": "雾", "霽": "霁", "靂": "雳", "靄": "霭", "靈": "灵", "靚": "靓", "靜": "静", "靨": "靥", "鞀": "鼗", "鞏": "巩", "鞝": "绱", "鞦": "秋", "鞽": "鞒", "韁": "缰", "韃": "鞑", "韆": "千", "韉": "鞯", "韋": "韦", "韌": "韧", "韍": "韨", "韓": "韩", "韙": "韪", "韜": "韬", "韞": "韫", "韻": "韵", "響": "响", "頁": "页", "頂": "顶", "頃": "顷", "項": "项", "順": "顺", "頇": "顸", "須": "须", "頊": "顼", "頌": "颂", "頎": "颀", "頏": "颃", "預": "预", "頑": "顽", "頒": "颁", "頓": "顿", "頗": "颇", "領": "领", "頜": "颌", "頡": "颉", "頤": "颐", "頦": "颏", "頭": "头", "頮": "颒", "頰": "颊", "頲": "颋", "頴": "颕", "頷": "颔", "頸": "颈", "頹": "颓", "頻": "频", "頽": "颓", "顆": "颗", "題": "题", "額": "额", "顎": "颚", "顏": "颜", "顒": "颙", "顓": "颛", "顔": "颜", "願": "愿", "顙": "颡", "顛": "颠", "類": "类", "顢": "颟", "顥": "颢", "顧": "顾", "顫": "颤", "顬": "颥", "顯": "显", "顰": "颦", "顱": "颅", "顳": "颞", "顴": "颧", "風": "风", "颭": "飐", "颮": "飑", "颯": "飒", "颱": "台", "颳": "刮", "颶": "飓", "颸": "飔", "颺": "飏", "颻": "飖", "颼": "飕", "飀": "飗", "飄": "飘", "飆": "飙", "飈": "飚", "飛": "飞", "飠": "饣", "飢": "饥", "飣": "饤", "飥": "饦", "飩": "饨", "飪": "饪", 
"飫": "饫", "飭": "饬", "飯": "饭", "飲": "饮", "飴": "饴", "飼": "饲", "飽": "饱", "飾": "饰", "飿": "饳", "餃": "饺", "餄": "饸", "餅": "饼", "餉": "饷", "養": "养", "餌": "饵", "餎": "饹", "餏": "饻", "餑": "饽", "餒": "馁", "餓": "饿", "餕": "馂", "餖": "饾", "餚": "肴", "餛": "馄", "餜": "馃", "餞": "饯", "餡": "馅", "館": "馆", "餱": "糇", "餳": "饧", "餶": "馉", "餷": "馇", "餺": "馎", "餼": "饩", "餾": "馏", "餿": "馊", "饁": "馌", "饃": "馍", "饅": "馒", "饈": "馐", "饉": "馑", "饊": "馓", "饋": "馈", "饌": "馔", "饑": "饥", "饒": "饶", "饗": "飨", "饜": "餍", "饞": "馋", "饢": "馕", "馬": "马", "馭": "驭", "馮": "冯", "馱": "驮", "馳": "驰", "馴": "驯", "馹": "驲", "駁": "驳", "駐": "驻", "駑": "驽", "駒": "驹", "駔": "驵", "駕": "驾", "駘": "骀", "駙": "驸", "駛": "驶", "駝": "驼", "駟": "驷", "駡": "骂", "駢": "骈", "駭": "骇", "駰": "骃", "駱": "骆", "駸": "骎", "駿": "骏", "騁": "骋", "騂": "骍", "騅": "骓", "騌": "骔", "騍": "骒", "騎": "骑", "騏": "骐", "騖": "骛", "騙": "骗", "騤": "骙", "騧": "䯄", "騫": "骞", "騭": "骘", "騮": "骝", "騰": "腾", "騶": "驺", "騷": "骚", "騸": "骟", "騾": "骡", "驀": "蓦", "驁": "骜", "驂": "骖", "驃": "骠", "驄": "骢", "驅": "驱", "驊": "骅", "驌": "骕", "驍": "骁", "驏": "骣", "驕": "骄", "驗": "验", "驚": "惊", "驛": "驿", "驟": "骤", "驢": "驴", "驤": "骧", "驥": "骥", "驦": "骦", "驪": "骊", "驫": "骉", "骯": "肮", "髏": "髅", "髒": "脏", "體": "体", "髕": "髌", "髖": "髋", "髮": "发", "鬆": "松", "鬍": "胡", "鬚": "须", "鬢": "鬓", "鬥": "斗", "鬧": "闹", "鬩": "阋", "鬮": "阄", "鬱": "郁", "魎": "魉", "魘": "魇", "魚": "鱼", "魛": "鱽", "魢": "鱾", "魨": "鲀", "魯": "鲁", "魴": "鲂", "魷": "鱿", "魺": "鲄", "鮁": "鲅", "鮃": "鲆", "鮊": "鲌", "鮋": "鲉", "鮍": "鲏", "鮎": "鲇", "鮐": "鲐", "鮑": "鲍", "鮒": "鲋", "鮓": "鲊", "鮚": "鲒", "鮜": "鲘", "鮝": "鲞", "鮞": "鲕", "鮦": "鲖", "鮪": "鲔", "鮫": "鲛", "鮭": "鲑", "鮮": "鲜", "鮳": "鲓", "鮶": "鲪", "鮺": "鲝", "鯀": "鲧", "鯁": "鲠", "鯇": "鲩", "鯉": "鲤", "鯊": "鲨", "鯒": "鲬", "鯔": "鲻", "鯕": "鲯", "鯖": "鲭", "鯗": "鲞", "鯛": "鲷", "鯝": "鲴", "鯡": "鲱", "鯢": "鲵", "鯤": "鲲", "鯧": "鲳", "鯨": "鲸", "鯪": "鲮", "鯫": "鲰", "鯴": "鲺", "鯷": "鳀", "鯽": "鲫", "鯿": "鳊", "鰁": "鳈", "鰂": "鲗", "鰃": "鳂", "鰈": "鲽", "鰉": "鳇", "鰍": "鳅", "鰏": "鲾", "鰐": "鳄", "鰒": "鳆", "鰓": "鳃", "鰜": "鳒", "鰟": "鳑", "鰠": "鳋", "鰣": "鲥", 
"鰥": "鳏", "鰨": "鳎", "鰩": "鳐", "鰭": "鳍", "鰮": "鳁", "鰱": "鲢", "鰲": "鳌", "鰳": "鳓", "鰵": "鳘", "鰷": "鲦", "鰹": "鲣", "鰺": "鲹", "鰻": "鳗", "鰼": "鳛", "鰾": "鳔", "鱂": "鳉", "鱅": "鳙", "鱈": "鳕", "鱉": "鳖", "鱒": "鳟", "鱔": "鳝", "鱖": "鳜", "鱗": "鳞", "鱘": "鲟", "鱝": "鲼", "鱟": "鲎", "鱠": "鲙", "鱣": "鳣", "鱤": "鳡", "鱧": "鳢", "鱨": "鲿", "鱭": "鲚", "鱯": "鳠", "鱷": "鳄", "鱸": "鲈", "鱺": "鲡", "䰾": "鲃", "䲁": "鳚", "鳥": "鸟", "鳧": "凫", "鳩": "鸠", "鳬": "凫", "鳲": "鸤", "鳳": "凤", "鳴": "鸣", "鳶": "鸢", "鳾": "䴓", "鴆": "鸩", "鴇": "鸨", "鴉": "鸦", "鴒": "鸰", "鴕": "鸵", "鴛": "鸳", "鴝": "鸲", "鴞": "鸮", "鴟": "鸱", "鴣": "鸪", "鴦": "鸯", "鴨": "鸭", "鴯": "鸸", "鴰": "鸹", "鴴": "鸻", "鴷": "䴕", "鴻": "鸿", "鴿": "鸽", "鵁": "䴔", "鵂": "鸺", "鵃": "鸼", "鵐": "鹀", "鵑": "鹃", "鵒": "鹆", "鵓": "鹁", "鵜": "鹈", "鵝": "鹅", "鵠": "鹄", "鵡": "鹉", "鵪": "鹌", "鵬": "鹏", "鵮": "鹐", "鵯": "鹎", "鵲": "鹊", "鵷": "鹓", "鵾": "鹍", "鶄": "䴖", "鶇": "鸫", "鶉": "鹑", "鶊": "鹒", "鶓": "鹋", "鶖": "鹙", "鶘": "鹕", "鶚": "鹗", "鶡": "鹖", "鶥": "鹛", "鶩": "鹜", "鶪": "䴗", "鶬": "鸧", "鶯": "莺", "鶲": "鹟", "鶴": "鹤", "鶹": "鹠", "鶺": "鹡", "鶻": "鹘", "鶼": "鹣", "鶿": "鹚", "鷀": "鹚", "鷁": "鹢", "鷂": "鹞", "鷄": "鸡", "鷈": "䴘", "鷊": "鹝", "鷓": "鹧", "鷖": "鹥", "鷗": "鸥", "鷙": "鸷", "鷚": "鹨", "鷥": "鸶", "鷦": "鹪", "鷫": "鹔", "鷯": "鹩", "鷲": "鹫", "鷳": "鹇", "鷸": "鹬", "鷹": "鹰", "鷺": "鹭", "鷽": "鸴", "鷿": "䴙", "鸂": "㶉", "鸇": "鹯", "鸌": "鹱", "鸏": "鹲", "鸕": "鸬", "鸘": "鹴", "鸚": "鹦", "鸛": "鹳", "鸝": "鹂", "鸞": "鸾", "鹵": "卤", "鹹": "咸", "鹺": "鹾", "鹽": "盐", "麗": "丽", "麥": "麦", "麩": "麸", "麯": "曲", "麵": "面", "麼": "么", "麽": "么", "黃": "黄", "黌": "黉", "點": "点", "黨": "党", "黲": "黪", "黴": "霉", "黶": "黡", "黷": "黩", "黽": "黾", "黿": "鼋", "鼉": "鼍", "鼕": "冬", "鼴": "鼹", "齊": "齐", "齋": "斋", "齎": "赍", "齏": "齑", "齒": "齿", "齔": "龀", "齕": "龁", "齗": "龂", "齙": "龅", "齜": "龇", "齟": "龃", "齠": "龆", "齡": "龄", "齣": "出", "齦": "龈", "齪": "龊", "齬": "龉", "齲": "龋", "齶": "腭", "齷": "龌", "龍": "龙", "龎": "厐", "龐": "庞", "龔": "龚", "龕": "龛", "龜": "龟", "幾畫": "几画", "賣畫": "卖画", "滷鹼": "卤碱", "原畫": "原画", "口鹼": "口碱", "古畫": "古画", "名畫": "名画", "奇畫": "奇画", "如畫": "如画", "弱鹼": "弱碱", "彩畫": "彩画", "所畫": 
"所画", "扉畫": "扉画", "教畫": "教画", "水鹼": "水碱", "洋鹼": "洋碱", "炭畫": "炭画", "畫一": "画一", "畫上": "画上", "畫下": "画下", "畫中": "画中", "畫供": "画供", "畫兒": "画儿", "畫具": "画具", "畫出": "画出", "畫史": "画史", "畫品": "画品", "畫商": "画商", "畫圈": "画圈", "畫境": "画境", "畫工": "画工", "畫帖": "画帖", "畫幅": "画幅", "畫意": "画意", "畫成": "画成", "畫景": "画景", "畫本": "画本", "畫架": "画架", "畫框": "画框", "畫法": "画法", "畫王": "画王", "畫界": "画界", "畫符": "画符", "畫紙": "画纸", "畫線": "画线", "畫航": "画航", "畫舫": "画舫", "畫虎": "画虎", "畫論": "画论", "畫譜": "画谱", "畫象": "画象", "畫質": "画质", "畫貼": "画贴", "畫軸": "画轴", "畫頁": "画页", "鹽鹼": "盐碱", "鹼": "碱", "鹼基": "碱基", "鹼度": "碱度", "鹼水": "碱水", "鹼熔": "碱熔", "磁畫": "磁画", "策畫": "策画", "組畫": "组画", "絹畫": "绢画", "耐鹼": "耐碱", "肉鹼": "肉碱", "膠畫": "胶画", "茶鹼": "茶碱", "西畫": "西画", "貼畫": "贴画", "返鹼": "返碱", "鍾鍛": "锺锻", "鍛鍾": "锻锺", "雕畫": "雕画", "鯰": "鲶", "三聯畫": "三联画", "中國畫": "中国画", "書畫": "书画", "書畫社": "书画社", "五筆畫": "五笔画", "作畫": "作画", "入畫": "入画", "寫生畫": "写生画", "刻畫": "刻画", "動畫": "动画", "勾畫": "勾画", "單色畫": "单色画", "卡通畫": "卡通画", "國畫": "国画", "圖畫": "图画", "壁畫": "壁画", "字畫": "字画", "宣傳畫": "宣传画", "工筆畫": "工笔画", "年畫": "年画", "幽默畫": "幽默画", "指畫": "指画", "描畫": "描画", "插畫": "插画", "擘畫": "擘画", "春畫": "春画", "木刻畫": "木刻画", "機械畫": "机械画", "比畫": "比画", "毛筆畫": "毛笔画", "水粉畫": "水粉画", "油畫": "油画", "海景畫": "海景画", "漫畫": "漫画", "點畫": "点画", "版畫": "版画", "畫": "画", "畫像": "画像", "畫冊": "画册", "畫刊": "画刊", "畫匠": "画匠", "畫捲": "画卷", "畫圖": "画图", "畫壇": "画坛", "畫室": "画室", "畫家": "画家", "畫屏": "画屏", "畫展": "画展", "畫布": "画布", "畫師": "画师", "畫廊": "画廊", "畫報": "画报", "畫押": "画押", "畫板": "画板", "畫片": "画片", "畫畫": "画画", "畫皮": "画皮", "畫眉鳥": "画眉鸟", "畫稿": "画稿", "畫筆": "画笔", "畫院": "画院", "畫集": "画集", "畫面": "画面", "筆畫": "笔画", "細密畫": "细密画", "繪畫": "绘画", "自畫像": "自画像", "蠟筆畫": "蜡笔画", "裸體畫": "裸体画", "西洋畫": "西洋画", "透視畫": "透视画", "銅版畫": "铜版画", "鍾": "锺", "靜物畫": "静物画", "餘": "馀", } zh2TW = { "缺省": "預設", "串行": "串列", "以太网": "乙太網", "位图": "點陣圖", "例程": "常式", "信道": "通道", "光标": "游標", "光盘": "光碟", "光驱": "光碟機", "全角": "全形", "加载": "載入", "半角": "半形", "变量": "變數", "噪声": "雜訊", "脱机": "離線", "声卡": "音效卡", "老字号": "老字號", "字号": "字型大小", "字库": "字型檔", "字段": "欄位", "字符": "字元", "存盘": "存檔", 
"寻址": "定址", "尾注": "章節附註", "异步": "非同步", "总线": "匯流排", "括号": "括弧", "接口": "介面", "控件": "控制項", "权限": "許可權", "盘片": "碟片", "硅片": "矽片", "硅谷": "矽谷", "硬盘": "硬碟", "磁盘": "磁碟", "磁道": "磁軌", "程控": "程式控制", "端口": "埠", "算子": "運算元", "算法": "演算法", "芯片": "晶片", "芯片": "晶元", "词组": "片語", "译码": "解碼", "软驱": "軟碟機", "快闪存储器": "快閃記憶體", "闪存": "快閃記憶體", "鼠标": "滑鼠", "进制": "進位", "交互式": "互動式", "仿真": "模擬", "优先级": "優先順序", "传感": "感測", "便携式": "攜帶型", "信息论": "資訊理論", "写保护": "防寫", "分布式": "分散式", "分辨率": "解析度", "服务器": "伺服器", "等于": "等於", "局域网": "區域網", "计算机": "電腦", "扫瞄仪": "掃瞄器", "宽带": "寬頻", "数据库": "資料庫", "奶酪": "乳酪", "巨商": "鉅賈", "手电": "手電筒", "万历": "萬曆", "永历": "永曆", "词汇": "辭彙", "习用": "慣用", "元音": "母音", "任意球": "自由球", "头球": "頭槌", "入球": "進球", "粒入球": "顆進球", "打门": "射門", "火锅盖帽": "蓋火鍋", "打印机": "印表機", "打印機": "印表機", "字节": "位元組", "字節": "位元組", "打印": "列印", "打印": "列印", "硬件": "硬體", "硬件": "硬體", "二极管": "二極體", "二極管": "二極體", "三极管": "三極體", "三極管": "三極體", "软件": "軟體", "軟件": "軟體", "网络": "網路", "網絡": "網路", "人工智能": "人工智慧", "航天飞机": "太空梭", "穿梭機": "太空梭", "因特网": "網際網路", "互聯網": "網際網路", "机器人": "機器人", "機械人": "機器人", "移动电话": "行動電話", "流動電話": "行動電話", "调制解调器": "數據機", "調制解調器": "數據機", "短信": "簡訊", "短訊": "簡訊", "乌兹别克斯坦": "烏茲別克", "乍得": "查德", "乍得": "查德", "也门": "葉門", "也門": "葉門", "伯利兹": "貝里斯", "伯利茲": "貝里斯", "佛得角": "維德角", "佛得角": "維德角", "克罗地亚": "克羅埃西亞", "克羅地亞": "克羅埃西亞", "冈比亚": "甘比亞", "岡比亞": "甘比亞", "几内亚比绍": "幾內亞比索", "幾內亞比紹": "幾內亞比索", "列支敦士登": "列支敦斯登", "列支敦士登": "列支敦斯登", "利比里亚": "賴比瑞亞", "利比里亞": "賴比瑞亞", "加纳": "迦納", "加納": "迦納", "加蓬": "加彭", "加蓬": "加彭", "博茨瓦纳": "波札那", "博茨瓦納": "波札那", "卡塔尔": "卡達", "卡塔爾": "卡達", "卢旺达": "盧安達", "盧旺達": "盧安達", "危地马拉": "瓜地馬拉", "危地馬拉": "瓜地馬拉", "厄瓜多尔": "厄瓜多", "厄瓜多爾": "厄瓜多", "厄立特里亚": "厄利垂亞", "厄立特里亞": "厄利垂亞", "吉布提": "吉布地", "吉布堤": "吉布地", "哈萨克斯坦": "哈薩克", "哥斯达黎加": "哥斯大黎加", "哥斯達黎加": "哥斯大黎加", "图瓦卢": "吐瓦魯", "圖瓦盧": "吐瓦魯", "土库曼斯坦": "土庫曼", "圣卢西亚": "聖露西亞", "聖盧西亞": "聖露西亞", "圣基茨和尼维斯": "聖克里斯多福及尼維斯", "聖吉斯納域斯": "聖克里斯多福及尼維斯", "圣文森特和格林纳丁斯": "聖文森及格瑞那丁", "聖文森特和格林納丁斯": "聖文森及格瑞那丁", "圣马力诺": "聖馬利諾", "聖馬力諾": "聖馬利諾", "圭亚那": "蓋亞那", "圭亞那": "蓋亞那", "坦桑尼亚": "坦尚尼亞", "坦桑尼亞": "坦尚尼亞", 
"埃塞俄比亚": "衣索比亞", "埃塞俄比亞": "衣索比亞", "基里巴斯": "吉里巴斯", "基里巴斯": "吉里巴斯", "塔吉克斯坦": "塔吉克", "塞拉利昂": "獅子山", "塞拉利昂": "獅子山", "塞浦路斯": "塞普勒斯", "塞浦路斯": "塞普勒斯", "塞舌尔": "塞席爾", "塞舌爾": "塞席爾", "多米尼加": "多明尼加", "多明尼加共和國": "多明尼加", "多米尼加联邦": "多米尼克", "多明尼加聯邦": "多米尼克", "安提瓜和巴布达": "安地卡及巴布達", "安提瓜和巴布達": "安地卡及巴布達", "尼日利亚": "奈及利亞", "尼日利亞": "奈及利亞", "尼日尔": "尼日", "尼日爾": "尼日", "巴巴多斯": "巴貝多", "巴巴多斯": "巴貝多", "巴布亚新几内亚": "巴布亞紐幾內亞", "巴布亞新畿內亞": "巴布亞紐幾內亞", "布基纳法索": "布吉納法索", "布基納法索": "布吉納法索", "布隆迪": "蒲隆地", "布隆迪": "蒲隆地", "希腊": "希臘", "帕劳": "帛琉", "意大利": "義大利", "意大利": "義大利", "所罗门群岛": "索羅門群島", "所羅門群島": "索羅門群島", "文莱": "汶萊", "斯威士兰": "史瓦濟蘭", "斯威士蘭": "史瓦濟蘭", "斯洛文尼亚": "斯洛維尼亞", "斯洛文尼亞": "斯洛維尼亞", "新西兰": "紐西蘭", "新西蘭": "紐西蘭", "格林纳达": "格瑞那達", "格林納達": "格瑞那達", "格鲁吉亚": "喬治亞", "格魯吉亞": "喬治亞", "佐治亚": "喬治亞", "佐治亞": "喬治亞", "毛里塔尼亚": "茅利塔尼亞", "毛里塔尼亞": "茅利塔尼亞", "毛里求斯": "模里西斯", "毛里裘斯": "模里西斯", "沙特阿拉伯": "沙烏地阿拉伯", "沙地阿拉伯": "沙烏地阿拉伯", "波斯尼亚和黑塞哥维那": "波士尼亞赫塞哥維納", "波斯尼亞黑塞哥維那": "波士尼亞赫塞哥維納", "津巴布韦": "辛巴威", "津巴布韋": "辛巴威", "洪都拉斯": "宏都拉斯", "洪都拉斯": "宏都拉斯", "特立尼达和托巴哥": "千里達托貝哥", "特立尼達和多巴哥": "千里達托貝哥", "瑙鲁": "諾魯", "瑙魯": "諾魯", "瓦努阿图": "萬那杜", "瓦努阿圖": "萬那杜", "溫納圖萬": "那杜", "科摩罗": "葛摩", "科摩羅": "葛摩", "科特迪瓦": "象牙海岸", "突尼斯": "突尼西亞", "索马里": "索馬利亞", "索馬里": "索馬利亞", "老挝": "寮國", "老撾": "寮國", "肯尼亚": "肯亞", "肯雅": "肯亞", "苏里南": "蘇利南", "莫桑比克": "莫三比克", "莱索托": "賴索托", "萊索托": "賴索托", "贝宁": "貝南", "貝寧": "貝南", "赞比亚": "尚比亞", "贊比亞": "尚比亞", "阿塞拜疆": "亞塞拜然", "阿塞拜疆": "亞塞拜然", "阿拉伯联合酋长国": "阿拉伯聯合大公國", "阿拉伯聯合酋長國": "阿拉伯聯合大公國", "马尔代夫": "馬爾地夫", "馬爾代夫": "馬爾地夫", "马耳他": "馬爾他", "马里共和国": "馬利共和國", "馬里共和國": "馬利共和國", "方便面": "速食麵", "快速面": "速食麵", "即食麵": "速食麵", "薯仔": "土豆", "蹦极跳": "笨豬跳", "绑紧跳": "笨豬跳", "冷菜": "冷盤", "凉菜": "冷盤", "出租车": "計程車", "台球": "撞球", "桌球": "撞球", "雪糕": "冰淇淋", "卫生": "衛生", "衞生": "衛生", "平治": "賓士", "奔驰": "賓士", "積架": "捷豹", "福士": "福斯", "雪铁龙": "雪鐵龍", "马自达": "馬自達", "萬事得": "馬自達", "拿破仑": "拿破崙", "拿破侖": "拿破崙", "布什": "布希", "布殊": "布希", "克林顿": "柯林頓", "克林頓": "柯林頓", "侯赛因": "海珊", "侯賽因": "海珊", "凡高": "梵谷", "狄安娜": "黛安娜", "戴安娜": "黛安娜", "赫拉": "希拉", } zh2HK = { "打印机": "打印機", "印表機": "打印機", "字节": "位元組", "字節": 
"位元組", "打印": "打印", "列印": "打印", "硬件": "硬件", "硬體": "硬件", "二极管": "二極管", "二極體": "二極管", "三极管": "三極管", "三極體": "三極管", "数码": "數碼", "數位": "數碼", "软件": "軟件", "軟體": "軟件", "网络": "網絡", "網路": "網絡", "人工智能": "人工智能", "人工智慧": "人工智能", "航天飞机": "穿梭機", "太空梭": "穿梭機", "因特网": "互聯網", "網際網路": "互聯網", "机器人": "機械人", "機器人": "機械人", "移动电话": "流動電話", "行動電話": "流動電話", "调制解调器": "調制解調器", "數據機": "調制解調器", "短信": "短訊", "簡訊": "短訊", "乍得": "乍得", "查德": "乍得", "也门": "也門", "葉門": "也門", "伯利兹": "伯利茲", "貝里斯": "伯利茲", "佛得角": "佛得角", "維德角": "佛得角", "克罗地亚": "克羅地亞", "克羅埃西亞": "克羅地亞", "冈比亚": "岡比亞", "甘比亞": "岡比亞", "几内亚比绍": "幾內亞比紹", "幾內亞比索": "幾內亞比紹", "列支敦士登": "列支敦士登", "列支敦斯登": "列支敦士登", "利比里亚": "利比里亞", "賴比瑞亞": "利比里亞", "加纳": "加納", "迦納": "加納", "加蓬": "加蓬", "加彭": "加蓬", "博茨瓦纳": "博茨瓦納", "波札那": "博茨瓦納", "卡塔尔": "卡塔爾", "卡達": "卡塔爾", "卢旺达": "盧旺達", "盧安達": "盧旺達", "危地马拉": "危地馬拉", "瓜地馬拉": "危地馬拉", "厄瓜多尔": "厄瓜多爾", "厄瓜多": "厄瓜多爾", "厄立特里亚": "厄立特里亞", "厄利垂亞": "厄立特里亞", "吉布提": "吉布堤", "吉布地": "吉布堤", "哥斯达黎加": "哥斯達黎加", "哥斯大黎加": "哥斯達黎加", "图瓦卢": "圖瓦盧", "吐瓦魯": "圖瓦盧", "圣卢西亚": "聖盧西亞", "聖露西亞": "聖盧西亞", "圣基茨和尼维斯": "聖吉斯納域斯", "聖克里斯多福及尼維斯": "聖吉斯納域斯", "圣文森特和格林纳丁斯": "聖文森特和格林納丁斯", "聖文森及格瑞那丁": "聖文森特和格林納丁斯", "圣马力诺": "聖馬力諾", "聖馬利諾": "聖馬力諾", "圭亚那": "圭亞那", "蓋亞那": "圭亞那", "坦桑尼亚": "坦桑尼亞", "坦尚尼亞": "坦桑尼亞", "埃塞俄比亚": "埃塞俄比亞", "衣索匹亞": "埃塞俄比亞", "衣索比亞": "埃塞俄比亞", "基里巴斯": "基里巴斯", "吉里巴斯": "基里巴斯", "狮子山": "獅子山", "塞普勒斯": "塞浦路斯", "塞舌尔": "塞舌爾", "塞席爾": "塞舌爾", "多米尼加": "多明尼加共和國", "多明尼加": "多明尼加共和國", "多米尼加联邦": "多明尼加聯邦", "多米尼克": "多明尼加聯邦", "安提瓜和巴布达": "安提瓜和巴布達", "安地卡及巴布達": "安提瓜和巴布達", "尼日利亚": "尼日利亞", "奈及利亞": "尼日利亞", "尼日尔": "尼日爾", "尼日": "尼日爾", "巴巴多斯": "巴巴多斯", "巴貝多": "巴巴多斯", "巴布亚新几内亚": "巴布亞新畿內亞", "巴布亞紐幾內亞": "巴布亞新畿內亞", "布基纳法索": "布基納法索", "布吉納法索": "布基納法索", "布隆迪": "布隆迪", "蒲隆地": "布隆迪", "義大利": "意大利", "所罗门群岛": "所羅門群島", "索羅門群島": "所羅門群島", "斯威士兰": "斯威士蘭", "史瓦濟蘭": "斯威士蘭", "斯洛文尼亚": "斯洛文尼亞", "斯洛維尼亞": "斯洛文尼亞", "新西兰": "新西蘭", "紐西蘭": "新西蘭", "格林纳达": "格林納達", "格瑞那達": "格林納達", "格鲁吉亚": "喬治亞", "格魯吉亞": "喬治亞", "梵蒂冈": "梵蒂岡", "毛里塔尼亚": "毛里塔尼亞", "茅利塔尼亞": "毛里塔尼亞", "毛里求斯": "毛里裘斯", "模里西斯": "毛里裘斯", "沙烏地阿拉伯": "沙特阿拉伯", "波斯尼亚和黑塞哥维那": 
"波斯尼亞黑塞哥維那", "波士尼亞赫塞哥維納": "波斯尼亞黑塞哥維那", "津巴布韦": "津巴布韋", "辛巴威": "津巴布韋", "洪都拉斯": "洪都拉斯", "宏都拉斯": "洪都拉斯", "特立尼达和托巴哥": "特立尼達和多巴哥", "千里達托貝哥": "特立尼達和多巴哥", "瑙鲁": "瑙魯", "諾魯": "瑙魯", "瓦努阿图": "瓦努阿圖", "萬那杜": "瓦努阿圖", "科摩罗": "科摩羅", "葛摩": "科摩羅", "索马里": "索馬里", "索馬利亞": "索馬里", "老挝": "老撾", "寮國": "老撾", "肯尼亚": "肯雅", "肯亞": "肯雅", "莫桑比克": "莫桑比克", "莫三比克": "莫桑比克", "莱索托": "萊索托", "賴索托": "萊索托", "贝宁": "貝寧", "貝南": "貝寧", "赞比亚": "贊比亞", "尚比亞": "贊比亞", "阿塞拜疆": "阿塞拜疆", "亞塞拜然": "阿塞拜疆", "阿拉伯联合酋长国": "阿拉伯聯合酋長國", "阿拉伯聯合大公國": "阿拉伯聯合酋長國", "马尔代夫": "馬爾代夫", "馬爾地夫": "馬爾代夫", "馬利共和國": "馬里共和國", "方便面": "即食麵", "快速面": "即食麵", "速食麵": "即食麵", "泡麵": "即食麵", "土豆": "馬鈴薯", "华乐": "中樂", "民乐": "中樂", "計程車": "的士", "出租车": "的士", "公車": "巴士", "自行车": "單車", "犬只": "狗隻", "台球": "桌球", "撞球": "桌球", "冰淇淋": "雪糕", "賓士": "平治", "捷豹": "積架", "福斯": "福士", "雪铁龙": "先進", "雪鐵龍": "先進", "沃尓沃": "富豪", "马自达": "萬事得", "馬自達": "萬事得", "寶獅": "標致", "拿破崙": "拿破侖", "布什": "布殊", "布希": "布殊", "克林顿": "克林頓", "柯林頓": "克林頓", "萨达姆": "薩達姆", "海珊": "侯賽因", "侯赛因": "侯賽因", "大卫·贝克汉姆": "大衛碧咸", "迈克尔·欧文": "米高奧雲", "珍妮弗·卡普里亚蒂": "卡佩雅蒂", "马拉特·萨芬": "沙芬", "迈克尔·舒马赫": "舒麥加", "希特勒": "希特拉", "狄安娜": "戴安娜", "黛安娜": "戴安娜", } zh2CN = { "記憶體": "内存", "預設": "默认", "串列": "串行", "乙太網": "以太网", "點陣圖": "位图", "常式": "例程", "游標": "光标", "光碟": "光盘", "光碟機": "光驱", "全形": "全角", "共用": "共享", "載入": "加载", "半形": "半角", "變數": "变量", "雜訊": "噪声", "因數": "因子", "功能變數名稱": "域名", "音效卡": "声卡", "字型大小": "字号", "字型檔": "字库", "欄位": "字段", "字元": "字符", "存檔": "存盘", "定址": "寻址", "章節附註": "尾注", "非同步": "异步", "匯流排": "总线", "括弧": "括号", "介面": "接口", "控制項": "控件", "許可權": "权限", "碟片": "盘片", "矽片": "硅片", "矽谷": "硅谷", "硬碟": "硬盘", "磁碟": "磁盘", "磁軌": "磁道", "程式控制": "程控", "運算元": "算子", "演算法": "算法", "晶片": "芯片", "晶元": "芯片", "片語": "词组", "軟碟機": "软驱", "快閃記憶體": "快闪存储器", "滑鼠": "鼠标", "進位": "进制", "互動式": "交互式", "優先順序": "优先级", "感測": "传感", "攜帶型": "便携式", "資訊理論": "信息论", "迴圈": "循环", "防寫": "写保护", "分散式": "分布式", "解析度": "分辨率", "伺服器": "服务器", "等於": "等于", "區域網": "局域网", "巨集": "宏", "掃瞄器": "扫瞄仪", "寬頻": "宽带", "資料庫": "数据库", "乳酪": "奶酪", "鉅賈": "巨商", "手電筒": "手电", "萬曆": "万历", "永曆": "永历", "辭彙": "词汇", "母音": 
"元音", "自由球": "任意球", "頭槌": "头球", "進球": "入球", "顆進球": "粒入球", "射門": "打门", "蓋火鍋": "火锅盖帽", "印表機": "打印机", "打印機": "打印机", "位元組": "字节", "字節": "字节", "列印": "打印", "打印": "打印", "硬體": "硬件", "二極體": "二极管", "二極管": "二极管", "三極體": "三极管", "三極管": "三极管", "數位": "数码", "數碼": "数码", "軟體": "软件", "軟件": "软件", "網路": "网络", "網絡": "网络", "人工智慧": "人工智能", "太空梭": "航天飞机", "穿梭機": "航天飞机", "網際網路": "因特网", "互聯網": "因特网", "機械人": "机器人", "機器人": "机器人", "行動電話": "移动电话", "流動電話": "移动电话", "調制解調器": "调制解调器", "數據機": "调制解调器", "短訊": "短信", "簡訊": "短信", "烏茲別克": "乌兹别克斯坦", "查德": "乍得", "乍得": "乍得", "也門": "", "葉門": "也门", "伯利茲": "伯利兹", "貝里斯": "伯利兹", "維德角": "佛得角", "佛得角": "佛得角", "克羅地亞": "克罗地亚", "克羅埃西亞": "克罗地亚", "岡比亞": "冈比亚", "甘比亞": "冈比亚", "幾內亞比紹": "几内亚比绍", "幾內亞比索": "几内亚比绍", "列支敦斯登": "列支敦士登", "列支敦士登": "列支敦士登", "利比里亞": "利比里亚", "賴比瑞亞": "利比里亚", "加納": "加纳", "迦納": "加纳", "加彭": "加蓬", "加蓬": "加蓬", "博茨瓦納": "博茨瓦纳", "波札那": "博茨瓦纳", "卡塔爾": "卡塔尔", "卡達": "卡塔尔", "盧旺達": "卢旺达", "盧安達": "卢旺达", "危地馬拉": "危地马拉", "瓜地馬拉": "危地马拉", "厄瓜多爾": "厄瓜多尔", "厄瓜多": "厄瓜多尔", "厄立特里亞": "厄立特里亚", "厄利垂亞": "厄立特里亚", "吉布堤": "吉布提", "吉布地": "吉布提", "哈薩克": "哈萨克斯坦", "哥斯達黎加": "哥斯达黎加", "哥斯大黎加": "哥斯达黎加", "圖瓦盧": "图瓦卢", "吐瓦魯": "图瓦卢", "土庫曼": "土库曼斯坦", "聖盧西亞": "圣卢西亚", "聖露西亞": "圣卢西亚", "聖吉斯納域斯": "圣基茨和尼维斯", "聖克里斯多福及尼維斯": "圣基茨和尼维斯", "聖文森特和格林納丁斯": "圣文森特和格林纳丁斯", "聖文森及格瑞那丁": "圣文森特和格林纳丁斯", "聖馬力諾": "圣马力诺", "聖馬利諾": "圣马力诺", "圭亞那": "圭亚那", "蓋亞那": "圭亚那", "坦桑尼亞": "坦桑尼亚", "坦尚尼亞": "坦桑尼亚", "埃塞俄比亞": "埃塞俄比亚", "衣索匹亞": "埃塞俄比亚", "衣索比亞": "埃塞俄比亚", "吉里巴斯": "基里巴斯", "基里巴斯": "基里巴斯", "塔吉克": "塔吉克斯坦", "塞拉利昂": "塞拉利昂", "塞普勒斯": "塞浦路斯", "塞浦路斯": "塞浦路斯", "塞舌爾": "塞舌尔", "塞席爾": "塞舌尔", "多明尼加共和國": "多米尼加", "多明尼加": "多米尼加", "多明尼加聯邦": "多米尼加联邦", "多米尼克": "多米尼加联邦", "安提瓜和巴布達": "安提瓜和巴布达", "安地卡及巴布達": "安提瓜和巴布达", "尼日利亞": "尼日利亚", "奈及利亞": "尼日利亚", "尼日爾": "尼日尔", "尼日": "尼日尔", "巴貝多": "巴巴多斯", "巴巴多斯": "巴巴多斯", "巴布亞新畿內亞": "巴布亚新几内亚", "巴布亞紐幾內亞": "巴布亚新几内亚", "布基納法索": "布基纳法索", "布吉納法索": "布基纳法索", "蒲隆地": "布隆迪", "布隆迪": "布隆迪", "希臘": "希腊", "帛琉": "帕劳", "義大利": "意大利", "意大利": "意大利", "所羅門群島": "所罗门群岛", "索羅門群島": "所罗门群岛", "汶萊": "文莱", "斯威士蘭": "斯威士兰", "史瓦濟蘭": "斯威士兰", "斯洛文尼亞": 
"斯洛文尼亚", "斯洛維尼亞": "斯洛文尼亚", "新西蘭": "新西兰", "紐西蘭": "新西兰", "格林納達": "格林纳达", "格瑞那達": "格林纳达", "格魯吉亞": "乔治亚", "喬治亞": "乔治亚", "梵蒂岡": "梵蒂冈", "毛里塔尼亞": "毛里塔尼亚", "茅利塔尼亞": "毛里塔尼亚", "毛里裘斯": "毛里求斯", "模里西斯": "毛里求斯", "沙地阿拉伯": "沙特阿拉伯", "沙烏地阿拉伯": "沙特阿拉伯", "波斯尼亞黑塞哥維那": "波斯尼亚和黑塞哥维那", "波士尼亞赫塞哥維納": "波斯尼亚和黑塞哥维那", "津巴布韋": "津巴布韦", "辛巴威": "津巴布韦", "宏都拉斯": "洪都拉斯", "洪都拉斯": "洪都拉斯", "特立尼達和多巴哥": "特立尼达和托巴哥", "千里達托貝哥": "特立尼达和托巴哥", "瑙魯": "瑙鲁", "諾魯": "瑙鲁", "瓦努阿圖": "瓦努阿图", "萬那杜": "瓦努阿图", "溫納圖": "瓦努阿图", "科摩羅": "科摩罗", "葛摩": "科摩罗", "象牙海岸": "科特迪瓦", "突尼西亞": "突尼斯", "索馬里": "索马里", "索馬利亞": "索马里", "老撾": "老挝", "寮國": "老挝", "肯雅": "肯尼亚", "肯亞": "肯尼亚", "蘇利南": "苏里南", "莫三比克": "莫桑比克", "莫桑比克": "莫桑比克", "萊索托": "莱索托", "賴索托": "莱索托", "貝寧": "贝宁", "貝南": "贝宁", "贊比亞": "赞比亚", "尚比亞": "赞比亚", "亞塞拜然": "阿塞拜疆", "阿塞拜疆": "阿塞拜疆", "阿拉伯聯合酋長國": "阿拉伯联合酋长国", "阿拉伯聯合大公國": "阿拉伯联合酋长国", "南韓": "韩国", "馬爾代夫": "马尔代夫", "馬爾地夫": "马尔代夫", "馬爾他": "马耳他", "馬利共和國": "马里共和国", "即食麵": "方便面", "快速面": "方便面", "速食麵": "方便面", "泡麵": "方便面", "笨豬跳": "蹦极跳", "绑紧跳": "蹦极跳", "冷盤": "凉菜", "冷菜": "凉菜", "散钱": "零钱", "谐星": "笑星", "夜学": "夜校", "华乐": "民乐", "中樂": "民乐", "屋价": "房价", "的士": "出租车", "計程車": "出租车", "公車": "公共汽车", "單車": "自行车", "節慶": "节日", "芝士": "乾酪", "狗隻": "犬只", "士多啤梨": "草莓", "忌廉": "奶油", "桌球": "台球", "撞球": "台球", "雪糕": "冰淇淋", "衞生": "卫生", "衛生": "卫生", "賓士": "奔驰", "平治": "奔驰", "積架": "捷豹", "福斯": "大众", "福士": "大众", "雪鐵龍": "雪铁龙", "萬事得": "马自达", "馬自達": "马自达", "寶獅": "标志", "拿破崙": "拿破仑", "布殊": "布什", "布希": "布什", "柯林頓": "克林顿", "克林頓": "克林顿", "薩達姆": "萨达姆", "海珊": "萨达姆", "梵谷": "凡高", "大衛碧咸": "大卫·贝克汉姆", "米高奧雲": "迈克尔·欧文", "卡佩雅蒂": "珍妮弗·卡普里亚蒂", "沙芬": "马拉特·萨芬", "舒麥加": "迈克尔·舒马赫", "希特拉": "希特勒", "黛安娜": "戴安娜", "希拉": "赫拉", } zh2SG = { "方便面": "快速面", "速食麵": "快速面", "即食麵": "快速面", "蹦极跳": "绑紧跳", "笨豬跳": "绑紧跳", "凉菜": "冷菜", "冷盤": "冷菜", "零钱": "散钱", "散紙": "散钱", "笑星": "谐星", "夜校": "夜学", "民乐": "华乐", "住房": "住屋", "房价": "屋价", "泡麵": "快速面", }
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/zh_wiki.py
zh_wiki.py
from typing import Dict, Union, Optional, List, Any, Literal
from utils.user_agent import get_user_agent
from .utils import get_local_proxy
from services.log import logger
from pathlib import Path
from httpx import Response
from asyncio.exceptions import TimeoutError
from nonebot.adapters.onebot.v11 import MessageSegment
from playwright.async_api import Page
from .message_builder import image
from httpx import ConnectTimeout
from .browser import get_browser
from retrying import retry
import asyncio
import aiofiles
import httpx


class AsyncHttpx:
    """Thin async wrappers around httpx with default proxy / User-Agent handling."""

    # Default proxy mapping applied when use_proxy=True and no explicit proxy given.
    proxy = {"http://": get_local_proxy(), "https://": get_local_proxy()}

    @classmethod
    # NOTE(review): `retrying.retry` is synchronous; calling an async function
    # returns a coroutine without raising, so this decorator is likely a no-op
    # here — confirm before relying on it. Kept to preserve behavior.
    @retry(stop_max_attempt_number=3)
    async def get(
        cls,
        url: str,
        *,
        params: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
        cookies: Optional[Dict[str, str]] = None,
        use_proxy: bool = True,
        proxy: Optional[Dict[str, str]] = None,
        timeout: Optional[int] = 30,
        **kwargs,
    ) -> Response:
        """
        Send an async GET request.

        Args:
            url: target url.
            params: query parameters.
            headers: request headers; a random User-Agent is used when omitted.
            cookies: cookies.
            use_proxy: use the class-level default proxy when no explicit proxy given.
            proxy: explicit proxy mapping, overrides ``use_proxy``.
            timeout: request timeout in seconds.

        Returns:
            The httpx Response.
        """
        if not headers:
            headers = get_user_agent()
        proxy = proxy if proxy else cls.proxy if use_proxy else None
        async with httpx.AsyncClient(proxies=proxy) as client:
            return await client.get(
                url,
                params=params,
                headers=headers,
                cookies=cookies,
                timeout=timeout,
                **kwargs,
            )

    @classmethod
    async def post(
        cls,
        url: str,
        *,
        data: Optional[Dict[str, str]] = None,
        content: Any = None,
        files: Any = None,
        use_proxy: bool = True,
        proxy: Optional[Dict[str, str]] = None,
        json: Optional[Dict[str, Union[Any]]] = None,
        params: Optional[Dict[str, str]] = None,
        headers: Optional[Dict[str, str]] = None,
        cookies: Optional[Dict[str, str]] = None,
        timeout: Optional[int] = 30,
        **kwargs,
    ) -> Response:
        """
        Send an async POST request.

        Args:
            url: target url.
            data: form data.
            content: raw request body.
            files: multipart files.
            use_proxy: use the class-level default proxy when no explicit proxy given.
            proxy: explicit proxy mapping, overrides ``use_proxy``.
            json: json body.
            params: query parameters.
            headers: request headers; a random User-Agent is used when omitted.
            cookies: cookies.
            timeout: request timeout in seconds.

        Returns:
            The httpx Response.
        """
        if not headers:
            headers = get_user_agent()
        proxy = proxy if proxy else cls.proxy if use_proxy else None
        async with httpx.AsyncClient(proxies=proxy) as client:
            return await client.post(
                url,
                content=content,
                data=data,
                files=files,
                json=json,
                params=params,
                headers=headers,
                cookies=cookies,
                timeout=timeout,
                **kwargs,
            )

    @classmethod
    async def download_file(
        cls,
        url: str,
        path: Union[str, Path],
        *,
        params: Optional[Dict[str, str]] = None,
        use_proxy: bool = True,
        proxy: Optional[Dict[str, str]] = None,
        headers: Optional[Dict[str, str]] = None,
        cookies: Optional[Dict[str, str]] = None,
        timeout: Optional[int] = 30,
        **kwargs,
    ) -> bool:
        """
        Download ``url`` to ``path``, retrying up to 3 times on timeouts.

        Args:
            url: file url.
            path: destination path (parent directories are created).
            params: query parameters.
            use_proxy: use the default proxy.
            proxy: explicit proxy mapping.
            headers: request headers.
            cookies: cookies.
            timeout: request timeout in seconds.

        Returns:
            True on success, False on timeout or any other error.
        """
        if isinstance(path, str):
            path = Path(path)
        path.parent.mkdir(parents=True, exist_ok=True)
        try:
            for _ in range(3):
                try:
                    content = (
                        await cls.get(
                            url,
                            params=params,
                            headers=headers,
                            cookies=cookies,
                            use_proxy=use_proxy,
                            proxy=proxy,
                            timeout=timeout,
                            **kwargs,
                        )
                    ).content
                    async with aiofiles.open(path, "wb") as wf:
                        await wf.write(content)
                    logger.info(f"下载 {url} 成功.. Path:{path.absolute()}")
                    return True
                except (TimeoutError, ConnectTimeout):
                    # transient timeout: fall through to the next attempt
                    pass
            else:
                # for/else: all 3 attempts timed out
                logger.error(f"下载 {url} 下载超时.. Path:{path.absolute()}")
        except Exception as e:
            logger.error(f"下载 {url} 未知错误 {type(e)}:{e}.. Path:{path.absolute()}")
        return False

    @classmethod
    async def gather_download_file(
        cls,
        url_list: List[str],
        path_list: List[Union[str, Path]],
        *,
        limit_async_number: Optional[int] = None,
        params: Optional[Dict[str, str]] = None,
        use_proxy: bool = True,
        proxy: Optional[Dict[str, str]] = None,
        headers: Optional[Dict[str, str]] = None,
        cookies: Optional[Dict[str, str]] = None,
        timeout: Optional[int] = 30,
        **kwargs,
    ) -> List[bool]:
        """
        Download several files concurrently, optionally in bounded batches.

        Args:
            url_list: urls to download.
            path_list: destination paths, same length as ``url_list``.
            limit_async_number: max number of concurrent downloads per batch.
            params: query parameters.
            use_proxy: use the default proxy.
            proxy: explicit proxy mapping.
            headers: request headers.
            cookies: cookies.
            timeout: request timeout in seconds.

        Returns:
            Per-file success flags, in input order.

        Raises:
            UrlPathNumberNotEqual: when the two lists differ in length.
        """
        # BUG FIX: the original `if n := len(url_list) != len(path_list)` bound
        # `n` to the *comparison result* (a bool) because of walrus precedence,
        # so `n > limit_async_number` below was never true and batching never ran.
        if (n := len(url_list)) != len(path_list):
            raise UrlPathNumberNotEqual(
                f"Url数量与Path数量不对等,Url:{len(url_list)},Path:{len(path_list)}"
            )
        if limit_async_number and n > limit_async_number:
            # BUG FIX: the original remainder slice used url_list[j:], where j
            # already pointed past the end, silently dropping trailing files.
            _split_url_list = [
                url_list[i : i + limit_async_number]
                for i in range(0, n, limit_async_number)
            ]
            _split_path_list = [
                path_list[i : i + limit_async_number]
                for i in range(0, n, limit_async_number)
            ]
        else:
            _split_url_list = [url_list]
            _split_path_list = [path_list]
        result_: List[bool] = []
        for urls, paths in zip(_split_url_list, _split_path_list):
            # one batch at a time: create tasks, await them, collect results
            tasks = [
                asyncio.create_task(
                    cls.download_file(
                        url,
                        path,
                        params=params,
                        headers=headers,
                        cookies=cookies,
                        use_proxy=use_proxy,
                        timeout=timeout,
                        proxy=proxy,
                        **kwargs,
                    )
                )
                for url, path in zip(urls, paths)
            ]
            result_ += list(await asyncio.gather(*tasks))
        return result_


class AsyncPlaywright:
    """Convenience helpers over the async Playwright browser."""

    @classmethod
    async def _new_page(cls, user_agent: Optional[str] = None, **kwargs) -> Page:
        """
        Open a new browser page.

        Args:
            user_agent: user agent for the page.

        Raises:
            BrowserIsNone: when no browser instance is available.
        """
        browser = await get_browser()
        if browser:
            return await browser.new_page(user_agent=user_agent, **kwargs)
        raise BrowserIsNone("获取Browser失败...")

    @classmethod
    async def goto(
        cls,
        url: str,
        *,
        timeout: Optional[float] = 100000,
        wait_until: Optional[
            Literal["domcontentloaded", "load", "networkidle"]
        ] = "networkidle",
        referer: Optional[str] = None,
        **kwargs,
    ) -> Optional[Page]:
        """
        Open a new page and navigate to ``url``.

        Args:
            url: target url.
            timeout: navigation timeout in milliseconds.
            wait_until: navigation wait condition.
            referer: referer header.

        Returns:
            The open Page, or None when navigation failed (the page is closed).
        """
        page = None
        try:
            page = await cls._new_page(**kwargs)
            await page.goto(url, timeout=timeout, wait_until=wait_until, referer=referer)
            return page
        except Exception as e:
            logger.warning(f"Playwright 访问 url:{url} 发生错误 {type(e)}:{e}")
            if page:
                await page.close()
        return None

    @classmethod
    async def screenshot(
        cls,
        url: str,
        path: Union[Path, str],
        element: Union[str, List[str]],
        *,
        sleep: Optional[int] = None,
        viewport_size: Optional[Dict[str, int]] = None,
        wait_until: Optional[
            Literal["domcontentloaded", "load", "networkidle"]
        ] = "networkidle",
        timeout: Optional[float] = None,
        type_: Optional[Literal["jpeg", "png"]] = None,
        **kwargs,
    ) -> Optional[MessageSegment]:
        """
        Take a quick screenshot of an element on ``url``.

        For anything more involved, operate on a Page directly.

        Args:
            url: target url.
            path: file path the screenshot is saved to.
            element: a selector, or a list of selectors resolved in sequence.
            sleep: seconds to wait before capturing.
            viewport_size: page viewport, defaults to 2560x1080.
            wait_until: navigation wait condition.
            timeout: screenshot timeout in milliseconds.
            type_: image format ("jpeg" or "png").

        Returns:
            The image MessageSegment, or None on any failure.
        """
        page = None
        if viewport_size is None:
            viewport_size = dict(width=2560, height=1080)
        if isinstance(path, str):
            path = Path(path)
        try:
            page = await cls.goto(url, wait_until=wait_until, **kwargs)
            # BUG FIX: goto returns None on failure; the original then raised
            # AttributeError on page.set_viewport_size (masked by the except).
            if page is None:
                return None
            await page.set_viewport_size(viewport_size)
            if sleep:
                await asyncio.sleep(sleep)
            if isinstance(element, str):
                card = await page.query_selector(element)
            else:
                # resolve a chain of selectors, each scoped to the previous match
                card = page
                for e in element:
                    card = await card.query_selector(e)
            await card.screenshot(path=path, timeout=timeout, type=type_)
            return image(path)
        except Exception as e:
            logger.warning(f"Playwright 截图 url:{url} element:{element} 发生错误 {type(e)}:{e}")
        finally:
            if page:
                await page.close()
        return None


class UrlPathNumberNotEqual(Exception):
    """Raised when the url list and path list lengths differ."""

    pass


class BrowserIsNone(Exception):
    """Raised when no Playwright browser instance could be obtained."""

    pass
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/http_utils.py
http_utils.py
from configs.path_config import IMAGE_PATH, RECORD_PATH
from nonebot.adapters.onebot.v11.message import MessageSegment
from configs.config import NICKNAME
from services.log import logger
from typing import Union, List
from pathlib import Path
import os


def image(
    file: Union[str, Path, bytes] = None,
    path: str = None,
    b64: str = None,
) -> Union[MessageSegment, str]:
    """
    Build a MessageSegment.image message.

    Resolution order: Path object > bytes > base64 (b64) > file name / url
    (file names are resolved under resource/img by default).

    Args:
        file: image file name, absolute Path, raw bytes, or an http url.
        path: sub-directory of resource/img the file lives in.
        b64: image as base64, with or without the ``base64://`` prefix.

    Returns:
        The image MessageSegment, or "" when the image cannot be found.
    """
    if isinstance(file, Path):
        if file.exists():
            return MessageSegment.image(file)
        logger.warning(f"图片 {file.absolute()}缺失...")
        return ""
    elif isinstance(file, bytes):
        return MessageSegment.image(file)
    elif b64:
        return MessageSegment.image(b64 if "base64://" in b64 else "base64://" + b64)
    else:
        # BUG FIX: previously a call with neither file nor b64 raised
        # AttributeError on None.startswith; treat it as a missing image.
        if not file:
            return ""
        if file.startswith("http"):
            return MessageSegment.image(file)
        # a bare name with no extension defaults to .jpg
        if len(file.split(".")) == 1:
            file += ".jpg"
        if (file := IMAGE_PATH / path / file if path else IMAGE_PATH / file).exists():
            return MessageSegment.image(file)
        else:
            logger.warning(f"图片 {file} 缺失...")
            return ""


def at(qq: int) -> MessageSegment:
    """
    Build a MessageSegment.at message.

    Args:
        qq: the qq number to mention.
    """
    return MessageSegment.at(qq)


def record(voice_name: str, path: str = None) -> Union[MessageSegment, str]:
    """
    Build a MessageSegment.record (voice) message.

    Args:
        voice_name: audio file name (resolved under resource/voice by default)
                    or an http url.
        path: sub-directory of resource/voice the file lives in.

    Returns:
        The record MessageSegment, or "" when the file is missing.
    """
    # a bare name with no extension defaults to .mp3 (note: this also applies
    # to extension-less urls, preserved from the original behavior)
    if len(voice_name.split(".")) == 1:
        voice_name += ".mp3"
    file = (
        Path(RECORD_PATH) / path / voice_name
        if path
        else Path(RECORD_PATH) / voice_name
    )
    if "http" in voice_name:
        return MessageSegment.record(voice_name)
    if file.exists():
        result = MessageSegment.record(f"file:///{file.absolute()}")
        return result
    else:
        logger.warning(f"语音{file.absolute()}缺失...")
        return ""


def text(msg: str) -> MessageSegment:
    """
    Build a MessageSegment.text message.

    Args:
        msg: message text.
    """
    return MessageSegment.text(msg)


def contact_user(qq: int) -> MessageSegment:
    """
    Build a MessageSegment.contact_user message.

    Args:
        qq: qq number.
    """
    return MessageSegment.contact_user(qq)


def share(
    url: str, title: str, content: str = None, image_url: str = None
) -> MessageSegment:
    """
    Build a MessageSegment.share message.

    Args:
        url: link to share.
        title: share title.
        content: share description.
        image_url: preview image url.
    """
    return MessageSegment.share(url, title, content, image_url)


def xml(data: str) -> MessageSegment:
    """
    Build a MessageSegment.xml message.

    Args:
        data: xml payload.
    """
    return MessageSegment.xml(data)


# NOTE: shadows the builtin ``json`` module name; kept for API compatibility.
def json(data: str) -> MessageSegment:
    """
    Build a MessageSegment.json message.

    Args:
        data: json payload.
    """
    return MessageSegment.json(data)


def face(id_: int) -> MessageSegment:
    """
    Build a MessageSegment.face message.

    Args:
        id_: face (emoji) id.
    """
    return MessageSegment.face(id_)


def poke(qq: int) -> MessageSegment:
    """
    Build a poke MessageSegment.

    Args:
        qq: qq number to poke.
    """
    return MessageSegment("poke", {"qq": qq})


def music(type_: str, id_: int) -> MessageSegment:
    """
    Build a MessageSegment.music message.

    Args:
        type_: music platform type.
        id_: song id.
    """
    return MessageSegment.music(type_, id_)


def custom_forward_msg(
    msg_list: List[str],
    uin: Union[int, str],
    name: str = f"这里是{NICKNAME}",
) -> List[dict]:
    """
    Build a custom forward (merged) message node list.

    Args:
        msg_list: messages to merge.
        uin: sender qq number.
        name: display name for each node.

    Returns:
        A list of forward-node dicts consumable by the OneBot API.
    """
    uin = int(uin)
    mes_list = []
    for _message in msg_list:
        data = {
            "type": "node",
            "data": {
                "name": name,
                "uin": f"{uin}",
                "content": _message,
            },
        }
        mes_list.append(data)
    return mes_list
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/message_builder.py
message_builder.py
from copy import deepcopy
import re

# optional psyco JIT speedup (Python 2 era); harmless no-op when absent
try:
    import psyco
    psyco.full()
except ImportError:  # narrowed from a bare except: only the import can fail here
    pass

from .zh_wiki import zh2Hant, zh2Hans

import sys
py3k = sys.version_info >= (3, 0, 0)
if py3k:
    UEMPTY = ''
else:
    # Python 2: decode the mapping tables from utf8 bytes to unicode
    _zh2Hant, _zh2Hans = {}, {}
    for old, new in ((zh2Hant, _zh2Hant), (zh2Hans, _zh2Hans)):
        for k, v in old.items():
            new[k.decode('utf8')] = v.decode('utf8')
    zh2Hant = _zh2Hant
    zh2Hans = _zh2Hans
    UEMPTY = ''.decode('utf8')

# states
(START, END, FAIL, WAIT_TAIL) = list(range(4))
# conditions
(TAIL, ERROR, MATCHED_SWITCH, UNMATCHED_SWITCH, CONNECTOR) = list(range(5))

# name -> ConvertMap registry, filled by registery() below
MAPS = {}


class Node(object):
    """One entry of a conversion trie: a source word and its replacement."""

    def __init__(self, from_word, to_word=None, is_tail=True,
            have_child=False):
        self.from_word = from_word
        if to_word is None:
            # no mapping: the word converts to itself ("original")
            self.to_word = from_word
            self.data = (is_tail, have_child, from_word)
            self.is_original = True
        else:
            self.to_word = to_word or from_word
            self.data = (is_tail, have_child, to_word)
            self.is_original = False
        self.is_tail = is_tail
        self.have_child = have_child

    def is_original_long_word(self):
        """True for an unmapped word longer than one character."""
        return self.is_original and len(self.from_word) > 1

    def is_follow(self, chars):
        """True when ``chars`` is NOT the prefix of this node's word."""
        return chars != self.from_word[:-1]

    def __str__(self):
        return '<Node, %s, %s, %s, %s>' % (repr(self.from_word),
                repr(self.to_word), self.is_tail, self.have_child)

    __repr__ = __str__


class ConvertMap(object):
    """Prefix-closed lookup table built from a word-mapping dict."""

    def __init__(self, name, mapping=None):
        self.name = name
        self._map = {}
        if mapping:
            self.set_convert_map(mapping)

    def set_convert_map(self, mapping):
        """Build the internal map, adding every proper prefix of every key."""
        convert_map = {}
        have_child = {}
        max_key_length = 0
        for key in sorted(mapping.keys()):
            if len(key) > 1:
                for i in range(1, len(key)):
                    parent_key = key[:i]
                    have_child[parent_key] = True
            have_child[key] = False
            max_key_length = max(max_key_length, len(key))
        for key in sorted(have_child.keys()):
            convert_map[key] = (key in mapping, have_child[key],
                    mapping.get(key, UEMPTY))
        self._map = convert_map
        self.max_key_length = max_key_length

    def __getitem__(self, k):
        try:
            is_tail, have_child, to_word = self._map[k]
            return Node(k, to_word, is_tail, have_child)
        except KeyError:  # narrowed from a bare except: only the lookup can fail
            # unknown key: an identity node with no children
            return Node(k)

    def __contains__(self, k):
        return k in self._map

    def __len__(self):
        return len(self._map)


class StatesMachineException(Exception):
    pass


class StatesMachine(object):
    """One branch of the longest-match conversion automaton.

    Feeds characters one at a time; ``final`` accumulates converted output,
    ``pool`` holds the pending (not yet committed) prefix.
    """

    def __init__(self):
        self.state = START
        self.final = UEMPTY
        self.len = 0
        self.pool = UEMPTY

    def clone(self, pool):
        """Fork this machine to also try the shorter-match alternative."""
        new = deepcopy(self)
        new.state = WAIT_TAIL
        new.pool = pool
        return new

    def feed(self, char, map):
        node = map[self.pool + char]
        # classify the lookup result
        if node.have_child:
            if node.is_tail:
                if node.is_original:
                    cond = UNMATCHED_SWITCH
                else:
                    cond = MATCHED_SWITCH
            else:
                cond = CONNECTOR
        else:
            if node.is_tail:
                cond = TAIL
            else:
                cond = ERROR

        new = None
        if cond == ERROR:
            self.state = FAIL
        elif cond == TAIL:
            if self.state == WAIT_TAIL and node.is_original_long_word():
                self.state = FAIL
            else:
                # commit the match and reset the pool
                self.final += node.to_word
                self.len += 1
                self.pool = UEMPTY
                self.state = END
        elif self.state == START or self.state == WAIT_TAIL:
            if cond == MATCHED_SWITCH:
                # a longer match may still exist: fork a branch that keeps
                # accumulating while this machine commits the match now
                new = self.clone(node.from_word)
                self.final += node.to_word
                self.len += 1
                self.state = END
                self.pool = UEMPTY
            elif cond == UNMATCHED_SWITCH or cond == CONNECTOR:
                if self.state == START:
                    new = self.clone(node.from_word)
                    self.final += node.to_word
                    self.len += 1
                    self.state = END
                else:
                    if node.is_follow(self.pool):
                        self.state = FAIL
                    else:
                        self.pool = node.from_word
        elif self.state == END:
            # END is a new START
            self.state = START
            new = self.feed(char, map)
        elif self.state == FAIL:
            raise StatesMachineException('Translate States Machine '
                    'have error with input data %s' % node)
        return new

    def __len__(self):
        return self.len + 1

    def __str__(self):
        return '<StatesMachine %s, pool: "%s", state: %s, final: %s>' % (
                id(self), self.pool, self.state, self.final)

    __repr__ = __str__


class Converter(object):
    """Drives a set of StatesMachine branches over an input string."""

    def __init__(self, to_encoding):
        self.to_encoding = to_encoding
        self.map = MAPS[to_encoding]
        self.start()

    def feed(self, char):
        """Feed one character to every live machine; prune failed branches."""
        branches = []
        for fsm in self.machines:
            new = fsm.feed(char, self.map)
            if new:
                branches.append(new)
        if branches:
            self.machines.extend(branches)
        self.machines = [fsm for fsm in self.machines if fsm.state != FAIL]
        all_ok = True
        for fsm in self.machines:
            if fsm.state != END:
                all_ok = False
        if all_ok:
            # every branch reached END: flush the best one and restart
            self._clean()
        return self.get_result()

    def _clean(self):
        """Keep the branch with the fewest committed words (longest matches)."""
        if len(self.machines):
            self.machines.sort(key=lambda x: len(x))
            # self.machines.sort(cmp=lambda x,y: cmp(len(x), len(y)))
            self.final += self.machines[0].final
        self.machines = [StatesMachine()]

    def start(self):
        self.machines = [StatesMachine()]
        self.final = UEMPTY

    def end(self):
        self.machines = [fsm for fsm in self.machines
                if fsm.state == FAIL or fsm.state == END]
        self._clean()

    def convert(self, string):
        """Convert ``string`` and return the converted text."""
        self.start()
        for char in string:
            self.feed(char)
        self.end()
        return self.get_result()

    def get_result(self):
        return self.final


def registery(name, mapping):
    """Register a conversion table under ``name`` (original spelling kept)."""
    global MAPS
    MAPS[name] = ConvertMap(name, mapping)

registery('zh-hant', zh2Hant)
registery('zh-hans', zh2Hans)
del zh2Hant, zh2Hans


def run():
    """Command-line entry: convert a file or stdin line by line."""
    import sys
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-e', type='string', dest='encoding',
            help='encoding')
    parser.add_option('-f', type='string', dest='file_in',
            help='input file (- for stdin)')
    parser.add_option('-t', type='string', dest='file_out',
            help='output file')
    (options, args) = parser.parse_args()
    if not options.encoding:
        parser.error('encoding must be set')
    if options.file_in:
        if options.file_in == '-':
            file_in = sys.stdin
        else:
            file_in = open(options.file_in)
    else:
        file_in = sys.stdin
    if options.file_out:
        if options.file_out == '-':
            file_out = sys.stdout
        else:
            file_out = open(options.file_out, 'wb')
    else:
        file_out = sys.stdout

    c = Converter(options.encoding)
    for line in file_in:
        # print >> file_out, c.convert(line.rstrip('\n').decode(
        file_out.write(c.convert(line.rstrip('\n').decode(
                'utf8')).encode('utf8'))


if __name__ == '__main__':
    run()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/langconv.py
langconv.py
import random

# Pool of User-Agent strings used to disguise outgoing HTTP requests.
# Covers desktop browsers (Safari/Firefox/IE/Opera/Chrome and various
# IE-shell browsers), mobile browsers (iOS/Android/BlackBerry/Symbian/WP)
# and a few legacy embedded clients (UCWEB/Openwave).
user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    # iOS 8 era iPhone:
    "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
]


def get_user_agent():
    """Return a headers dict carrying one User-Agent picked at random from the pool."""
    chosen = random.choice(user_agent)
    return {"User-Agent": chosen}
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/user_agent.py
user_agent.py
from datetime import datetime
from collections import defaultdict
from nonebot import require
from configs.config import SYSTEM_PROXY
from typing import List, Union, Optional, Type, Any
from nonebot.adapters.onebot.v11 import Bot, Message
from nonebot.matcher import matchers, Matcher
import httpx
import nonebot
import pytz
import pypinyin
import time

try:
    import ujson as json
except ModuleNotFoundError:
    import json

scheduler = require("nonebot_plugin_apscheduler").scheduler


class CountLimiter:
    """
    Call-count limiter: reports whether a key has been used ``max_count`` times.
    """

    def __init__(self, max_count: int):
        self.count = defaultdict(int)
        self.max_count = max_count

    def add(self, key: Any):
        """Record one use of *key*."""
        self.count[key] += 1

    def check(self, key: Any) -> bool:
        """Return True (and reset the counter) once *key* reached the limit."""
        if self.count[key] >= self.max_count:
            self.count[key] = 0
            return True
        return False


class UserBlockLimiter:
    """
    Tracks whether a user is currently in the middle of running a command.
    """

    def __init__(self):
        self.flag_data = defaultdict(bool)
        # NOTE(review): a single timestamp is shared by ALL keys — any set_true
        # refreshes the 30s window for everyone. Kept as-is; confirm intent.
        self.time = time.time()

    def set_true(self, key: Any):
        """Mark *key* as busy and refresh the shared timestamp."""
        self.time = time.time()
        self.flag_data[key] = True

    def set_false(self, key: Any):
        """Mark *key* as idle."""
        self.flag_data[key] = False

    def check(self, key: Any) -> bool:
        """Return True while *key* is busy; auto-clears after 30 seconds."""
        if time.time() - self.time > 30:
            self.set_false(key)
            return False
        return self.flag_data[key]


class FreqLimiter:
    """
    Per-key cooldown: a key is usable only after its cooldown expires.
    """

    def __init__(self, default_cd_seconds: int):
        self.next_time = defaultdict(float)
        self.default_cd = default_cd_seconds

    def check(self, key: Any) -> bool:
        """Return True if *key* is out of cooldown."""
        return time.time() >= self.next_time[key]

    def start_cd(self, key: Any, cd_time: int = 0):
        """Start a cooldown for *key*; ``cd_time`` <= 0 uses the default."""
        self.next_time[key] = time.time() + (
            cd_time if cd_time > 0 else self.default_cd
        )

    def left_time(self, key: Any) -> float:
        """Seconds of cooldown remaining for *key* (negative when expired)."""
        return self.next_time[key] - time.time()


static_flmt = FreqLimiter(15)


class BanCheckLimiter:
    """
    Detects command spamming: triggers after ``default_count`` hits within
    ``default_check_time`` seconds.
    """

    def __init__(self, default_check_time: float = 5, default_count: int = 4):
        self.mint = defaultdict(int)
        self.mtime = defaultdict(float)
        self.default_check_time = default_check_time
        self.default_count = default_count

    def add(self, key: Union[str, int, float]):
        """Record one hit for *key*."""
        # NOTE(review): the timestamp is only refreshed when the counter is
        # exactly 1 (i.e. on the second hit). Preserved verbatim — verify.
        if self.mint[key] == 1:
            self.mtime[key] = time.time()
        self.mint[key] += 1

    def check(self, key: Union[str, int, float]) -> bool:
        """Return True when *key* exceeded the hit threshold inside the window."""
        if time.time() - self.mtime[key] > self.default_check_time:
            # Window elapsed: restart counting.
            self.mtime[key] = time.time()
            self.mint[key] = 0
            return False
        if (
            self.mint[key] >= self.default_count
            and time.time() - self.mtime[key] < self.default_check_time
        ):
            self.mtime[key] = time.time()
            self.mint[key] = 0
            return True
        return False


class DailyNumberLimiter:
    """
    Caps how many times a command may be used per key per day
    (day boundary in Asia/Shanghai).
    """

    tz = pytz.timezone("Asia/Shanghai")

    def __init__(self, max_num):
        self.today = -1
        self.count = defaultdict(int)
        self.max = max_num

    def check(self, key) -> bool:
        """Return True while *key* is under today's quota; resets on a new day."""
        day = datetime.now(self.tz).day
        if day != self.today:
            self.today = day
            self.count.clear()
        return bool(self.count[key] < self.max)

    def get_num(self, key):
        """Current count for *key*."""
        return self.count[key]

    def increase(self, key, num=1):
        """Add *num* uses to *key*."""
        self.count[key] += num

    def reset(self, key):
        """Zero the count for *key*."""
        self.count[key] = 0


def is_number(s: str) -> bool:
    """
    Return True if *s* parses as a number (float syntax or a Unicode numeral).

    :param s: text to test
    """
    try:
        float(s)
        return True
    except ValueError:
        pass
    try:
        import unicodedata

        unicodedata.numeric(s)
        return True
    except (TypeError, ValueError):
        pass
    return False


def get_bot() -> Optional[Bot]:
    """
    Return the first connected bot, or None when no bot is connected.
    """
    try:
        return list(nonebot.get_bots().values())[0]
    except IndexError:
        return None


def get_matchers() -> List[Type[Matcher]]:
    """
    Return every registered matcher across all priorities.
    """
    return [matcher for priority in matchers.keys() for matcher in matchers[priority]]


def get_message_at(data: Union[str, Message]) -> List[int]:
    """
    Collect the qq ids of all at-segments in a message.

    :param data: ``event.json()`` string or a ``Message`` object
    """
    qq_list = []
    if isinstance(data, str):
        data = json.loads(data)
        for msg in data["message"]:
            if msg["type"] == "at":
                qq_list.append(int(msg["data"]["qq"]))
    else:
        # BUGFIX: this branch previously collected IMAGE urls (copy/paste from
        # get_message_img); it now extracts the at-target qq ids as documented.
        for seg in data:
            if seg.type == "at":
                qq_list.append(int(seg.data["qq"]))
    return qq_list


def get_message_img(data: Union[str, Message]) -> List[str]:
    """
    Collect the urls of all image segments in a message.

    :param data: ``event.json()`` string or a ``Message`` object
    """
    img_list = []
    if isinstance(data, str):
        data = json.loads(data)
        for msg in data["message"]:
            if msg["type"] == "image":
                img_list.append(msg["data"]["url"])
    else:
        for seg in data["image"]:
            img_list.append(seg.data["url"])
    return img_list


def get_message_text(data: Union[str, Message]) -> str:
    """
    Concatenate all plain-text segments of a message.

    :param data: ``event.json()`` string or a ``Message`` object
    """
    result = ""
    if isinstance(data, str):
        data = json.loads(data)
        for msg in data["message"]:
            if msg["type"] == "text":
                result += msg["data"]["text"].strip() + " "
        return result.strip()
    else:
        # NOTE(review): this branch keeps the trailing space of the original
        # implementation; callers may depend on it, so it is left unstripped.
        for seg in data["text"]:
            result += seg.data["text"] + " "
        return result


def get_message_record(data: Union[str, Message]) -> List[str]:
    """
    Collect the urls of all voice-record segments in a message.

    :param data: ``event.json()`` string or a ``Message`` object
    """
    record_list = []
    if isinstance(data, str):
        data = json.loads(data)
        for msg in data["message"]:
            if msg["type"] == "record":
                record_list.append(msg["data"]["url"])
    else:
        for seg in data["record"]:
            record_list.append(seg.data["url"])
    return record_list


def get_message_json(data: str) -> List[dict]:
    """
    Collect the data payload of all json segments in a message.

    :param data: ``event.json()`` string
    """
    try:
        json_list = []
        data = json.loads(data)
        for msg in data["message"]:
            if msg["type"] == "json":
                json_list.append(msg["data"])
        return json_list
    except KeyError:
        return []


def get_local_proxy():
    """
    Return the proxy configured in config.py, or None when unset.
    """
    return SYSTEM_PROXY if SYSTEM_PROXY else None


def is_chinese(word: str) -> bool:
    """
    Return True if every character of *word* is a CJK unified ideograph.

    :param word: text to test
    """
    for ch in word:
        if not "\u4e00" <= ch <= "\u9fff":
            return False
    return True


async def get_user_avatar(qq: int) -> Optional[bytes]:
    """
    Fetch a user's avatar (up to 3 attempts); None when all attempts time out.

    :param qq: qq id
    """
    url = f"http://q1.qlogo.cn/g?b=qq&nk={qq}&s=160"
    async with httpx.AsyncClient() as client:
        for _ in range(3):
            try:
                return (await client.get(url)).content
            # BUGFIX: httpx raises httpx.TimeoutException, which is not a
            # subclass of builtin TimeoutError, so the retry never happened.
            except httpx.TimeoutException:
                pass
    return None


async def get_group_avatar(group_id: int) -> Optional[bytes]:
    """
    Fetch a group's avatar (up to 3 attempts); None when all attempts time out.

    :param group_id: group id
    """
    url = f"http://p.qlogo.cn/gh/{group_id}/{group_id}/640/"
    async with httpx.AsyncClient() as client:
        for _ in range(3):
            try:
                return (await client.get(url)).content
            # BUGFIX: catch httpx.TimeoutException (see get_user_avatar).
            except httpx.TimeoutException:
                pass
    return None


def cn2py(word: str) -> str:
    """
    Convert a Chinese string to its pinyin transcription (tones stripped).

    :param word: text to convert
    """
    temp = ""
    for i in pypinyin.pinyin(word, style=pypinyin.NORMAL):
        temp += "".join(i)
    return temp


def change_pixiv_image_links(
    url: str, size: Optional[str] = None, nginx_url: Optional[str] = None
):
    """
    Rewrite a pixiv image url for the configured size mode and reverse proxy.

    :param url: original image url
    :param size: "master" to request the 1200px master rendition
    :param nginx_url: reverse-proxy host substituted for pixiv's image hosts
    """
    if size == "master":
        img_sp = url.rsplit(".", maxsplit=1)
        url = img_sp[0]
        img_type = img_sp[1]
        url = url.replace("original", "master") + f"_master1200.{img_type}"
    if nginx_url:
        url = (
            url.replace("i.pximg.net", nginx_url)
            .replace("i.pixiv.cat", nginx_url)
            .replace("_webp", "")
        )
    return url
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/utils.py
utils.py
import asyncio from configs.path_config import IMAGE_PATH, FONT_PATH from PIL import Image, ImageFile, ImageDraw, ImageFont, ImageFilter from imagehash import ImageHash from io import BytesIO from matplotlib import pyplot as plt from typing import Tuple, Optional, Union, List, Literal from pathlib import Path from math import ceil import random import cv2 import base64 import imagehash import re ImageFile.LOAD_TRUNCATED_IMAGES = True Image.MAX_IMAGE_PIXELS = None def compare_image_with_hash( image_file1: str, image_file2: str, max_dif: int = 1.5 ) -> bool: """ 说明: 比较两张图片的hash值是否相同 参数: :param image_file1: 图片文件路径 :param image_file2: 图片文件路径 :param max_dif: 允许最大hash差值, 越小越精确,最小为0 """ ImageFile.LOAD_TRUNCATED_IMAGES = True hash_1 = get_img_hash(image_file1) hash_2 = get_img_hash(image_file2) dif = hash_1 - hash_2 if dif < 0: dif = -dif if dif <= max_dif: return True else: return False def get_img_hash(image_file: Union[str, Path]) -> ImageHash: """ 说明: 获取图片的hash值 参数: :param image_file: 图片文件路径 """ with open(image_file, "rb") as fp: hash_value = imagehash.average_hash(Image.open(fp)) return hash_value def compressed_image( in_file: Union[str, Path], out_file: Union[str, Path] = None, ratio: float = 0.9 ): """ 说明: 压缩图片 参数: :param in_file: 被压缩的文件路径 :param out_file: 压缩后输出的文件路径 :param ratio: 压缩率,宽高 * 压缩率 """ in_file = IMAGE_PATH / in_file if isinstance(in_file, str) else in_file if out_file: out_file = ( IMAGE_PATH / out_file if isinstance(out_file, str) else out_file ) else: out_file = in_file h, w, d = cv2.imread(str(in_file.absolute())).shape img = cv2.resize( cv2.imread(str(in_file.absolute())), (int(w * ratio), int(h * ratio)) ) cv2.imwrite(str(out_file.absolute()), img) def alpha2white_pil(pic: Image) -> Image: """ 说明: 将图片透明背景转化为白色 参数: :param pic: 通过PIL打开的图片文件 """ img = pic.convert("RGBA") width, height = img.size for yh in range(height): for xw in range(width): dot = (xw, yh) color_d = img.getpixel(dot) if color_d[3] == 0: color_d = (255, 255, 255, 255) 
img.putpixel(dot, color_d) return img def pic2b64(pic: Image) -> str: """ 说明: PIL图片转base64 参数: :param pic: 通过PIL打开的图片文件 """ buf = BytesIO() pic.save(buf, format="PNG") base64_str = base64.b64encode(buf.getvalue()).decode() return "base64://" + base64_str def fig2b64(plt_: plt) -> str: """ 说明: matplotlib图片转base64 参数: :param plt_: matplotlib生成的图片 """ buf = BytesIO() plt_.savefig(buf, format="PNG", dpi=100) base64_str = base64.b64encode(buf.getvalue()).decode() return "base64://" + base64_str def is_valid(file: str) -> bool: """ 说明: 判断图片是否损坏 参数: :param file: 图片文件路径 """ valid = True try: Image.open(file).load() except OSError: valid = False return valid class BuildImage: """ 快捷生成图片与操作图片的工具类 """ def __init__( self, w: int, h: int, paste_image_width: int = 0, paste_image_height: int = 0, color: Union[str, Tuple[int, int, int], Tuple[int, int, int, int]] = None, image_mode: str = "RGBA", font_size: int = 10, background: Union[Optional[str], BytesIO, Path] = None, font: str = "yz.ttf", ratio: float = 1, is_alpha: bool = False, plain_text: Optional[str] = None, font_color: Optional[Union[str, Tuple[int, int, int]]] = None, ): """ 参数: :param w: 自定义图片的宽度,w=0时为图片原本宽度 :param h: 自定义图片的高度,h=0时为图片原本高度 :param paste_image_width: 当图片做为背景图时,设置贴图的宽度,用于贴图自动换行 :param paste_image_height: 当图片做为背景图时,设置贴图的高度,用于贴图自动换行 :param color: 生成图片的颜色 :param image_mode: 图片的类型 :param font_size: 文字大小 :param background: 打开图片的路径 :param font: 字体,默认在 resource/ttf/ 路径下 :param ratio: 倍率压缩 :param is_alpha: 是否背景透明 :param plain_text: 纯文字文本 """ self.w = int(w) self.h = int(h) self.paste_image_width = int(paste_image_width) self.paste_image_height = int(paste_image_height) self.current_w = 0 self.current_h = 0 self.font = ImageFont.truetype(str(FONT_PATH / font), int(font_size)) if not plain_text and not color: color = (255, 255, 255) self.background = background if not background: if plain_text: if not color: color = (255, 255, 255, 0) ttf_w, ttf_h = self.getsize(plain_text) self.w = self.w if self.w > ttf_w else 
ttf_w self.h = self.h if self.h > ttf_h else ttf_h self.markImg = Image.new(image_mode, (self.w, self.h), color) self.markImg.convert(image_mode) else: if not w and not h: self.markImg = Image.open(background) w, h = self.markImg.size if ratio and ratio > 0 and ratio != 1: self.w = int(ratio * w) self.h = int(ratio * h) self.markImg = self.markImg.resize( (self.w, self.h), Image.ANTIALIAS ) else: self.w = w self.h = h else: self.markImg = Image.open(background).resize( (self.w, self.h), Image.ANTIALIAS ) if is_alpha: array = self.markImg.load() for i in range(w): for j in range(h): pos = array[i, j] is_edit = sum([1 for x in pos[0:3] if x > 240]) == 3 if is_edit: array[i, j] = (255, 255, 255, 0) self.draw = ImageDraw.Draw(self.markImg) self.size = self.w, self.h if plain_text: fill = font_color if font_color else (0, 0, 0) self.text((0, 0), plain_text, fill) try: self.loop = asyncio.get_event_loop() except RuntimeError: new_loop = asyncio.new_event_loop() asyncio.set_event_loop(new_loop) self.loop = asyncio.get_event_loop() async def apaste( self, img: "BuildImage" or Image, pos: Tuple[int, int] = None, alpha: bool = False, center_type: Optional[Literal["center", "by_height", "by_width"]] = None, ): """ 说明: 异步 贴图 参数: :param img: 已打开的图片文件,可以为 BuildImage 或 Image :param pos: 贴图位置(左上角) :param alpha: 图片背景是否为透明 :param center_type: 居中类型,可能的值 center: 完全居中,by_width: 水平居中,by_height: 垂直居中 """ await self.loop.run_in_executor(None, self.paste, img, pos, alpha, center_type) def paste( self, img: "BuildImage" or Image, pos: Tuple[int, int] = None, alpha: bool = False, center_type: Optional[Literal["center", "by_height", "by_width"]] = None, ): """ 说明: 贴图 参数: :param img: 已打开的图片文件,可以为 BuildImage 或 Image :param pos: 贴图位置(左上角) :param alpha: 图片背景是否为透明 :param center_type: 居中类型,可能的值 center: 完全居中,by_width: 水平居中,by_height: 垂直居中 """ if center_type: if center_type not in ["center", "by_height", "by_width"]: raise ValueError( "center_type must be 'center', 'by_width' or 'by_height'" ) width, 
height = 0, 0 if not pos: pos = (0, 0) if center_type == "center": width = int((self.w - img.w) / 2) height = int((self.h - img.h) / 2) elif center_type == "by_width": width = int((self.w - img.w) / 2) height = pos[1] elif center_type == "by_height": width = pos[0] height = int((self.h - img.h) / 2) pos = (width, height) if isinstance(img, BuildImage): img = img.markImg if self.current_w == self.w: self.current_w = 0 self.current_h += self.paste_image_height if not pos: pos = (self.current_w, self.current_h) if alpha: try: self.markImg.paste(img, pos, img) except ValueError: img = img.convert("RGBA") self.markImg.paste(img, pos, img) else: self.markImg.paste(img, pos) self.current_w += self.paste_image_width def getsize(self, msg: str) -> Tuple[int, int]: """ 说明: 获取文字在该图片 font_size 下所需要的空间 参数: :param msg: 文字内容 """ return self.font.getsize(msg) async def apoint( self, pos: Tuple[int, int], fill: Optional[Tuple[int, int, int]] = None ): """ 说明: 异步 绘制多个或单独的像素 参数: :param pos: 坐标 :param fill: 填错颜色 """ await self.loop.run_in_executor(None, self.point, pos, fill) def point(self, pos: Tuple[int, int], fill: Optional[Tuple[int, int, int]] = None): """ 说明: 绘制多个或单独的像素 参数: :param pos: 坐标 :param fill: 填错颜色 """ self.draw.point(pos, fill=fill) async def aellipse( self, pos: Tuple[int, int, int, int], fill: Optional[Tuple[int, int, int]] = None, outline: Optional[Tuple[int, int, int]] = None, width: int = 1, ): """ 说明: 异步 绘制圆 参数: :param pos: 坐标范围 :param fill: 填充颜色 :param outline: 描线颜色 :param width: 描线宽度 """ await self.loop.run_in_executor(None, self.ellipse, pos, fill, outline, width) def ellipse( self, pos: Tuple[int, int, int, int], fill: Optional[Tuple[int, int, int]] = None, outline: Optional[Tuple[int, int, int]] = None, width: int = 1, ): """ 说明: 绘制圆 参数: :param pos: 坐标范围 :param fill: 填充颜色 :param outline: 描线颜色 :param width: 描线宽度 """ self.draw.ellipse(pos, fill, outline, width) async def atext( self, pos: Union[Tuple[int, int], Tuple[float, float]], text: str, fill: Union[str, 
Tuple[int, int, int]] = (0, 0, 0), center_type: Optional[Literal["center", "by_height", "by_width"]] = None, ): """ 说明: 异步 在图片上添加文字 参数: :param pos: 文字位置 :param text: 文字内容 :param fill: 文字颜色 :param center_type: 居中类型,可能的值 center: 完全居中,by_width: 水平居中,by_height: 垂直居中 """ await self.loop.run_in_executor(None, self.text, pos, text, fill, center_type) def text( self, pos: Union[Tuple[int, int], Tuple[float, float]], text: str, fill: Union[str, Tuple[int, int, int]] = (0, 0, 0), center_type: Optional[Literal["center", "by_height", "by_width"]] = None, ): """ 说明: 在图片上添加文字 参数: :param pos: 文字位置 :param text: 文字内容 :param fill: 文字颜色 :param center_type: 居中类型,可能的值 center: 完全居中,by_width: 水平居中,by_height: 垂直居中 """ if center_type: if center_type not in ["center", "by_height", "by_width"]: raise ValueError( "center_type must be 'center', 'by_width' or 'by_height'" ) w, h = self.w, self.h ttf_w, ttf_h = self.getsize(text) if center_type == "center": w = int((w - ttf_w) / 2) h = int((h - ttf_h) / 2) elif center_type == "by_width": w = int((w - ttf_w) / 2) h = pos[1] elif center_type == "by_height": h = int((h - ttf_h) / 2) w = pos[0] pos = (w, h) self.draw.text(pos, text, fill=fill, font=self.font) async def asave(self, path: Optional[Union[str, Path]] = None): """ 说明: 异步 保存图片 参数: :param path: 图片路径 """ await self.loop.run_in_executor(None, self.save, path) def save(self, path: Optional[Union[str, Path]] = None): """ 说明: 保存图片 参数: :param path: 图片路径 """ if not path: path = self.background self.markImg.save(path) def show(self): """ 说明: 显示图片 """ self.markImg.show(self.markImg) async def aresize(self, ratio: float = 0, w: int = 0, h: int = 0): """ 说明: 异步 压缩图片 参数: :param ratio: 压缩倍率 :param w: 压缩图片宽度至 w :param h: 压缩图片高度至 h """ await self.loop.run_in_executor(None, self.resize, ratio, w, h) def resize(self, ratio: float = 0, w: int = 0, h: int = 0): """ 说明: 压缩图片 参数: :param ratio: 压缩倍率 :param w: 压缩图片宽度至 w :param h: 压缩图片高度至 h """ if not w and not h and not ratio: raise Exception("缺少参数...") if not w 
and not h and ratio: w = int(self.w * ratio) h = int(self.h * ratio) self.markImg = self.markImg.resize((w, h), Image.ANTIALIAS) self.w, self.h = self.markImg.size self.size = self.w, self.h self.draw = ImageDraw.Draw(self.markImg) async def acrop(self, box: Tuple[int, int, int, int]): """ 说明: 异步 裁剪图片 参数: :param box: 左上角坐标,右下角坐标 (left, upper, right, lower) """ await self.loop.run_in_executor(None, self.crop, box) def crop(self, box: Tuple[int, int, int, int]): """ 说明: 裁剪图片 参数: :param box: 左上角坐标,右下角坐标 (left, upper, right, lower) """ self.markImg = self.markImg.crop(box) self.w, self.h = self.markImg.size self.size = self.w, self.h self.draw = ImageDraw.Draw(self.markImg) def check_font_size(self, word: str) -> bool: """ 说明: 检查文本所需宽度是否大于图片宽度 参数: :param word: 文本内容 """ return self.font.getsize(word)[0] > self.w async def atransparent(self, alpha_ratio: float = 1, n: int = 0): """ 说明: 异步 图片透明化 参数: :param alpha_ratio: 透明化程度 :param n: 透明化大小内边距 """ await self.loop.run_in_executor(None, self.transparent, alpha_ratio, n) def transparent(self, alpha_ratio: float = 1, n: int = 0): """ 说明: 图片透明化 参数: :param alpha_ratio: 透明化程度 :param n: 透明化大小内边距 """ self.markImg = self.markImg.convert("RGBA") x, y = self.markImg.size for i in range(n, x - n): for k in range(n, y - n): color = self.markImg.getpixel((i, k)) color = color[:-1] + (int(100 * alpha_ratio),) self.markImg.putpixel((i, k), color) self.draw = ImageDraw.Draw(self.markImg) def pic2bs4(self) -> str: """ 说明: BuildImage 转 base64 """ buf = BytesIO() self.markImg.save(buf, format="PNG") base64_str = base64.b64encode(buf.getvalue()).decode() return base64_str def convert(self, type_: str): """ 说明: 修改图片类型 参数: :param type_: 类型 """ self.markImg = self.markImg.convert(type_) async def arectangle( self, xy: Tuple[int, int, int, int], fill: Optional[Tuple[int, int, int]] = None, outline: str = None, width: int = 1, ): """ 说明: 异步 画框 参数: :param xy: 坐标 :param fill: 填充颜色 :param outline: 轮廓颜色 :param width: 线宽 """ await 
self.loop.run_in_executor(None, self.rectangle, xy, fill, outline, width) def rectangle( self, xy: Tuple[int, int, int, int], fill: Optional[Tuple[int, int, int]] = None, outline: str = None, width: int = 1, ): """ 说明: 画框 参数: :param xy: 坐标 :param fill: 填充颜色 :param outline: 轮廓颜色 :param width: 线宽 """ self.draw.rectangle(xy, fill, outline, width) async def apolygon( self, xy: List[Tuple[int, int]], fill: Tuple[int, int, int] = (0, 0, 0), outline: int = 1, ): """ 说明: 异步 画多边形 参数: :param xy: 坐标 :param fill: 颜色 :param outline: 线宽 """ await self.loop.run_in_executor(None, self.polygon, xy, fill, outline) def polygon( self, xy: List[Tuple[int, int]], fill: Tuple[int, int, int] = (0, 0, 0), outline: int = 1, ): """ 说明: 画多边形 参数: :param xy: 坐标 :param fill: 颜色 :param outline: 线宽 """ self.draw.polygon(xy, fill, outline) async def aline( self, xy: Tuple[int, int, int, int], fill: Optional[Union[str, Tuple[int, int, int]]] = None, width: int = 1, ): """ 说明: 异步 画线 参数: :param xy: 坐标 :param fill: 填充 :param width: 线宽 """ await self.loop.run_in_executor(None, self.line, xy, fill, width) def line( self, xy: Tuple[int, int, int, int], fill: Optional[Union[Tuple[int, int, int], str]] = None, width: int = 1, ): """ 说明: 画线 参数: :param xy: 坐标 :param fill: 填充 :param width: 线宽 """ self.draw.line(xy, fill, width) async def acircle(self): """ 说明: 异步 将 BuildImage 图片变为圆形 """ await self.loop.run_in_executor(None, self.circle) def circle(self): """ 说明: 使图像变圆 """ self.markImg.convert("RGBA") size = self.markImg.size r2 = min(size[0], size[1]) if size[0] != size[1]: self.markImg = self.markImg.resize((r2, r2), Image.ANTIALIAS) width = 1 antialias = 4 ellipse_box = [0, 0, r2 - 2, r2 - 2] mask = Image.new( size=[int(dim * antialias) for dim in self.markImg.size], mode='L', color='black') draw = ImageDraw.Draw(mask) for offset, fill in (width / -2.0, 'black'), (width / 2.0, 'white'): left, top = [(value + offset) * antialias for value in ellipse_box[:2]] right, bottom = [(value - offset) * antialias for 
value in ellipse_box[2:]] draw.ellipse([left, top, right, bottom], fill=fill) mask = mask.resize(self.markImg.size, Image.LANCZOS) self.markImg.putalpha(mask) async def acircle_corner(self, radii: int = 30): """ 说明: 异步 矩形四角变圆 参数: :param radii: 半径 """ await self.loop.run_in_executor(None, self.circle_corner, radii) def circle_corner(self, radii: int = 30): """ 说明: 矩形四角变圆 参数: :param radii: 半径 """ # 画圆(用于分离4个角) circle = Image.new("L", (radii * 2, radii * 2), 0) draw = ImageDraw.Draw(circle) draw.ellipse((0, 0, radii * 2, radii * 2), fill=255) self.markImg = self.markImg.convert("RGBA") w, h = self.markImg.size alpha = Image.new("L", self.markImg.size, 255) alpha.paste(circle.crop((0, 0, radii, radii)), (0, 0)) alpha.paste(circle.crop((radii, 0, radii * 2, radii)), (w - radii, 0)) alpha.paste( circle.crop((radii, radii, radii * 2, radii * 2)), (w - radii, h - radii) ) alpha.paste(circle.crop((0, radii, radii, radii * 2)), (0, h - radii)) self.markImg.putalpha(alpha) async def arotate(self, angle: int, expand: bool = False): """ 说明: 异步 旋转图片 参数: :param angle: 角度 :param expand: 放大图片适应角度 """ await self.loop.run_in_executor(None, self.rotate, angle, expand) def rotate(self, angle: int, expand: bool = False): """ 说明: 旋转图片 参数: :param angle: 角度 :param expand: 放大图片适应角度 """ self.markImg = self.markImg.rotate(angle, expand=expand) async def atranspose(self, angle: int): """ 说明: 异步 旋转图片(包括边框) 参数: :param angle: 角度 """ await self.loop.run_in_executor(None, self.transpose, angle) def transpose(self, angle: int): """ 说明: 旋转图片(包括边框) 参数: :param angle: 角度 """ self.markImg.transpose(angle) async def afilter(self, filter_: str, aud: int = None): """ 说明: 异步 图片变化 参数: :param filter_: 变化效果 :param aud: 利率 """ await self.loop.run_in_executor(None, self.filter, filter_, aud) def filter(self, filter_: str, aud: int = None): """ 说明: 图片变化 参数: :param filter_: 变化效果 :param aud: 利率 """ _x = None if filter_ == "GaussianBlur": # 高斯模糊 _x = ImageFilter.GaussianBlur elif filter_ == "EDGE_ENHANCE": # 锐化效果 _x 
= ImageFilter.EDGE_ENHANCE elif filter_ == "BLUR": # 模糊效果 _x = ImageFilter.BLUR elif filter_ == "CONTOUR": # 铅笔滤镜 _x = ImageFilter.CONTOUR elif filter_ == "FIND_EDGES": # 边缘检测 _x = ImageFilter.FIND_EDGES if _x: if aud: self.markImg = self.markImg.filter(_x(aud)) else: self.markImg = self.markImg.filter(_x) self.draw = ImageDraw.Draw(self.markImg) async def areplace_color_tran( self, src_color: Union[ Tuple[int, int, int], Tuple[Tuple[int, int, int], Tuple[int, int, int]] ], replace_color: Tuple[int, int, int], ): """ 说明: 异步 颜色替换 参数: :param src_color: 目标颜色,或者使用列表,设置阈值 :param replace_color: 替换颜色 """ self.loop.run_in_executor( None, self.replace_color_tran, src_color, replace_color ) def replace_color_tran( self, src_color: Union[ Tuple[int, int, int], Tuple[Tuple[int, int, int], Tuple[int, int, int]] ], replace_color: Tuple[int, int, int], ): """ 说明: 颜色替换 参数: :param src_color: 目标颜色,或者使用元祖,设置阈值 :param replace_color: 替换颜色 """ if isinstance(src_color, tuple): start_ = src_color[0] end_ = src_color[1] else: start_ = src_color end_ = None for i in range(self.w): for j in range(self.h): r, g, b = self.markImg.getpixel((i, j)) if not end_: if r == start_[0] and g == start_[1] and b == start_[2]: self.markImg.putpixel((i, j), replace_color) else: if ( start_[0] <= r <= end_[0] and start_[1] <= g <= end_[1] and start_[2] <= b <= end_[2] ): self.markImg.putpixel((i, j), replace_color) # def getchannel(self, type_): self.markImg = self.markImg.getchannel(type_) class BuildMat: """ 针对 折线图/柱状图,基于 BuildImage 编写的 非常难用的 自定义画图工具 目前仅支持 正整数 """ def __init__( self, y: List[int], mat_type: str = "line", *, x_name: Optional[str] = None, y_name: Optional[str] = None, x_index: List[Union[str, int, float]] = None, y_index: List[Union[str, int, float]] = None, x_rotate: int = 0, title: Optional[str] = None, size: Tuple[int, int] = (1000, 1000), font: str = "msyh.ttf", font_size: Optional[int] = None, display_num: bool = False, is_grid: bool = False, background: Optional[List[str]] = None, 
background_filler_type: Optional[str] = "center", bar_color: Optional[List[Union[str, Tuple[int, int, int]]]] = None, ): """ 说明: 初始化 BuildMat 参数: :param y: 坐标值 :param mat_type: 图像类型 可能的值:[line]: 折线图,[bar]: 柱状图,[barh]: 横向柱状图 :param x_name: 横坐标名称 :param y_name: 纵坐标名称 :param x_index: 横坐标值 :param y_index: 纵坐标值 :param x_rotate: 横坐标旋转角度 :param title: 标题 :param size: 图像大小,建议默认 :param font: 字体 :param font_size: 字体大小,建议默认 :param display_num: 是否显示数值 :param is_grid: 是否添加栅格 :param background: 背景图片 :param background_filler_type: 图像填充类型 :param bar_color: 柱状图颜色,位 ['*'] 时替换位彩虹随机色 """ self.mat_type = mat_type self.markImg = None self._check_value(y, y_index) self.w = size[0] self.h = size[1] self.y = y self.x_name = x_name self.y_name = y_name self.x_index = x_index self.y_index = y_index self.x_rotate = x_rotate self.title = title self.font = font self.display_num = display_num self.is_grid = is_grid self.background = background self.background_filler_type = background_filler_type self.bar_color = bar_color if bar_color else [(0, 0, 0)] self.size = size self.padding_w = 120 self.padding_h = 120 self.line_length = 760 self._deviation = 0.905 self._color = {} if not font_size: self.font_size = int(25 * (1 - len(x_index) / 100)) else: self.font_size = font_size if self.bar_color == ["*"]: self.bar_color = [ "#FF0000", "#FF7F00", "#FFFF00", "#00FF00", "#00FFFF", "#0000FF", "#8B00FF", ] if not x_index: raise ValueError("缺少 x_index [横坐标值]...") self._x_interval = int((self.line_length - 70) / len(x_index)) self._bar_width = int(30 * (1 - (len(x_index) + 10) / 100)) # 没有 y_index 时自动生成 if not y_index: _y_index = [] _max_value = int(max(y)) _max_value = ceil( _max_value / eval("1" + "0" * (len(str(_max_value)) - 1)) ) * eval("1" + "0" * (len(str(_max_value)) - 1)) _max_value = _max_value if _max_value >= 10 else 100 _step = int(_max_value / 10) for i in range(_step, _max_value + _step, _step): _y_index.append(i) self.y_index = _y_index self._p = self.line_length / max(self.y_index) 
        # Pixel distance between two consecutive y-axis ticks.
        self._y_interval = int((self.line_length - 70) / len(self.y_index))

    def gen_graph(self):
        """Render the chart onto ``self.markImg`` according to ``self.mat_type``."""
        self.markImg = self._init_graph(
            x_name=self.x_name,
            y_name=self.y_name,
            x_index=self.x_index,
            y_index=self.y_index,
            font_size=self.font_size,
            is_grid=self.is_grid,
        )
        if self.mat_type == "line":
            self._gen_line_graph(y=self.y, display_num=self.display_num)
        elif self.mat_type == "bar":
            self._gen_bar_graph(y=self.y, display_num=self.display_num)
        elif self.mat_type == "barh":
            self._gen_bar_graph(y=self.y, display_num=self.display_num, is_barh=True)

    def set_y(self, y: List[int]):
        """
        Replace the data points.

        :param y: new data point values (validated against the y axis)
        """
        self._check_value(y, self.y_index)
        self.y = y

    def set_y_index(self, y_index: List[Union[str, int, float]]):
        """
        Replace the y-axis tick values.

        :param y_index: new y-axis tick values (must be ordered, validated)
        """
        self._check_value(self.y, y_index)
        self.y_index = y_index

    def set_title(self, title: str, color: Optional[Union[str, Tuple[int, int, int]]]):
        """
        Set the chart title.

        :param title: title text
        :param color: optional title font color
        """
        self.title = title
        if color:
            self._color["title"] = color

    def set_background(
        self, background: Optional[List[str]], type_: Optional[str] = None
    ):
        """
        Set the background image candidates.

        :param background: list of image paths (one is picked at random)
        :param type_: fill type; kept unchanged when None
        """
        self.background = background
        self.background_filler_type = type_ if type_ else self.background_filler_type

    def show(self):
        """Display the rendered image (requires gen_graph() to have run)."""
        self.markImg.show()

    def pic2bs4(self) -> str:
        """Return the rendered image encoded as base64."""
        return self.markImg.pic2bs4()

    def resize(self, ratio: float = 0.9):
        """
        Scale the rendered image.

        :param ratio: scale factor
        """
        self.markImg.resize(ratio)

    def save(self, path: Union[str, Path]):
        """
        Save the rendered image to disk.

        :param path: destination path
        """
        self.markImg.save(path)

    def _check_value(
        self,
        y: List[int],
        y_index: Optional[List[Union[str, int, float]]] = None,
        x_index: Optional[List[Union[str, int, float]]] = None,
    ):
        """
        Validate data points against the axis tick values.

        Raises ValueError when a data point exceeds the largest tick or when
        the ticks are not in ascending order.

        :param y: data point values
        :param y_index: y-axis tick values
        :param x_index: x-axis tick values
        """
        if y_index:
            # NOTE(review): _value is computed but never used below — the
            # comparison always uses y_index even for barh charts.
            _value = x_index if self.mat_type == "barh" else y_index
            if max(y) > max(y_index):
                raise ValueError("坐标点的值必须小于y轴坐标的最大值...")
            i = -9999999999
            # NOTE(review): the loop variable shadows the `y` parameter.
            for y in y_index:
                if y > i:
                    i = y
                else:
                    raise ValueError("y轴坐标值必须有序...")

    def _gen_line_graph(
        self,
        y: List[Union[int, float]],
        display_num: bool = False,
    ):
        """
        Draw a line graph onto ``self.markImg``.

        :param y: data point values
        :param display_num: draw each point's numeric value above it
        """
        _black_point = BuildImage(7, 7, color=random.choice(self.bar_color))
        _black_point.circle()
        x_interval = self._x_interval
        current_w = self.padding_w + x_interval
        current_h = self.padding_h + self.line_length
        for i in range(len(y)):
            if display_num:
                # Center the number horizontally over the point.
                w = int(self.markImg.getsize(str(y[i]))[0] / 2)
                self.markImg.text(
                    (
                        current_w - w,
                        current_h - int(y[i] * self._p * self._deviation) - 25 - 5,
                    ),
                    f"{y[i]:.2f}" if isinstance(y[i], float) else f"{y[i]}",
                )
            # Dot marker at the data point.
            self.markImg.paste(
                _black_point,
                (
                    current_w - 3,
                    current_h - int(y[i] * self._p * self._deviation) - 3,
                ),
                True,
            )
            # Segment to the next point, except after the last point.
            if i != len(y) - 1:
                self.markImg.line(
                    (
                        current_w,
                        current_h - int(y[i] * self._p * self._deviation),
                        current_w + x_interval,
                        current_h - int(y[i + 1] * self._p * self._deviation),
                    ),
                    fill=(0, 0, 0),
                    width=2,
                )
            current_w += x_interval

    def _gen_bar_graph(
        self,
        y: List[Union[int, float]],
        display_num: bool = False,
        is_barh: bool = False,
    ):
        """
        Draw a (possibly horizontal) bar graph onto ``self.markImg``.

        :param y: data point values
        :param display_num: draw each bar's numeric value
        :param is_barh: horizontal bars when True
        """
        _interval = self._x_interval
        if is_barh:
            current_h = self.padding_h + self.line_length - _interval
            current_w = self.padding_w
        else:
            current_w = self.padding_w + _interval
            current_h = self.padding_h + self.line_length
        for i in range(len(y)):
            # Draw the numeric label.
            if display_num:
                # Horizontal bars: label to the right of the bar.
                if is_barh:
                    font_h = self.markImg.getsize(str(y[i]))[1]
                    self.markImg.text(
                        (
                            self.padding_w
                            + int(y[i] * self._p * self._deviation)
                            + 2
                            + 5,
                            current_h - int(font_h / 2) - 1,
                        ),
                        f"{y[i]:.2f}" if isinstance(y[i], float) else f"{y[i]}",
                    )
                else:
                    w = int(self.markImg.getsize(str(y[i]))[0] / 2)
                    self.markImg.text(
                        (
                            current_w - w,
                            current_h - int(y[i] * self._p * self._deviation) - 25,
                        ),
                        f"{y[i]:.2f}" if isinstance(y[i], float) else f"{y[i]}",
                    )
            # NOTE(review): this condition is always True (i ranges over
            # range(len(y))); probably meant `i != len(y) - 1` or nothing.
            if i != len(y):
                bar_color = random.choice(self.bar_color)
                if is_barh:
                    A = BuildImage(
                        int(y[i] * self._p * self._deviation),
                        self._bar_width,
                        color=bar_color,
                    )
                    self.markImg.paste(
                        A,
                        (
                            current_w + 2,
                            current_h - int(self._bar_width / 2),
                        ),
                    )
                else:
                    A = BuildImage(
                        self._bar_width,
                        int(y[i] * self._p * self._deviation),
                        color=bar_color,
                    )
                    self.markImg.paste(
                        A,
                        (
                            current_w - int(self._bar_width / 2),
                            current_h - int(y[i] * self._p * self._deviation),
                        ),
                    )
            if is_barh:
                current_h -= _interval
            else:
                current_w += _interval

    def _init_graph(
        self,
        x_name: Optional[str] = None,
        y_name: Optional[str] = None,
        x_index: Optional[List[Union[str, int, float]]] = None,
        y_index: Optional[List[Union[str, int, float]]] = None,
        font_size: Optional[int] = None,
        is_grid: bool = False,
    ) -> BuildImage:
        """
        Initialize the canvas: background, title, x/y axes, tick marks and
        tick labels.

        :param x_name: x-axis label
        :param y_name: y-axis label
        :param x_index: x-axis tick values
        :param y_index: y-axis tick values
        :param font_size: tick label font size
        :param is_grid: draw full-length grid lines instead of short ticks
        :return: the prepared canvas
        """
        padding_w = self.padding_w
        padding_h = self.padding_h
        line_length = self.line_length
        background = random.choice(self.background) if self.background else None
        A = BuildImage(
            self.w, self.h, font_size=font_size, font=self.font, background=background
        )
        if background:
            # Semi-transparent overlay so axes stay readable on any image.
            _tmp = BuildImage(self.w, self.h)
            _tmp.transparent(2)
            A.paste(_tmp, alpha=True)
        if self.title:
            title = BuildImage(
                0,
                0,
                plain_text=self.title,
                color=(255, 255, 255, 0),
                font_size=35,
                font_color=self._color.get("title"),
                font=self.font,
            )
            A.paste(title, (0, 25), True, "by_width")
        # x axis.
        A.line(
            (
                padding_w,
                padding_h + line_length,
                padding_w + line_length,
                padding_h + line_length,
            ),
            (0, 0, 0),
            2,
        )
        # y axis.
        A.line(
            (
                padding_w,
                padding_h,
                padding_w,
                padding_h + line_length,
            ),
            (0, 0, 0),
            2,
        )
        _interval = self._x_interval
        if self.mat_type == "barh":
            # Horizontal bars: the axes swap roles.
            tmp = x_index
            x_index = y_index
            y_index = tmp
            _interval = self._y_interval
        current_w = padding_w + _interval
        _text_font = BuildImage(0, 0, font_size=self.font_size, font=self.font)
        _grid = self.line_length if is_grid else 10
        x_rotate_height = 0
        for _x in x_index:
            # Tick (or grid line) plus the rotated tick label.
            _p = BuildImage(1, _grid, color="#a9a9a9")
            A.paste(_p, (current_w, padding_h + line_length - _grid))
            w = int(_text_font.getsize(f"{_x}")[0] / 2)
            text = BuildImage(
                0,
                0,
                plain_text=f"{_x}",
                font_size=self.font_size,
                color=(255, 255, 255, 0),
                font=self.font,
            )
            text.rotate(self.x_rotate, True)
            A.paste(text, (current_w - w, padding_h + line_length + 10), alpha=True)
            current_w += _interval
            x_rotate_height = text.h
        _interval = self._x_interval if self.mat_type == "barh" else self._y_interval
        current_h = padding_h + line_length - _interval
        _text_font = BuildImage(0, 0, font_size=self.font_size, font=self.font)
        for _y in y_index:
            _p = BuildImage(_grid, 1, color="#a9a9a9")
            A.paste(_p, (padding_w + 2, current_h))
            w, h = _text_font.getsize(f"{_y}")
            h = int(h / 2)
            text = BuildImage(
                0,
                0,
                plain_text=f"{_y}",
                font_size=self.font_size,
                color=(255, 255, 255, 0),
                font=self.font,
            )
            idx = 0
            # Shrink the label (up to 3 attempts) until it fits the margin.
            # NOTE(review): the retry always uses the same 0.75 factor, so
            # iterations 2 and 3 cannot shrink further.
            while text.size[0] > self.padding_w - 10 and idx < 3:
                text = BuildImage(
                    0,
                    0,
                    plain_text=f"{_y}",
                    font_size=int(self.font_size * 0.75),
                    color=(255, 255, 255, 0),
                    font=self.font,
                )
                w, _ = text.getsize(f"{_y}")
                idx += 1
            A.paste(text, (padding_w - w - 10, current_h - h), alpha=True)
            current_h -= _interval
        if x_name:
            A.text((int(padding_w / 2), int(padding_w / 2)), x_name)
        if y_name:
            A.text(
                (
                    int(padding_w + line_length + 50 - A.getsize(y_name)[0]),
                    int(padding_h + line_length + 50 + x_rotate_height),
                ),
                y_name,
            )
        return A


async def text2image(
    text: str,
    auto_parse: bool = True,
    font_size: int = 20,
    color: Union[str, Tuple[int, int, int], Tuple[int, int, int, int]] = "white",
    font: str = "CJGaoDeGuo.otf",
    font_color: Union[str, Tuple[int, int, int]] = "black",
    padding: Union[int, Tuple[int, int, int, int]] = 0,
) -> BuildImage:
    """
    Parse text (optionally containing ``<f ...>...</f>`` style tags) and
    render it to an image.

    Supported tag options inside ``<f ...>``:
        font: str                                    -> font of the tagged text
        fs / font_size: int                          -> size of the tagged text
        fc / font_color: Union[str, Tuple[int, int, int]] -> color of the tagged text

    Example:
        在不在,<f font=YSHaoShenTi-2.ttf font_size=30 font_color=red>HibiKi小姐</f>,
        你最近还好吗,<f font_size=15 font_color=black>我非常想你</f>,这段时间我非常不好过,
        <f font_size=25>抽卡抽不到金色</f>,这让我很痛苦

    :param text: the text to render
    :param auto_parse: parse the tags; otherwise render the text verbatim
    :param font_size: default font size
    :param color: background color
    :param font: default font
    :param font_color: default font color
    :param padding: outer padding; a tuple means (top, left, bottom, right)
    """
    pw = ph = top_padding = left_padding = 0
    if padding:
        if isinstance(padding, int):
            pw = padding * 2
            ph = padding * 2
            top_padding = left_padding = padding
        elif isinstance(padding, tuple):
            # NOTE(review): pw sums indices 0+2 (top+bottom) but is later
            # added to the width — confirm the intended (top,left,bottom,right)
            # order against callers.
            pw = padding[0] + padding[2]
            ph = padding[1] + padding[3]
            top_padding = padding[0]
            left_padding = padding[1]
    if auto_parse and re.search(r"<f(.*)>(.*)</f>", text):
        _data = []
        new_text = ""
        placeholder_index = 0
        # Replace every tagged segment with a [placeholder_N] marker and
        # remember (span, marker, options, tagged text) for later rendering.
        for s in text.split("</f>"):
            r = re.search(r"<f(.*)>(.*)", s)
            if r:
                start, end = r.span()
                if start != 0 and (t := s[:start]):
                    new_text += t
                _data.append(
                    [
                        (start, end),
                        f"[placeholder_{placeholder_index}]",
                        r.group(1).strip(),
                        r.group(2),
                    ]
                )
                new_text += f"[placeholder_{placeholder_index}]"
                placeholder_index += 1
        new_text += text.split("</f>")[-1]
        image_list = []
        current_placeholder_index = 0
        # Each line becomes its own image; they are stacked at the end.
        for s in new_text.split("\n"):
            _tmp_text = s
            # Measure the line: max segment height and total width.
            img_height = BuildImage(0, 0, font_size=font_size).getsize("正")[1]
            img_width = 0
            _tmp_index = current_placeholder_index
            for _ in range(s.count("[placeholder_")):
                placeholder = _data[_tmp_index]
                if "font_size" in placeholder[2]:
                    r = re.search(r"font_size=['\"]?(\d+)", placeholder[2])
                    if r:
                        w, h = BuildImage(0, 0, font_size=int(r.group(1))).getsize(
                            placeholder[3]
                        )
                        img_height = img_height if img_height > h else h
                        img_width += w
                else:
                    img_width += BuildImage(0, 0, font_size=font_size).getsize(
                        placeholder[3]
                    )[0]
                _tmp_text = _tmp_text.replace(f"[placeholder_{_tmp_index}]", "")
                _tmp_index += 1
            img_width += BuildImage(0, 0, font_size=font_size).getsize(_tmp_text)[0]
            # img_width += len(_tmp_text) * font_size
            # Start drawing this line.
            A = BuildImage(
                img_width, img_height, color=color, font=font, font_size=font_size
            )
            basic_font_h = A.getsize("正")[1]
            current_width = 0
            # Iterate over the placeholders in this line.
            for _ in range(s.count("[placeholder_")):
                if not s.startswith(f"[placeholder_{current_placeholder_index}]"):
                    # Plain text before the next placeholder.
                    slice_ = s.split(f"[placeholder_{current_placeholder_index}]")
                    await A.atext(
                        (current_width, A.h - basic_font_h - 1), slice_[0], font_color
                    )
                    current_width += A.getsize(slice_[0])[0]
                placeholder = _data[current_placeholder_index]
                # Parse the tag options (font / fs / fc).
                _font = font
                _font_size = font_size
                _font_color = font_color
                for e in placeholder[2].split():
                    if e.startswith("font="):
                        _font = e.split("=")[-1]
                    if e.startswith("font_size=") or e.startswith("fs="):
                        _font_size = int(e.split("=")[-1])
                        # Clamp to a sane range.
                        if _font_size > 1000:
                            _font_size = 1000
                        if _font_size < 1:
                            _font_size = 1
                    if e.startswith("font_color") or e.startswith("fc="):
                        _font_color = e.split("=")[-1]
                text_img = BuildImage(
                    0,
                    0,
                    plain_text=placeholder[3],
                    font_size=_font_size,
                    font_color=_font_color,
                    font=_font,
                )
                # Center vertically when the line is a single placeholder,
                # otherwise align to the baseline.
                _img_h = (
                    int(A.h / 2 - text_img.h / 2)
                    if new_text == "[placeholder_0]"
                    else A.h - text_img.h
                )
                await A.apaste(text_img, (current_width, _img_h - 1), True)
                current_width += text_img.w
                # Consume the placeholder from the working string.
                s = s[
                    s.index(f"[placeholder_{current_placeholder_index}]")
                    + len(f"[placeholder_{current_placeholder_index}]") :
                ]
                current_placeholder_index += 1
            if s:
                # Trailing plain text after the last placeholder.
                slice_ = s.split(f"[placeholder_{current_placeholder_index}]")
                await A.atext((current_width, A.h - basic_font_h), slice_[0])
                current_width += A.getsize(slice_[0])[0]
            A.crop((0, 0, current_width, A.h))
            # A.show()
            image_list.append(A)
        # Stack all line images vertically.
        height = 0
        width = 0
        for img in image_list:
            height += img.h
            width = width if width > img.w else img.w
        width += pw
        height += ph
        A = BuildImage(width + left_padding, height + top_padding, color=color)
        current_height = top_padding
        for img in image_list:
            await A.apaste(img, (left_padding, current_height), True)
            current_height += img.h
    else:
        # No tags (or parsing disabled): render verbatim.
        width = 0
        height = 0
        _tmp = BuildImage(0, 0, font_size=font_size)
        for x in text.split("\n"):
            w, h = _tmp.getsize(x)
            height += h
            width = width if width > w else w
        width += pw
        height += ph
        A = BuildImage(
            width + left_padding,
            height + top_padding,
            font_size=font_size,
            color=color,
            font=font,
        )
        await A.atext((left_padding, top_padding), text, font_color)
    # A.show()
    return A


if __name__ == "__main__":
    pass
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/image_utils.py
image_utils.py
from typing import Optional, List, Union, Dict
from pathlib import Path
from .data_class import StaticData
from utils.utils import get_matchers, get_bot
from configs.config import Config
import nonebot

Config.add_plugin_config(
    "group_manager", "DEFAULT_GROUP_LEVEL", 5, help_="默认群权限", default_value=5
)

Config.add_plugin_config(
    "group_manager", "DEFAULT_GROUP_BOT_STATUS", True, help_="默认进群总开关状态", default_value=True
)


class GroupManager(StaticData):
    """
    Manager for per-group state: permission level, per-plugin switches,
    the group-wide bot switch and scheduled-task ("被动技能") switches.
    """

    def __init__(self, file: Path):
        """
        :param file: backing storage file (loaded/saved by StaticData)
        """
        super().__init__(file)
        if not self._data:
            self._data = {
                "super": {"white_group_list": []},
                "group_manager": {},
            }
        # Task name -> human readable description, collected lazily from
        # every plugin's __plugin_task__ in init_group_task().
        self._task = {}

    def block_plugin(self, module: str, group_id: int):
        """
        Disable a plugin for a group.

        :param module: plugin module name
        :param group_id: group id (None means disabled by a superuser)
        """
        self._set_plugin_status(module, "block", group_id)

    def unblock_plugin(self, module: str, group_id: int):
        """
        Enable a plugin for a group.

        :param module: plugin module name
        :param group_id: group id
        """
        self._set_plugin_status(module, "unblock", group_id)

    def turn_on_group_bot_status(self, group_id: int):
        """
        Turn the group-wide bot switch on.

        :param group_id: group id
        """
        self._set_group_bot_status(group_id, True)

    def shutdown_group_bot_status(self, group_id: int):
        """
        Turn the group-wide bot switch off.

        :param group_id: group id
        """
        self._set_group_bot_status(group_id, False)

    def check_group_bot_status(self, group_id: int) -> bool:
        """
        Return the group-wide bot switch state, initializing it from config
        when it has never been set.

        :param group_id: group id
        """
        group_id = str(group_id)
        if not self._data["group_manager"].get(group_id):
            self._init_group(group_id)
        if self._data["group_manager"][group_id].get("status") is None:
            default_group_bot_status = Config.get_config(
                "group_manager", "DEFAULT_GROUP_BOT_STATUS"
            )
            # BUGFIX: the original `if default_group_bot_status: ... = True`
            # was a no-op and stored None (falsy) when the config entry was
            # missing; only fall back to True when the config value is unset.
            if default_group_bot_status is None:
                default_group_bot_status = True
            self._data["group_manager"][group_id]["status"] = default_group_bot_status
        return self._data["group_manager"][group_id]["status"]

    def set_group_level(self, group_id: int, level: int):
        """
        Set a group's permission level.

        :param group_id: group id
        :param level: permission level
        """
        group_id = str(group_id)
        if not self._data["group_manager"].get(group_id):
            self._init_group(group_id)
        self._data["group_manager"][group_id]["level"] = level
        self.save()

    def get_plugin_status(self, module: str, group_id: int) -> bool:
        """
        Return whether a plugin is enabled for a group.

        :param module: plugin module name
        :param group_id: group id
        """
        group_id = str(group_id) if group_id else group_id
        if not self._data["group_manager"].get(group_id):
            # A freshly initialized group has no closed plugins.
            self._init_group(group_id)
            return True
        return module not in self._data["group_manager"][group_id]["close_plugins"]

    def get_group_level(self, group_id: int) -> int:
        """
        Return a group's permission level.

        :param group_id: group id
        """
        group_id = str(group_id)
        if not self._data["group_manager"].get(group_id):
            self._init_group(group_id)
        return self._data["group_manager"][group_id]["level"]

    def check_group_is_white(self, group_id: int) -> bool:
        """
        Return whether a group is in the whitelist.

        :param group_id: group id
        """
        return group_id in self._data["super"]["white_group_list"]

    def add_group_white_list(self, group_id: int):
        """
        Add a group to the whitelist.

        :param group_id: group id
        """
        if group_id not in self._data["super"]["white_group_list"]:
            self._data["super"]["white_group_list"].append(group_id)
            # BUGFIX: persist the change — the original never saved here, so
            # whitelist additions were lost on restart.
            self.save()

    def delete_group_white_list(self, group_id: int):
        """
        Remove a group from the whitelist.

        :param group_id: group id
        """
        if group_id in self._data["super"]["white_group_list"]:
            self._data["super"]["white_group_list"].remove(group_id)
            # BUGFIX: persist the change (see add_group_white_list).
            self.save()

    def get_group_white_list(self) -> List[str]:
        """Return every whitelisted group id."""
        return self._data["super"]["white_group_list"]

    def delete_group(self, group_id: int):
        """
        Delete all stored configuration for a group.

        :param group_id: group id
        """
        # BUGFIX: group_manager keys are strings while group_id is an int;
        # the original membership test compared the raw int against str keys
        # and therefore never deleted anything.
        group_id_str = str(group_id)
        if group_id_str in self._data["group_manager"]:
            del self._data["group_manager"][group_id_str]
        if group_id in self._data["super"]["white_group_list"]:
            self._data["super"]["white_group_list"].remove(group_id)
        self.save()

    async def open_group_task(self, group_id: int, task: str):
        """
        Enable a scheduled task for a group.

        :param group_id: group id
        :param task: task name
        """
        await self._set_group_task_status(group_id, task, True)

    async def close_group_task(self, group_id: int, task: str):
        """
        Disable a scheduled task for a group.

        :param group_id: group id
        :param task: task name
        """
        await self._set_group_task_status(group_id, task, False)

    async def check_group_task_status(self, group_id: int, task: str) -> bool:
        """
        Return whether a scheduled task is enabled for a group.

        :param group_id: group id
        :param task: task name
        """
        group_id = str(group_id)
        # Robustness: initialize unknown groups instead of raising KeyError.
        if not self._data["group_manager"].get(group_id):
            self._init_group(group_id)
        if (
            not self._data["group_manager"][group_id].get("group_task_status")
            or self._data["group_manager"][group_id]["group_task_status"].get(task)
            is None
        ):
            await self.init_group_task(group_id)
        return self._data["group_manager"][group_id]["group_task_status"][task]

    def get_task_data(self) -> Dict[str, str]:
        """Return every known task (name -> description)."""
        return self._task

    async def group_task_status(self, group_id: int) -> str:
        """
        Render the state of every scheduled task of a group as text.

        :param group_id: group id
        """
        x = "[群被动技能]:\n"
        group_id = str(group_id)
        # Robustness: initialize unknown groups instead of raising KeyError.
        if not self._data["group_manager"].get(group_id):
            self._init_group(group_id)
        if not self._data["group_manager"][group_id].get("group_task_status"):
            await self.init_group_task(group_id)
        for key in self._data["group_manager"][group_id]["group_task_status"].keys():
            x += f'{self._task[key]}:{"√" if await self.check_group_task_status(int(group_id), key) else "×"}\n'
        return x[:-1]

    async def _set_group_task_status(self, group_id: int, task: str, status: bool):
        """
        Set the state of a scheduled task for a group.

        :param group_id: group id
        :param task: task name
        :param status: new state
        """
        group_id = str(group_id)
        if not self._data["group_manager"].get(group_id):
            self._init_group(group_id)
        if (
            not self._data["group_manager"][group_id].get("group_task_status")
            or self._data["group_manager"][group_id]["group_task_status"].get(task)
            is None
        ):
            await self.init_group_task(group_id)
        self._data["group_manager"][group_id]["group_task_status"][task] = status
        self.save()

    async def init_group_task(self, group_id: Optional[Union[int, str]] = None):
        """
        Initialize the scheduled-task state of one group (or every group the
        bot is in), collecting task declarations from loaded plugins first.
        """
        if not self._task:
            # Collect every plugin's __plugin_task__ exactly once per plugin.
            _m = []
            for matcher in get_matchers():
                if matcher.plugin_name not in _m:
                    _m.append(matcher.plugin_name)
                    _plugin = nonebot.plugin.get_plugin(matcher.plugin_name)
                    try:
                        _module = _plugin.module
                        plugin_task = _module.__getattribute__("__plugin_task__")
                        for key in plugin_task.keys():
                            if key in self._task.keys():
                                raise ValueError(f"plugin_task:{key} 已存在!")
                            self._task[key] = plugin_task[key]
                    except AttributeError:
                        # Plugin declares no tasks — skip it.
                        pass
        bot = get_bot()
        if bot or group_id:
            if group_id:
                _group_list = [group_id]
            else:
                _group_list = [x["group_id"] for x in await bot.get_group_list()]
            for group_id in _group_list:
                group_id = str(group_id)
                if not self._data["group_manager"].get(group_id):
                    self._init_group(group_id)
                if not self._data["group_manager"][group_id].get("group_task_status"):
                    self._data["group_manager"][group_id]["group_task_status"] = {}
                # Fill in missing task entries with their configured default.
                for task in self._task:
                    if (
                        self._data["group_manager"][group_id]["group_task_status"].get(
                            task
                        )
                        is None
                    ):
                        self._data["group_manager"][group_id]["group_task_status"][
                            task
                        ] = Config.get_config('_task', f'DEFAULT_{task}', default=True)
                # Drop entries for tasks that no longer exist.
                for task in list(
                    self._data["group_manager"][group_id]["group_task_status"]
                ):
                    if task not in self._task:
                        del self._data["group_manager"][group_id]["group_task_status"][
                            task
                        ]
            self.save()

    def _set_plugin_status(
        self,
        module: str,
        status: str,
        group_id: int,
    ):
        """
        Set a plugin's switch state for a group.

        :param module: plugin module name
        :param status: "block" to disable, anything else to enable
        :param group_id: group id
        """
        group_id = str(group_id) if group_id else group_id
        if not self._data["group_manager"].get(group_id):
            self._init_group(group_id)
        if status == "block":
            if module not in self._data["group_manager"][group_id]["close_plugins"]:
                self._data["group_manager"][group_id]["close_plugins"].append(module)
        else:
            if module in self._data["group_manager"][group_id]["close_plugins"]:
                self._data["group_manager"][group_id]["close_plugins"].remove(module)
        self.save()

    def _init_group(self, group_id: str):
        """
        Create the default record for a group.

        :param group_id: group id (string form)
        """
        default_group_level = Config.get_config("group_manager", "DEFAULT_GROUP_LEVEL")
        if default_group_level is None:
            default_group_level = 5
        default_group_bot_status = Config.get_config(
            "group_manager", "DEFAULT_GROUP_BOT_STATUS"
        )
        # BUGFIX: same no-op as in check_group_bot_status — default to True
        # only when the config entry is missing.
        if default_group_bot_status is None:
            default_group_bot_status = True
        if not self._data["group_manager"].get(group_id):
            self._data["group_manager"][group_id] = {
                "level": default_group_level,
                "status": default_group_bot_status,
                "close_plugins": [],
                "group_task_status": {},
            }

    def _set_group_bot_status(self, group_id: Union[int, str], status: bool):
        """
        Set the group-wide bot switch.

        :param group_id: group id
        :param status: new state
        """
        group_id = str(group_id)
        if not self._data["group_manager"].get(group_id):
            self._init_group(group_id)
        self._data["group_manager"][group_id]["status"] = status
        self.save()

    def get_super_old_data(self) -> Optional[dict]:
        """
        Pop and return the legacy superuser block list (migration helper) —
        do not call in normal operation.
        """
        if self._data["super"].get("close_plugins"):
            _x = self._data["super"].get("close_plugins")
            del self._data["super"]["close_plugins"]
            return _x
        return None
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/group_manager.py
group_manager.py
from typing import Union, List, Optional
from .data_class import StaticData
from pathlib import Path
from ruamel.yaml import YAML
from services.log import logger
import shutil

yaml = YAML(typ="safe")


class ResourcesManager(StaticData):
    """
    Manager that records plugin resource relocations and moves/cleans the
    files on startup.
    """

    def __init__(self, file: Path):
        """
        :param file: backing storage file (loaded/saved by StaticData)
        """
        self.file = file
        super().__init__(file)
        # Directories registered for temporary-file cleanup.
        self._temp_dir = []
        self._abspath = Path()

    def add_resource(
        self, module: str, source_file: Union[str, Path], move_file: Union[str, Path]
    ):
        """
        Register a resource move (source path -> destination path).

        :param module: module name
        :param source_file: source file path
        :param move_file: destination path
        """
        if isinstance(source_file, Path):
            source_file = str(source_file.absolute())
        if isinstance(move_file, Path):
            move_file = str(move_file.absolute())
        if module not in self._data.keys():
            self._data[module] = {source_file: move_file}
        else:
            self._data[module][source_file] = move_file

    def remove_resource(
        self, module: str, source_file: Optional[Union[str, Path]] = None
    ):
        """
        Remove a registered resource (and delete its moved copy on disk).
        When source_file is None, every resource of the module is removed.

        :param module: module name
        :param source_file: source file path, or None for all of the module
        """
        if not source_file:
            if module in self._data.keys():
                # BUGFIX: iterate over a snapshot — the original deleted keys
                # while iterating .keys(), which raises
                # "RuntimeError: dictionary changed size during iteration".
                for x in list(self._data[module].keys()):
                    move_file = Path(self._data[module][x])
                    if move_file.exists():
                        shutil.rmtree(move_file.absolute(), ignore_errors=True)
                    logger.info(f"已清除插件 {module} 资源路径:{self._data[module][x]}")
                    del self._data[module][x]
        else:
            if isinstance(source_file, Path):
                source_file = str(source_file.absolute())
            if source_file:
                if (
                    module in self._data.keys()
                    and source_file in self._data[module].keys()
                ):
                    move_file = Path(self._data[module][source_file])
                    if move_file.exists():
                        shutil.rmtree(move_file.absolute(), ignore_errors=True)
                    del self._data[module][source_file]
        self.save()

    def start_move(self):
        """Perform every registered resource move, then persist the registry."""
        for module in self._data.keys():
            for source_path in self._data[module].keys():
                move_path = Path(self._data[module][source_path])
                try:
                    source_path = Path(source_path)
                    file_name = source_path.name
                    # The destination is <move dir>/<source name>.
                    move_path = move_path / file_name
                    move_path.mkdir(exist_ok=True, parents=True)
                    if source_path.exists():
                        # Replace an existing destination before moving.
                        if move_path.exists():
                            shutil.rmtree(str(move_path.absolute()), ignore_errors=True)
                        shutil.move(str(source_path.absolute()), str(move_path.absolute()))
                        logger.info(
                            f"移动资源文件路径 {source_path.absolute()} >>> {move_path.absolute()}"
                        )
                    elif not move_path.exists():
                        logger.warning(
                            f"移动资源路径文件{source_path.absolute()} >>>"
                            f" {move_path.absolute()} 失败,源文件不存在.."
                        )
                except Exception as e:
                    logger.error(
                        f"移动资源路径文件{source_path.absolute()} >>>"
                        f" {move_path.absolute()}失败,{type(e)}:{e}"
                    )
        self.save()

    def add_temp_dir(self, path: Union[str, Path]):
        """
        Register a directory for temporary-file cleanup.

        :param path: directory path
        """
        if isinstance(path, str):
            path = Path(path)
        self._temp_dir.append(path)

    def get_temp_data_dir(self) -> List[Path]:
        """Return every registered temporary-cleanup directory."""
        return self._temp_dir
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/resources_manager.py
resources_manager.py
from typing import Optional, Dict
from .data_class import StaticData
from utils.utils import DailyNumberLimiter
from services.log import logger
from pathlib import Path
from ruamel.yaml import YAML

yaml = YAML(typ="safe")


class Plugins2countManager(StaticData):
    """
    Manager for per-plugin daily invocation-count limits.
    """

    def __init__(self, file: Path):
        """
        :param file: YAML file holding a top-level ``PluginCountLimit`` section
        """
        self.file = file
        super().__init__(None)
        self._daily_limiter: Dict[str, DailyNumberLimiter] = {}
        if file.exists():
            with open(file, "r", encoding="utf8") as f:
                # Guard against an empty file: yaml.load() returns None.
                self._data = yaml.load(f) or {}
            if "PluginCountLimit" in self._data.keys():
                self._data = (
                    self._data["PluginCountLimit"]
                    if self._data["PluginCountLimit"]
                    else {}
                )

    def add_count_limit(
        self,
        plugin: str,
        *,
        max_count: int = 5,
        status: Optional[bool] = True,
        limit_type: Optional[str] = "user",
        rst: Optional[str] = None,
        data_dict: Optional[dict] = None,
    ):
        """
        Register a count limit for a plugin.

        :param plugin: plugin module name
        :param max_count: maximum daily invocations
        :param status: whether the limit is active
        :param limit_type: key used for counting — 'user' (user_id) or 'group' (group_id)
        :param rst: reply sent when the limit is hit; None means no reply
        :param data_dict: pre-packed dict overriding the keyword arguments
        """
        if data_dict:
            max_count = data_dict.get("max_count")
            status = data_dict.get("status")
            limit_type = data_dict.get("limit_type")
            rst = data_dict.get("rst")
        status = status if status is not None else True
        limit_type = limit_type if limit_type else "user"
        max_count = max_count if max_count is not None else 5
        if limit_type not in ["user", "group"]:
            raise ValueError(f"{plugin} 添加count限制错误,‘limit_type‘ 必须为 'user'/'group'")
        self._data[plugin] = {
            "max_count": max_count,
            "status": status,
            "limit_type": limit_type,
            "rst": rst,
        }

    def get_plugin_count_data(self, plugin: str) -> Optional[dict]:
        """
        Return a plugin's count-limit data, or None when no active limit exists.

        :param plugin: plugin module name
        """
        if self.check_plugin_count_status(plugin):
            return self._data[plugin]
        return None

    def get_plugin_data(self, plugin: str) -> Optional[dict]:
        """
        Return a plugin's raw limit record, active or not.

        :param plugin: plugin module name
        """
        if self._data.get(plugin) is not None:
            return self._data.get(plugin)
        return None

    def check_plugin_count_status(self, plugin: str) -> bool:
        """
        Return whether a plugin has an active count limit.

        :param plugin: plugin module name
        """
        return (
            plugin in self._data.keys()
            and self._data[plugin]["status"]
            and self._data[plugin]["max_count"] > 0
        )

    def check(self, plugin: str, id_: int) -> bool:
        """
        Return whether the given id may still invoke the plugin today.
        Plugins without a loaded limiter are always allowed.

        :param plugin: plugin module name
        :param id_: limited id (user or group)
        """
        if self._daily_limiter.get(plugin):
            return self._daily_limiter[plugin].check(id_)
        return True

    def increase(self, plugin: str, id_: int, num: int = 1):
        """
        Add to the daily invocation count.

        :param plugin: plugin module name
        :param id_: limited id (user or group)
        :param num: amount to add
        """
        if self._daily_limiter.get(plugin):
            self._daily_limiter[plugin].increase(id_, num)

    def reload_count_limit(self):
        """Rebuild the in-memory limiters from the current data."""
        for plugin in self._data:
            if self.check_plugin_count_status(plugin):
                self._daily_limiter[plugin] = DailyNumberLimiter(
                    self.get_plugin_count_data(plugin)["max_count"]
                )
        logger.info(f"已成功加载 {len(self._daily_limiter)} 个Count限制.")

    def reload(self):
        """Reload limits from the backing file and rebuild the limiters."""
        if self.file.exists():
            with open(self.file, "r", encoding="utf8") as f:
                data: dict = yaml.load(f)
            # BUGFIX: the original subscripted unconditionally — an empty file
            # (yaml.load() -> None) or a null PluginCountLimit section crashed
            # here or left self._data as None, unlike __init__'s guarded load.
            data = data or {}
            self._data = data.get("PluginCountLimit") or {}
            self.reload_count_limit()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/plugins2count_manager.py
plugins2count_manager.py
from typing import Optional, Dict
from .data_class import StaticData
from services.log import logger
from utils.utils import UserBlockLimiter
from pathlib import Path
from ruamel.yaml import YAML

yaml = YAML(typ="safe")


class Plugins2blockManager(StaticData):
    """
    Manager for per-plugin re-entrancy ("block") limits: while a limited id
    is inside a plugin, further calls from that id are rejected.
    """

    def __init__(self, file: Path):
        """
        :param file: YAML file holding a top-level ``PluginBlockLimit`` section
        """
        self.file = file
        super().__init__(None)
        self._block_limiter: Dict[str, UserBlockLimiter] = {}
        if file.exists():
            with open(file, "r", encoding="utf8") as f:
                # Guard against an empty file: yaml.load() returns None.
                self._data = yaml.load(f) or {}
            if "PluginBlockLimit" in self._data.keys():
                self._data = (
                    self._data["PluginBlockLimit"]
                    if self._data["PluginBlockLimit"]
                    else {}
                )

    def add_block_limit(
        self,
        plugin: str,
        status: Optional[bool] = True,
        check_type: Optional[str] = "all",
        limit_type: Optional[str] = "user",
        rst: Optional[str] = None,
        data_dict: Optional[dict] = None,
    ):
        """
        Register a block limit for a plugin.

        :param plugin: plugin module name
        :param status: whether the limit is active
        :param check_type: where it applies — 'private'/'group'/'all'
        :param limit_type: key used for blocking — 'user' (user_id) or 'group' (group_id)
        :param rst: reply sent when blocked; None means no reply
        :param data_dict: pre-packed dict overriding the keyword arguments
        """
        if data_dict:
            status = data_dict.get("status")
            check_type = data_dict.get("check_type")
            limit_type = data_dict.get("limit_type")
            rst = data_dict.get("rst")
        status = status if status is not None else True
        check_type = check_type if check_type else "all"
        limit_type = limit_type if limit_type else "user"
        if check_type not in ["all", "group", "private"]:
            raise ValueError(
                f"{plugin} 添加block限制错误,‘check_type‘ 必须为 'private'/'group'/'all'"
            )
        if limit_type not in ["user", "group"]:
            raise ValueError(f"{plugin} 添加block限制错误,‘limit_type‘ 必须为 'user'/'group'")
        self._data[plugin] = {
            "status": status,
            "check_type": check_type,
            "limit_type": limit_type,
            "rst": rst,
        }

    def get_plugin_block_data(self, plugin: str) -> Optional[dict]:
        """
        Return a plugin's block-limit data, or None when no active limit exists.

        :param plugin: plugin module name
        """
        if self.check_plugin_block_status(plugin):
            return self._data[plugin]
        return None

    def check_plugin_block_status(self, plugin: str) -> bool:
        """
        Return whether a plugin has an active block limit.

        :param plugin: plugin module name
        """
        return plugin in self._data.keys() and self._data[plugin]["status"]

    def check(self, id_: int, plugin: str) -> bool:
        """
        Return whether the id is currently marked as inside the plugin.
        Plugins without a loaded limiter report False (not blocked).

        :param id_: limited id (user or group)
        :param plugin: plugin module name
        """
        if self._block_limiter.get(plugin):
            return self._block_limiter[plugin].check(id_)
        return False

    def set_true(self, id_: int, plugin: str):
        """
        Mark the id as inside the plugin (block further calls).

        :param id_: limited id (user or group)
        :param plugin: plugin module name
        """
        if self._block_limiter.get(plugin):
            self._block_limiter[plugin].set_true(id_)

    def set_false(self, id_: int, plugin: str):
        """
        Clear the id's block for the plugin.

        :param id_: limited id (user or group)
        :param plugin: plugin module name
        """
        if self._block_limiter.get(plugin):
            self._block_limiter[plugin].set_false(id_)

    def reload_block_limit(self):
        """Rebuild the in-memory limiters from the current data."""
        for plugin in self._data:
            if self.check_plugin_block_status(plugin):
                self._block_limiter[plugin] = UserBlockLimiter()
        logger.info(f"已成功加载 {len(self._block_limiter)} 个Block限制.")

    def reload(self):
        """Reload limits from the backing file and rebuild the limiters."""
        if self.file.exists():
            with open(self.file, "r", encoding="utf8") as f:
                data: dict = yaml.load(f)
            # BUGFIX: the original subscripted unconditionally — an empty file
            # (yaml.load() -> None) or a null PluginBlockLimit section crashed
            # here or left self._data as None, unlike __init__'s guarded load.
            data = data or {}
            self._data = data.get("PluginBlockLimit") or {}
            self.reload_block_limit()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/plugins2block_manager.py
plugins2block_manager.py
from typing import Optional, Dict
from .data_class import StaticData
from utils.utils import FreqLimiter
from services.log import logger
from pathlib import Path
from ruamel.yaml import YAML

yaml = YAML(typ="safe")


class Plugins2cdManager(StaticData):
    """
    Manager for per-plugin cooldown (cd) limits.
    """

    def __init__(self, file: Path):
        """
        :param file: YAML file holding a top-level ``PluginCdLimit`` section
        """
        self.file = file
        super().__init__(None)
        self._freq_limiter: Dict[str, FreqLimiter] = {}
        if file.exists():
            with open(file, "r", encoding="utf8") as f:
                # Guard against an empty file: yaml.load() returns None.
                self._data = yaml.load(f) or {}
            if "PluginCdLimit" in self._data.keys():
                self._data = (
                    self._data["PluginCdLimit"] if self._data["PluginCdLimit"] else {}
                )

    def add_cd_limit(
        self,
        plugin: str,
        *,
        cd: Optional[int] = 5,
        status: Optional[bool] = True,
        check_type: Optional[str] = "all",
        limit_type: Optional[str] = "user",
        rst: Optional[str] = None,
        data_dict: Optional[dict] = None,
    ):
        """
        Register a cooldown for a plugin.

        :param plugin: plugin module name
        :param cd: cooldown length in seconds
        :param status: whether the limit is active
        :param check_type: where it applies — 'private'/'group'/'all'
        :param limit_type: key used for the cooldown — 'user' (user_id) or 'group' (group_id)
        :param rst: reply sent while cooling down; None means no reply
        :param data_dict: pre-packed dict overriding the keyword arguments
        """
        if data_dict:
            cd = data_dict.get("cd")
            status = data_dict.get("status")
            check_type = data_dict.get("check_type")
            limit_type = data_dict.get("limit_type")
            rst = data_dict.get("rst")
        cd = cd if cd is not None else 5
        status = status if status is not None else True
        check_type = check_type if check_type else "all"
        limit_type = limit_type if limit_type else "user"
        if check_type not in ["all", "group", "private"]:
            raise ValueError(
                f"{plugin} 添加cd限制错误,‘check_type‘ 必须为 'private'/'group'/'all'"
            )
        if limit_type not in ["user", "group"]:
            raise ValueError(f"{plugin} 添加cd限制错误,‘limit_type‘ 必须为 'user'/'group'")
        self._data[plugin] = {
            "cd": cd,
            "status": status,
            "check_type": check_type,
            "limit_type": limit_type,
            "rst": rst,
        }

    def get_plugin_cd_data(self, plugin: str) -> Optional[dict]:
        """
        Return a plugin's cd data, or None when no active limit exists.

        :param plugin: plugin module name
        """
        if self.check_plugin_cd_status(plugin):
            return self._data[plugin]
        return None

    def check_plugin_cd_status(self, plugin: str) -> bool:
        """
        Return whether a plugin has an active cooldown limit.

        :param plugin: plugin module name
        """
        return (
            plugin in self._data.keys()
            and self._data[plugin]["cd"] > 0
            and self._data[plugin]["status"]
        )

    def check(self, plugin: str, id_: int) -> bool:
        """
        Return whether the id is out of cooldown for the plugin.

        NOTE(review): plugins without a loaded limiter report False here,
        whereas Plugins2countManager.check defaults to True — callers appear
        to gate on check_plugin_cd_status first, so this asymmetry is kept.

        :param plugin: plugin module name
        :param id_: limited id (user or group)
        """
        if self._freq_limiter.get(plugin):
            return self._freq_limiter[plugin].check(id_)
        return False

    def start_cd(self, plugin: str, id_: int, cd: int = 0):
        """
        Start the cooldown for an id.

        :param plugin: plugin module name
        :param id_: limited id (user or group)
        :param cd: cooldown length override
        """
        if self._freq_limiter.get(plugin):
            self._freq_limiter[plugin].start_cd(id_, cd)

    def get_plugin_data(self, plugin: str) -> dict:
        """
        Return a plugin's raw limit record, active or not ({} when unknown).

        :param plugin: plugin module name
        """
        if self._data.get(plugin) is not None:
            return self._data.get(plugin)
        return {}

    def reload_cd_limit(self):
        """Rebuild the in-memory limiters from the current data."""
        for plugin in self._data:
            if self.check_plugin_cd_status(plugin):
                self._freq_limiter[plugin] = FreqLimiter(
                    self.get_plugin_cd_data(plugin)["cd"]
                )
        logger.info(f"已成功加载 {len(self._freq_limiter)} 个Cd限制.")

    def reload(self):
        """Reload limits from the backing file and rebuild the limiters."""
        if self.file.exists():
            with open(self.file, "r", encoding="utf8") as f:
                data: dict = yaml.load(f)
            # BUGFIX: the original subscripted unconditionally — an empty file
            # (yaml.load() -> None) or a null PluginCdLimit section crashed
            # here or left self._data as None, unlike __init__'s guarded load.
            data = data or {}
            self._data = data.get("PluginCdLimit") or {}
            self.reload_cd_limit()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/plugins2cd_manager.py
plugins2cd_manager.py
from typing import Union, Optional
from pathlib import Path

try:
    import ujson as json
except ModuleNotFoundError:
    # ujson is a drop-in speedup; fall back to the stdlib on machines without it.
    import json

try:
    from ruamel.yaml import YAML

    yaml = YAML(typ="safe")
except ModuleNotFoundError:
    # ruamel is only required for yaml-backed data files; defer the failure
    # until a yaml file is actually loaded so json-only usage still imports.
    yaml = None


class StaticData:
    """
    静态数据共享类

    以 dict 为内存模型,绑定一个可选的 json/yaml 文件做持久化。
    """

    def __init__(self, file: Optional[Path]):
        self._data: dict = {}
        # Always bind self.file (possibly None) so save()/reload()/is_exists()
        # never raise AttributeError on a file-less instance.
        self.file = file
        if file:
            file.parent.mkdir(exist_ok=True, parents=True)
            if file.exists():
                with open(file, "r", encoding="utf8") as f:
                    text = f.read()
                if file.name.endswith("json"):
                    # Read the content once and parse from the string: the old
                    # code called f.read() after json.load had consumed the
                    # stream, so invalid files were silently accepted.
                    if text.strip():
                        try:
                            self._data: dict = json.loads(text)
                        except ValueError:
                            raise ValueError(f"{file} 文件加载错误,请检查文件内容格式.")
                elif file.name.endswith("yaml"):
                    self._data = yaml.load(text)

    def set(self, key, value):
        """写入并立即持久化(未绑定文件时仅写内存)"""
        self._data[key] = value
        self.save()

    def set_module_data(self, module, key, value):
        """写入 module 下的子项;module 不存在时静默忽略"""
        if module in self._data.keys():
            self._data[module][key] = value
            self.save()

    def get(self, key):
        return self._data.get(key)

    def keys(self):
        return self._data.keys()

    def delete(self, key):
        """删除键(不持久化,与既有行为一致);不存在时静默忽略"""
        if self._data.get(key) is not None:
            del self._data[key]

    def get_data(self) -> dict:
        return self._data

    def save(self, path: Union[str, Path] = None):
        """
        持久化到 path(默认绑定文件);未绑定文件时为空操作
        NOTE(review): output is always json, even for yaml-backed instances —
        preserved from the original implementation; confirm callers expect this.
        """
        path = path if path else self.file
        if isinstance(path, str):
            path = Path(path)
        if path:
            with open(path, "w", encoding="utf8") as f:
                json.dump(self._data, f, ensure_ascii=False, indent=4)

    def reload(self):
        """从绑定文件重新加载;文件缺失或未绑定时不做任何事"""
        # Context managers here: the old code leaked the open() handles.
        if self.file and self.file.exists():
            with open(self.file, "r", encoding="utf8") as f:
                if self.file.name.endswith("json"):
                    self._data: dict = json.load(f)
                elif self.file.name.endswith("yaml"):
                    self._data: dict = yaml.load(f)

    def is_exists(self):
        """绑定文件是否存在;未绑定时返回 False"""
        return self.file is not None and self.file.exists()

    def is_empty(self):
        # NOTE(review): the return value is inverted relative to the name
        # (truthy when data is NON-empty). Existing callers may depend on it,
        # so the behavior is preserved — confirm before fixing the semantics.
        return bool(len(self._data))

    def __str__(self):
        return str(self._data)

    def __setitem__(self, key, value):
        self._data[key] = value

    def __getitem__(self, key):
        return self._data[key]
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/data_class.py
data_class.py
from typing import Optional
from pathlib import Path
from .data_class import StaticData
from . import group_manager


class PluginsManager(StaticData):
    """
    插件 管理器
    """

    def __init__(self, file: Path):
        super().__init__(file)
        if not self._data:
            self._data = {}

    def add_plugin_data(
        self,
        module: str,
        plugin_name: str,
        *,
        status: Optional[bool] = True,
        error: Optional[bool] = False,
        block_type: Optional[str] = None,
        author: Optional[str] = None,
        version: Optional[int] = None,
    ):
        """
        添加插件数据
        :param module: 模块名称
        :param plugin_name: 插件名称
        :param status: 插件开关状态
        :param error: 加载状态
        :param block_type: 限制类型
        :param author: 作者
        :param version: 版本
        """
        # Overwrites any existing record for this module unconditionally.
        self._data[module] = {
            "plugin_name": plugin_name,
            "status": status,
            "error": error,
            "block_type": block_type,
            "author": author,
            "version": version,
        }

    def block_plugin(
        self, module: str, group_id: Optional[int] = None, block_type: str = "all"
    ):
        """
        说明:
            锁定插件
        参数:
            :param module: 功能模块名
            :param group_id: 群组,None时为超级用户禁用
            :param block_type: 限制类型
        """
        self._set_plugin_status(module, "block", group_id, block_type)

    def unblock_plugin(self, module: str, group_id: Optional[int] = None):
        """
        说明:
            解锁插件
        参数:
            :param module: 功能模块名
            :param group_id: 群组
        """
        self._set_plugin_status(module, "unblock", group_id)

    def get_plugin_status(
        self, module: str, block_type: str = "all"
    ) -> bool:
        """
        说明:
            获取插件状态(True 表示该检查类型下可用)
        参数:
            :param module: 功能模块名
            :param block_type: 限制类型
        """
        # Unknown modules default to enabled (True).
        if module in self._data.keys():
            if self._data[module]["block_type"] == "all" and block_type == "all":
                return False
            else:
                # Enabled unless the stored block_type matches the queried one;
                # a module blocked for e.g. "group" still reads as enabled for "all".
                return not self._data[module]["block_type"] == block_type
        return True

    def get_plugin_block_type(self, module: str) -> str:
        """
        说明:
            获取功能限制类型(未登记的模块返回空字符串)
        参数:
            :param module: 模块名称
        """
        if module in self._data.keys():
            return self._data[module]["block_type"]
        return ""

    def get_plugin_error_status(self, module: str) -> bool:
        """
        插件是否成功加载
        :param module: 模块名称
        """
        # Lazily registers unknown modules with default (non-error) state.
        if module not in self._data.keys():
            self.init_plugin(module)
        return self._data[module]["error"]

    def _set_plugin_status(
        self,
        module: str,
        status: str,
        group_id: Optional[str],
        block_type: str = "all",
    ):
        """
        说明:
            设置功能开关状态
        参数:
            :param module: 功能模块名
            :param status: 功能状态 ("block"/"unblock")
            :param group_id: 群组
            :param block_type: 限制类型
        """
        # NOTE(review): annotated Optional[str] but block_plugin/unblock_plugin
        # pass Optional[int]; the str()/int() round-trip below absorbs either —
        # confirm the intended type.
        group_id = str(group_id) if group_id else group_id
        if module:
            if group_id:
                # Per-group block is delegated to group_manager under a
                # "<module>:super" key; nothing is written to self._data.
                if status == "block":
                    group_manager.block_plugin(f"{module}:super", int(group_id))
                else:
                    group_manager.unblock_plugin(f"{module}:super", int(group_id))
            else:
                if module not in self._data.keys():
                    self.init_plugin(module)
                if status == "block":
                    self._data[module]["status"] = False
                    self._data[module]["block_type"] = block_type
                else:
                    if module in self._data.keys():
                        self._data[module]["status"] = True
                        self._data[module]["block_type"] = None
                self.save()

    def init_plugin(self, module: str):
        """
        初始化插件数据(已存在时不覆盖)
        :param module: 模块名称
        """
        if module not in self._data.keys():
            self._data[module] = {
                "plugin_name": module,
                "status": True,
                "error": False,
                "block_type": None,
                "author": None,
                "version": None,
            }
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/plugins_manager.py
plugins_manager.py
from utils.manager.data_class import StaticData
from nonebot.adapters.onebot.v11 import Bot
from nonebot.adapters.onebot.v11.exception import ActionFailed
from services.log import logger
from typing import Optional
from utils.image_utils import BuildImage
from utils.utils import get_user_avatar
from pathlib import Path
from io import BytesIO


class RequestManager(StaticData):
    """
    好友请求/邀请请求 管理
    """

    def __init__(self, file: Optional[Path]):
        super().__init__(file)
        if not self._data:
            self._data = {"private": {}, "group": {}}

    def add_request(
        self,
        id_: int,
        type_: str,
        flag: str,
        *,
        nickname: Optional[str] = None,
        level: Optional[int] = None,
        sex: Optional[str] = None,
        age: Optional[str] = None,
        from_: Optional[str] = "",
        comment: Optional[str] = None,
        invite_group: Optional[int] = None,
        group_name: Optional[str] = None,
    ):
        """
        添加一个请求
        :param id_: id,用户id或群id
        :param type_: 类型,private 或 group
        :param flag: event.flag
        :param nickname: 用户昵称
        :param level: 等级
        :param sex: 性别
        :param age: 年龄
        :param from_: 请求来自
        :param comment: 附加消息
        :param invite_group: 邀请群聊
        :param group_name: 群聊名称
        """
        # Requests are keyed by a stringified running index (current length).
        self._data[type_][str(len(self._data[type_].keys()))] = {
            "id": id_,
            "flag": flag,
            "nickname": nickname,
            "level": level,
            "sex": sex,
            "age": age,
            "from": from_,
            "comment": comment,
            "invite_group": invite_group,
            "group_name": group_name,
        }
        self.save()

    def remove_request(self, type_: str, id_: int):
        """
        删除一个请求数据
        :param type_: 类型
        :param id_: id,user_id 或 group_id
        """
        # Deleting during iteration is safe here only because of the
        # immediate break; at most one matching entry is removed.
        for x in self._data[type_].keys():
            if self._data[type_][x].get("id") == id_:
                del self._data[type_][x]
                break
        self.save()

    def get_group_id(self, id_: int) -> Optional[int]:
        """
        通过id获取群号
        :param id_: id
        """
        # NOTE(review): _data["group"] is keyed by stringified indices and its
        # values are request dicts, so .get(id_) with an int id looks like it
        # always returns None — confirm the intended lookup.
        return self._data["group"].get(id_)

    async def approve(self, bot: Bot, id_: int, type_: str) -> Optional[int]:
        """
        同意请求
        :param bot: Bot
        :param id_: id
        :param type_: 类型,private 或 group
        """
        return await self._set_add_request(bot, id_, type_, True)

    async def refused(self, bot: Bot, id_: int, type_: str) -> Optional[int]:
        """
        拒绝请求
        :param bot: Bot
        :param id_: id
        :param type_: 类型,private 或 group
        """
        return await self._set_add_request(bot, id_, type_, False)

    def clear(self):
        """
        清空所有请求信息,无视请求
        """
        self._data = {"private": {}, "group": {}}
        self.save()

    def set_group_name(self, group_name: str, group_id: int):
        """
        设置群聊名称
        :param group_name: 名称
        :param group_id: id
        """
        # Only the first request matching this invite_group is updated.
        for id_ in self._data["group"].keys():
            if self._data["group"][id_]["invite_group"] == group_id:
                self._data["group"][id_]["group_name"] = group_name
                break
        self.save()

    async def show(self, type_: str) -> Optional[str]:
        """
        请求可视化
        """
        # Renders each pending request as a 500x100 card (avatar, nickname,
        # age tag, comment/source) stacked vertically, newest first, and
        # returns the composed image as a base64 string (None when empty).
        data = self._data[type_]
        if not data:
            return None
        img_list = []
        id_list = list(data.keys())
        id_list.reverse()
        for id_ in id_list:
            age = data[id_]["age"]
            nickname = data[id_]["nickname"]
            comment = data[id_]["comment"] if type_ == "private" else ""
            from_ = data[id_]["from"]
            sex = data[id_]["sex"]
            ava = BuildImage(
                80, 80, background=BytesIO(await get_user_avatar(data[id_]["id"]))
            )
            ava.circle()
            age_bk = BuildImage(
                len(str(age)) * 10 - 5,
                15,
                color="#04CAF7" if sex == "male" else "#F983C1",
            )
            age_bk.text((3, 1), f"{age}", fill=(255, 255, 255))
            x = BuildImage(
                90, 32, font_size=15, color="#EEEFF4", font="HYWenHei-85W.ttf"
            )
            x.text((0, 0), "同意/拒绝", center_type="center")
            x.circle_corner(10)
            A = BuildImage(500, 100, font_size=24, font="msyh.ttf")
            A.paste(ava, (15, 0), alpha=True, center_type="by_height")
            A.text((120, 15), nickname)
            A.paste(age_bk, (120, 50), True)
            A.paste(
                BuildImage(
                    200,
                    0,
                    font_size=12,
                    plain_text=f"对方留言:{comment}",
                    font_color=(140, 140, 143),
                ),
                (120 + age_bk.w + 10, 49),
                True,
            )
            if type_ == "private":
                A.paste(
                    BuildImage(
                        200,
                        0,
                        font_size=12,
                        plain_text=f"来源:{from_}",
                        font_color=(140, 140, 143),
                    ),
                    (120, 70),
                    True,
                )
            else:
                A.paste(
                    BuildImage(
                        200,
                        0,
                        font_size=12,
                        plain_text=f"邀请你加入:{data[id_]['group_name']}({data[id_]['invite_group']})",
                        font_color=(140, 140, 143),
                    ),
                    (120, 70),
                    True,
                )
            A.paste(x, (380, 35), True)
            A.paste(
                BuildImage(
                    0,
                    0,
                    plain_text=f"id:{id_}",
                    font_size=13,
                    font_color=(140, 140, 143),
                ),
                (400, 10),
                True,
            )
            img_list.append(A)
        A = BuildImage(500, len(img_list) * 100, 500, 100)
        for img in img_list:
            A.paste(img)
        bk = BuildImage(A.w, A.h + 50, color="#F8F9FB", font_size=20)
        bk.paste(A, (0, 50))
        bk.text(
            (15, 13), "好友请求" if type_ == "private" else "群聊请求", fill=(140, 140, 143)
        )
        return bk.pic2bs4()

    async def _set_add_request(
        self, bot: Bot, id_: int, type_: str, approve: bool
    ) -> Optional[int]:
        """
        处理请求
        :param bot: Bot
        :param id_: id
        :param type_: 类型,private 或 group
        :param approve: 是否同意
        :return: 处理成功时返回 user_id/群号,否则 None
        """
        id_ = str(id_)
        if id_ in self._data[type_]:
            try:
                if type_ == "private":
                    await bot.set_friend_add_request(
                        flag=self._data[type_][id_]["flag"], approve=approve
                    )
                    rid = self._data[type_][id_]["id"]
                else:
                    await bot.set_group_add_request(
                        flag=self._data[type_][id_]["flag"],
                        sub_type="invite",
                        approve=approve,
                    )
                    rid = self._data[type_][id_]["invite_group"]
            except ActionFailed:
                # NOTE(review): the log text always says "同意" even when
                # approve=False (refusal) — runtime string, left untouched.
                logger.info(
                    f"同意{self._data[type_][id_]['nickname']}({self._data[type_][id_]['id']})"
                    f"的{'好友' if type_ == 'private' else '入群'}请求失败了..."
                )
                return None
            logger.info(
                f"同意{self._data[type_][id_]['nickname']}({self._data[type_][id_]['id']})"
                f"的{'好友' if type_ == 'private' else '入群'}请求..."
            )
            del self._data[type_][id_]
            self.save()
            return rid
        return None
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/requests_manager.py
requests_manager.py
from typing import Optional
from .group_manager import GroupManager
from .data_class import StaticData
from .withdraw_message_manager import WithdrawMessageManager
from .plugins2cd_manager import Plugins2cdManager
from .plugins2block_manager import Plugins2blockManager
from .plugins2count_manager import Plugins2countManager
from .plugins2settings_manager import Plugins2settingsManager
from .plugins_manager import PluginsManager
from .resources_manager import ResourcesManager
from .admin_manager import AdminManager
from .none_plugin_count_manager import NonePluginCountManager
from .requests_manager import RequestManager
from configs.path_config import DATA_PATH

# Module-level singletons: each manager is constructed once at import time and
# shared across the whole bot. State files live under DATA_PATH/manager, while
# user-editable configuration lives under DATA_PATH/configs.

# 群功能开关 | 群被动技能 | 群权限 管理
group_manager: Optional[GroupManager] = GroupManager(
    DATA_PATH / "manager" / "group_manager.json"
)

# 撤回消息管理
withdraw_message_manager: Optional[WithdrawMessageManager] = WithdrawMessageManager()

# 插件管理
plugins_manager: Optional[PluginsManager] = PluginsManager(
    DATA_PATH / "manager" / "plugins_manager.json"
)

# 插件基本设置管理
plugins2settings_manager: Optional[Plugins2settingsManager] = Plugins2settingsManager(
    DATA_PATH / "configs" / "plugins2settings.yaml"
)

# 插件命令 cd 管理
plugins2cd_manager: Optional[Plugins2cdManager] = Plugins2cdManager(
    DATA_PATH / "configs" / "plugins2cd.yaml"
)

# 插件命令 阻塞 管理
plugins2block_manager: Optional[Plugins2blockManager] = Plugins2blockManager(
    DATA_PATH / "configs" / "plugins2block.yaml"
)

# 插件命令 每次次数限制 管理
plugins2count_manager: Optional[Plugins2countManager] = Plugins2countManager(
    DATA_PATH / "configs" / "plugins2count.yaml"
)

# 资源管理
resources_manager: Optional[ResourcesManager] = ResourcesManager(
    DATA_PATH / "manager" / "resources_manager.json"
)

# 插件加载容忍管理
none_plugin_count_manager: Optional[NonePluginCountManager] = NonePluginCountManager(
    DATA_PATH / "manager" / "none_plugin_count_manager.json"
)

# 好友请求/群聊邀请 管理
requests_manager: Optional[RequestManager] = RequestManager(
    DATA_PATH / "manager" / "requests_manager.json"
)

# 管理员命令管理器
admin_manager = AdminManager()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/__init__.py
__init__.py
from typing import List, Optional, Union, Tuple
from .data_class import StaticData
from pathlib import Path
from ruamel.yaml import YAML

yaml = YAML(typ="safe")


class Plugins2settingsManager(StaticData):
    """
    插件命令阻塞 管理器
    """

    def __init__(self, file: Path):
        self.file = file
        super().__init__(None)
        self._load_file()

    def _load_file(self):
        """读取本地 yaml 配置并归一化为 {module: settings} 结构"""
        if self.file.exists():
            with open(self.file, "r", encoding="utf8") as f:
                self._data = yaml.load(f)
        if self._data:
            if "PluginSettings" in self._data.keys():
                self._data = (
                    self._data["PluginSettings"]
                    if self._data["PluginSettings"]
                    else {}
                )
            # Backfill cost_gold for entries written before the field existed.
            for x in self._data.keys():
                if self._data[x].get("cost_gold") is None:
                    self._data[x]["cost_gold"] = 0
        else:
            # yaml.load returns None for an empty file; normalize to {}.
            self._data = {}

    def add_plugin_settings(
        self,
        plugin: str,
        cmd: Optional[List[str]] = None,
        default_status: Optional[bool] = True,
        level: Optional[int] = 5,
        limit_superuser: Optional[bool] = False,
        plugin_type: Tuple[Union[str, int]] = ("normal",),
        cost_gold: int = 0,
        **kwargs
    ):
        """
        添加一个插件设置
        :param plugin: 插件模块名称
        :param cmd: 命令 或 命令别名
        :param default_status: 默认开关状态
        :param level: 功能权限等级
        :param limit_superuser: 功能状态是否限制超级用户
        :param plugin_type: 插件类型
        :param cost_gold: 需要消费的金币
        """
        # When a settings dict is splatted in via **kwargs it wins over the
        # positional defaults, with per-field fallbacks.
        if kwargs:
            level = kwargs.get("level") if kwargs.get("level") is not None else 5
            default_status = (
                kwargs.get("default_status")
                if kwargs.get("default_status") is not None
                else True
            )
            limit_superuser = (
                kwargs.get("limit_superuser")
                if kwargs.get("limit_superuser") is not None
                else False
            )
            cmd = kwargs.get("cmd") if kwargs.get("cmd") is not None else []
            # BUG FIX: previously `cost_gold if kwargs.get("cost_gold") else 0`
            # kept the keyword argument and discarded the kwargs entry, so a
            # cost_gold supplied via kwargs was silently ignored.
            cost_gold = (
                kwargs.get("cost_gold") if kwargs.get("cost_gold") is not None else 0
            )
        self._data[plugin] = {
            "level": level if level is not None else 5,
            "default_status": default_status if default_status is not None else True,
            "limit_superuser": limit_superuser
            if limit_superuser is not None
            else False,
            "cmd": cmd,
            "plugin_type": list(
                plugin_type if plugin_type is not None else ("normal",)
            ),
            "cost_gold": cost_gold,
        }

    def get_plugin_data(self, module: str) -> dict:
        """
        通过模块名获取数据;未配置时返回 {}
        :param module: 模块名称
        """
        if self._data.get(module) is not None:
            return self._data.get(module)
        return {}

    def get_plugin_module(
        self, cmd: str, is_all: bool = False
    ) -> Union[str, List[str]]:
        """
        根据 cmd 获取功能 modules
        :param cmd: 命令
        :param is_all: 获取全部包含cmd的模块
        :return: is_all 为 False 时返回第一个匹配的模块名(无匹配返回空列表),
                 为 True 时返回全部匹配的模块名列表
        """
        keys = []
        for key in self._data.keys():
            if cmd in self._data[key]["cmd"]:
                if is_all:
                    keys.append(key)
                else:
                    return key
        return keys

    def reload(self):
        """
        重载本地数据
        """
        # Reuse the guarded loader: the previous implementation subscripted
        # self._data["PluginSettings"] unconditionally and crashed on empty
        # files or a missing key.
        self._load_file()
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/utils/manager/plugins2settings_manager.py
plugins2settings_manager.py
from typing import Optional, Any, Union
from pathlib import Path
from ruamel.yaml import YAML
from ruamel import yaml


class ConfigsManager:
    """
    插件配置 与 资源 管理器
    """

    def __init__(self, file: Path):
        self._data: dict = {}
        self._admin_level_data = []
        if file:
            file.parent.mkdir(exist_ok=True, parents=True)
            self.file = file
            if file.exists():
                # Round-trip loader (default YAML()) so comments survive save().
                _yaml = YAML()
                with open(file, "r", encoding="utf8") as f:
                    self._data = _yaml.load(f)

    def add_plugin_config(
        self,
        module: str,
        key: str,
        value: Optional[Any],
        *,
        name: Optional[str] = None,
        help_: Optional[str] = None,
        default_value: Optional[str] = None,
        _override: bool = False,
    ):
        """
        为插件添加一个配置,不会被覆盖,只有第一个生效
        :param module: 模块
        :param key: 键
        :param value: 值
        :param name: 插件名称
        :param help_: 配置注解
        :param default_value: 默认值
        :param _override: 覆盖前值
        """
        # First-write-wins unless _override is set.
        if (
            not (module in self._data.keys() and self._data[module].get(key))
            or _override
        ):
            _module = None
            # "module:submodule" syntax binds the key to a level module.
            if ":" in module:
                module = module.split(":")
                _module = module[-1]
                module = module[0]
            # "[LEVEL]" keys additionally register an admin-level entry.
            if "[LEVEL]" in key and _module:
                key = key.replace("[LEVEL]", "").strip()
                self._admin_level_data.append((_module, value))
            if self._data.get(module) is None:
                self._data[module] = {}
            # Keys are normalized to upper case on write (and in get_config).
            key = key.upper()
            self._data[module][key] = {
                "value": value,
                "name": name.strip() if isinstance(name, str) else name,
                "help": help_.strip() if isinstance(help_, str) else help_,
                "default_value": default_value,
                "level_module": _module,
            }

    def remove_plugin_config(self, module: str):
        """
        为插件删除一个配置
        :param module: 模块名
        """
        if module in self._data.keys():
            del self._data[module]

    def set_config(self, module: str, key: str, value: str):
        """
        设置配置值
        :param module: 模块名
        :param key: 配置名称
        :param value: 值
        """
        if module in self._data.keys():
            if self._data[module].get(key) is not None:
                self._data[module][key]["value"] = value

    def set_help(self, module: str, key: str, help_: str):
        """
        设置配置注释
        :param module: 模块名
        :param key: 配置名称
        :param help_: 注释文本
        """
        if module in self._data.keys():
            if self._data[module].get(key) is not None:
                self._data[module][key]["help"] = help_

    def set_default_value(self, module: str, key: str, value: str):
        """
        设置配置默认值
        :param module: 模块名
        :param key: 配置名称
        :param value: 值
        """
        if module in self._data.keys():
            if self._data[module].get(key) is not None:
                self._data[module][key]["default_value"] = value

    def get_config(
        self, module: str, key: str, default: Optional[Any] = None
    ) -> Optional[Any]:
        """
        获取指定配置值
        :param module: 模块名
        :param key: 配置名称
        :param default: 没有key值内容的默认返回值
        """
        key = key.upper()
        if module in self._data.keys():
            # Also probes the "<KEY> [LEVEL]" variant; note the loop rebinds
            # the `key` name intentionally.
            for key in [key, f"{key} [LEVEL]"]:
                if self._data[module].get(key) is not None:
                    # A stored value of None falls back to the declared default.
                    if self._data[module][key]["value"] is None:
                        return self._data[module][key]["default_value"]
                    return self._data[module][key]["value"]
        if default is not None:
            return default
        return None

    def get_level2module(self, module: str, key: str) -> Optional[str]:
        """
        获取指定key所绑定的module,一般为权限等级
        :param module: 模块名
        :param key: 配置名称
        :return:
        """
        if self._data.get(module) is not None:
            if self._data[module].get(key) is not None:
                return self._data[module][key].get("level_module")

    def get(self, key: str):
        """
        获取插件配置数据
        :param key: 名称
        """
        if key in self._data.keys():
            return self._data[key]

    def save(self, path: Union[str, Path] = None):
        """
        保存数据
        :param path: 路径
        """
        path = path if path else self.file
        with open(path, "w", encoding="utf8") as f:
            # NOTE(review): uses ruamel's legacy PyYAML-style dump API
            # (Dumper=RoundTripDumper), deprecated in newer ruamel releases.
            yaml.dump(
                self._data, f, indent=2, Dumper=yaml.RoundTripDumper, allow_unicode=True
            )

    def reload(self):
        """
        重新加载配置文件
        """
        _yaml = YAML()
        # Merges user-edited values from configs/config.yaml back into _data,
        # then rewrites the file.
        temp_file = Path() / "configs" / "config.yaml"
        if temp_file.exists():
            with open(temp_file, "r", encoding="utf8") as f:
                temp = _yaml.load(f)
                # NOTE(review): raises KeyError if config.yaml contains a
                # module/key that was never registered in _data — confirm
                # whether stale entries can occur in practice.
                for key in temp.keys():
                    for k in temp[key].keys():
                        self._data[key][k]["value"] = temp[key][k]
            self.save()

    def get_admin_level_data(self):
        """
        获取管理插件等级
        """
        return self._admin_level_data

    def is_empty(self) -> bool:
        return not bool(self._data)

    def keys(self):
        return self._data.keys()

    def __str__(self):
        return str(self._data)

    def __setitem__(self, key, value):
        self._data[key] = value

    def __getitem__(self, key):
        return self._data[key]
zhenxun-bot
/zhenxun_bot-0.1.4.3-py3-none-any.whl/zhenxun_bot/configs/utils/__init__.py
__init__.py
from collections import defaultdict, deque
from typing import (
    Any,
    AsyncGenerator,
    Dict,
    List,
    Literal,
    Optional,
    Tuple,
    Type,
    Union,
)

from nonebot import on_command, on_message
from nonebot.adapters.onebot.v11 import GROUP, GroupMessageEvent, MessageEvent
from nonebot.matcher import Matcher
from nonebot.params import Depends

from nonebot.rule import to_me

from .config import config
from .data import setting


def cooldow_checker(cd_time: int) -> Any:
    """构造一个按用户限频的 Depends 检查器(冷却 cd_time 秒)。"""
    # NOTE(review): public name has a typo ("cooldow"); callers import it by
    # this name, so it must not be renamed.
    cooldown = defaultdict(int)

    async def check_cooldown(
        matcher: Matcher, event: MessageEvent
    ) -> AsyncGenerator[None, None]:
        cooldown_time = cooldown[event.user_id] + cd_time
        if event.time < cooldown_time:
            await matcher.finish(
                f"ChatGPT 冷却中,剩余 {cooldown_time - event.time} 秒", at_sender=True
            )
        yield
        # Only reached after the handler body ran: record last-use timestamp.
        cooldown[event.user_id] = event.time

    return Depends(check_cooldown)


def create_matcher(
    command: Union[str, List[str]],
    only_to_me: bool = True,
    private: bool = True,
    priority: int = 999,
    block: bool = True,
) -> Type[Matcher]:
    """按配置构建触发器:有命令词时用 on_command(首项为主命令,其余为别名),
    否则退化为 on_message;private=False 时仅限群聊。"""
    params: Dict[str, Any] = {
        "priority": priority,
        "block": block,
    }

    if command:
        on_matcher = on_command
        command = [command] if isinstance(command, str) else command
        params["cmd"] = command.pop(0)
        params["aliases"] = set(command)
    else:
        on_matcher = on_message
    if only_to_me:
        params["rule"] = to_me()
    if not private:
        params["permission"] = GROUP

    return on_matcher(**params)


class Session(dict):
    """会话存储:按 scope 用 user session id(private)或群号/用户号(public)
    作键,值为 conversation_id/parent_id 两个定长 deque(支持回滚)。"""

    def __init__(self, scope: Literal["private", "public"]) -> None:
        super().__init__()
        self.is_private = scope == "private"

    def __getitem__(self, event: MessageEvent) -> Dict[str, Any]:
        return super().__getitem__(self.id(event))

    def __setitem__(
        self,
        event: MessageEvent,
        value: Union[Tuple[Optional[str], Optional[str]], Dict[str, Any]],
    ) -> None:
        if isinstance(value, tuple):
            conversation_id, parent_id = value
        else:
            conversation_id = value["conversation_id"]
            parent_id = value["parent_id"]
        if self[event]:
            # NOTE(review): when a session already exists and `value` is a
            # dict (e.g. switching to a saved conversation), nothing happens —
            # only tuples are appended. Looks unintentional; confirm.
            if isinstance(value, tuple):
                self[event]["conversation_id"].append(conversation_id)
                self[event]["parent_id"].append(parent_id)
        else:
            super().__setitem__(
                self.id(event),
                {
                    "conversation_id": deque(
                        [conversation_id], maxlen=config.chatgpt_max_rollback
                    ),
                    "parent_id": deque([parent_id], maxlen=config.chatgpt_max_rollback),
                },
            )

    def __delitem__(self, event: MessageEvent) -> None:
        sid = self.id(event)
        if sid in self:
            super().__delitem__(sid)

    def __missing__(self, _) -> Dict[str, Any]:
        # Missing sessions read as an empty dict instead of raising.
        return {}

    def id(self, event: MessageEvent) -> str:
        if self.is_private:
            return event.get_session_id()
        return str(
            event.group_id if isinstance(event, GroupMessageEvent) else event.user_id
        )

    def save(self, name: str, event: MessageEvent) -> None:
        # Persists only the most recent (latest deque entry) ids under `name`.
        sid = self.id(event)
        if setting.session.get(sid) is None:
            setting.session[sid] = {}
        setting.session[sid][name] = {
            "conversation_id": self[event]["conversation_id"][-1],
            "parent_id": self[event]["parent_id"][-1],
        }
        setting.save()

    def find(self, event: MessageEvent) -> Dict[str, Any]:
        # NOTE(review): raises KeyError when this sid has never saved anything.
        sid = self.id(event)
        return setting.session[sid]

    def count(self, event: MessageEvent) -> int:
        return len(self[event]["conversation_id"])

    def pop(self, event: MessageEvent) -> Tuple[str, str]:
        # Rollback: drop and return the newest conversation/parent pair.
        conversation_id = self[event]["conversation_id"].pop()
        parent_id = self[event]["parent_id"].pop()
        return conversation_id, parent_id
zhenxun-plugin-chatgpt
/zhenxun_plugin_chatgpt-0.7.4-py3-none-any.whl/zhenxun_plugin_chatgpt/utils.py
utils.py
import uuid
from contextlib import asynccontextmanager
from typing import Any, Dict, Optional

from nonebot import get_driver
from nonebot.log import logger
from nonebot.utils import escape_tag
from playwright.async_api import Page, Route, async_playwright
from typing_extensions import Self

driver = get_driver()

try:
    import ujson as json
except ModuleNotFoundError:
    import json

SESSION_TOKEN_KEY = "__Secure-next-auth.session-token"


class Chatbot:
    """Playwright-driven ChatGPT web client: drives chat.openai.com in a
    headless Firefox and intercepts the conversation endpoint."""

    def __init__(
        self,
        *,
        token: str = "",
        account: str = "",
        password: str = "",
        api: str = "https://chat.openai.com/",
        proxies: Optional[str] = None,
        timeout: int = 10,
    ) -> None:
        self.session_token = token
        self.account = account
        self.password = password
        self.api_url = api
        self.proxies = proxies
        self.timeout = timeout
        self.content = None
        self.parent_id = None
        self.conversation_id = None
        self.browser = None
        self.playwright = async_playwright()
        # Either a session token or account+password must be supplied;
        # auto_auth selects the login-based refresh path.
        if self.session_token:
            self.auto_auth = False
        elif self.account and self.password:
            self.auto_auth = True
        else:
            raise ValueError("至少需要配置 session_token 或者 account 和 password")

    async def playwright_start(self):
        """启动浏览器,在插件开始运行时调用"""
        playwright = await self.playwright.start()
        try:
            self.browser = await playwright.firefox.launch(
                headless=True,
                proxy={"server": self.proxies} if self.proxies else None,  # your proxy
            )
        except Exception as e:
            logger.opt(exception=e).error("playwright未安装,请先在shell中运行playwright install")
            return
        ua = f"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/{self.browser.version}"
        self.content = await self.browser.new_context(user_agent=ua)
        await self.set_cookie(self.session_token)

    async def set_cookie(self, session_token: str):
        """设置session_token"""
        self.session_token = session_token
        await self.content.add_cookies(
            [
                {
                    "name": SESSION_TOKEN_KEY,
                    "value": session_token,
                    "domain": "chat.openai.com",
                    "path": "/",
                }
            ]
        )

    # NOTE(review): @driver.on_shutdown registers the *unbound* function while
    # the class body executes; at shutdown it is presumably invoked without a
    # `self` argument — confirm this hook actually works as intended.
    @driver.on_shutdown
    async def playwright_close(self):
        """关闭浏览器"""
        await self.content.close()
        await self.browser.close()
        # NOTE(review): __aexit__ called directly with no exc-info arguments.
        await self.playwright.__aexit__()

    def __call__(
        self, conversation_id: Optional[str] = None, parent_id: Optional[str] = None
    ) -> Self:
        # Accepts the deques stored by Session: uses the newest ([-1]) entry.
        # `self.id` generates a fresh uuid4 on every access.
        self.conversation_id = conversation_id[-1] if conversation_id else None
        self.parent_id = parent_id[-1] if parent_id else self.id
        return self

    @property
    def id(self) -> str:
        # New random message id per access (uuid4).
        return str(uuid.uuid4())

    def get_payload(self, prompt: str) -> Dict[str, Any]:
        """构造 backend-api/conversation 的请求体"""
        return {
            "action": "next",
            "messages": [
                {
                    "id": self.id,
                    "role": "user",
                    "content": {"content_type": "text", "parts": [prompt]},
                }
            ],
            "conversation_id": self.conversation_id,
            "parent_message_id": self.parent_id,
            "model": "text-davinci-002-render",
        }

    @asynccontextmanager
    async def get_page(self):
        """打开网页,这是一个异步上下文管理器,使用async with调用"""
        page = await self.content.new_page()
        # Hide the webdriver flag from bot detection.
        js = "Object.defineProperties(navigator, {webdriver:{get:()=>undefined}});"
        await page.add_init_script(js)
        await page.goto("https://chat.openai.com/chat")
        yield page
        await page.close()

    async def get_chat_response(self, prompt: str) -> str:
        """发送 prompt 并返回回复文本;会处理 CF 验证、session 过期、429/403 等情况"""
        async with self.get_page() as page:
            await page.wait_for_load_state("domcontentloaded")
            if not await page.locator("text=OpenAI Discord").is_visible():
                await self.get_cf_cookies(page)
            logger.debug("正在发送请求")

            # Rewrite the outgoing request body so the page sends our payload.
            async def change_json(route: Route):
                await route.continue_(
                    post_data=json.dumps(self.get_payload(prompt)),
                )

            await self.content.route(
                "https://chat.openai.com/backend-api/conversation", change_json
            )
            await page.wait_for_load_state("domcontentloaded")
            await page.wait_for_load_state("networkidle")
            session_expired = page.locator("button", has_text="Log in")
            if await session_expired.is_visible():
                logger.debug("检测到session过期")
                return "token失效,请重新设置token"
            # Dismiss the three-step first-visit tutorial dialog if present.
            next_botton = page.locator(
                ".btn.flex.justify-center.gap-2.btn-neutral.ml-auto"
            )
            if await next_botton.is_visible():
                logger.debug("检测到初次打开弹窗")
                await next_botton.click()
                await next_botton.click()
                await page.click(".btn.flex.justify-center.gap-2.btn-primary.ml-auto")
            async with page.expect_response(
                "https://chat.openai.com/backend-api/conversation",
                timeout=self.timeout * 1000,
            ) as response_info:
                textarea = page.locator("textarea")
                botton = page.locator('button[class="absolute p-1 rounded-md text-gray-500 bottom-1.5 right-1 md:bottom-2.5 md:right-2 hover:bg-gray-100 dark:hover:text-gray-400 dark:hover:bg-gray-900 disabled:hover:bg-transparent dark:disabled:hover:bg-transparent"]')
                logger.debug("正在等待回复")
                # Retry the send button up to 3 times (it is disabled while busy).
                for _ in range(3):
                    await textarea.fill(prompt)
                    if await botton.is_enabled():
                        await botton.click()
                        break
                    await page.wait_for_timeout(500)
            response = await response_info.value
            if response.status == 429:
                return "请求过多,请放慢速度"
            if response.status == 403:
                # CF challenge: refresh cookies and retry recursively.
                await self.get_cf_cookies(page)
                return await self.get_chat_response(prompt)
            if response.status != 200:
                # NOTE(review): playwright's async `response.text` is a method
                # and is not awaited/called here — this presumably logs the
                # bound-method repr rather than the body; confirm.
                logger.opt(colors=True).error(
                    f"非预期的响应内容: <r>HTTP{response.status}</r> {escape_tag(response.text)}"
                )
                return f"ChatGPT 服务器返回了非预期的内容: HTTP{response.status}\n{response.text}"
            # The SSE stream's 4th-from-last line carries the final message;
            # strip the leading "data: " prefix (6 chars) before parsing.
            lines = await response.text()
            lines = lines.splitlines()
            data = lines[-4][6:]
            response = json.loads(data)
            self.parent_id = response["message"]["id"]
            self.conversation_id = response["conversation_id"]
            logger.debug("发送请求结束")
            return response["message"]["content"]["parts"][0]

    async def refresh_session(self) -> None:
        """刷新 session token(token 模式)或重新登录(账号密码模式)"""
        logger.debug("正在刷新session")
        if self.auto_auth:
            await self.login()
        else:
            async with self.get_page() as page:
                if not await page.locator("text=OpenAI Discord").is_visible():
                    await self.get_cf_cookies(page)
                await page.wait_for_load_state("domcontentloaded")
                session_expired = page.locator("text=Your session has expired")
                if await session_expired.count():
                    logger.opt(colors=True).error("刷新会话失败, session token 已过期, 请重新设置")
                # Pick up the (possibly rotated) token from browser cookies.
                cookies = await self.content.cookies()
                for i in cookies:
                    if i["name"] == SESSION_TOKEN_KEY:
                        self.session_token = i["value"]
                        break
                logger.debug("刷新会话成功")

    async def login(self) -> None:
        """通过账号密码登录获取 session token(依赖 OpenAIAuth)"""
        from OpenAIAuth.OpenAIAuth import OpenAIAuth

        auth = OpenAIAuth(self.account, self.password, bool(self.proxies), self.proxies)  # type: ignore
        try:
            auth.begin()
        except Exception as e:
            if str(e) == "Captcha detected":
                logger.error("不支持验证码, 请使用 session token")
            raise e
        if not auth.access_token:
            logger.error("ChatGPT 登陆错误!")
        if auth.session_token:
            await self.set_cookie(auth.session_token)
        elif possible_tokens := auth.session.cookies.get(SESSION_TOKEN_KEY):
            if len(possible_tokens) > 1:
                await self.set_cookie(possible_tokens[0])
            else:
                try:
                    await self.set_cookie(possible_tokens)
                except Exception as e:
                    logger.opt(exception=e).error("ChatGPT 登陆错误!")
        else:
            logger.error("ChatGPT 登陆错误!")

    @staticmethod
    async def get_cf_cookies(page: Page) -> None:
        """通过点击验证控件获取 Cloudflare cookies,最多尝试 20 次"""
        logger.debug("正在获取cf cookies")
        for _ in range(20):
            button = page.get_by_role("button", name="Verify you are human")
            if await button.count():
                await button.click()
            label = page.locator("label span")
            if await label.count():
                await label.click()
            await page.wait_for_timeout(1000)
            cf = page.locator("text=OpenAI Discord")
            if await cf.is_visible():
                break
        else:
            logger.error("cf cookies获取失败")
        logger.debug("cf cookies获取成功")
zhenxun-plugin-chatgpt
/zhenxun_plugin_chatgpt-0.7.4-py3-none-any.whl/zhenxun_plugin_chatgpt/chatgpt.py
chatgpt.py
from nonebot import on_command, require from nonebot.adapters.onebot.v11 import ( GroupMessageEvent, Message, MessageEvent, MessageSegment, ) from nonebot.log import logger from nonebot.params import CommandArg, _command_arg, _command_start from nonebot.rule import to_me from nonebot.typing import T_State from playwright._impl._api_types import Error as PlaywrightAPIError from .chatgpt import Chatbot from .config import config from .data import setting from .utils import Session, cooldow_checker, create_matcher require("nonebot_plugin_apscheduler") from nonebot_plugin_apscheduler import scheduler require("nonebot_plugin_htmlrender") from nonebot_plugin_htmlrender import md_to_pic #真寻帮助 __zx_plugin_name__ = "ChatGPT" __plugin_usage__ = """ usage: 官方ChatGPT插件 指令: 刷新会话/刷新对话 重置会话记录,开始新的对话 导出会话/导出对话 导出当前会话记录 导入会话/导入对话+会话ID+父消息ID(可选) 将会话记录导入,这会替换当前的会话 保存会话/保存对话+会话名称 将当前会话保存 查看会话/查看对话 查看已保存的所有会话 切换会话/切换对话+会话名称 切换到指定的会话 回滚会话/回滚对话 返回到之前的会话,输入数字可以返回多个会话,但不可以超过最大支持数量 """.strip() __plugin_des__ = "ChatGPT" __plugin_cmd__ = ["刷新回话","刷新对话","导出对话","导出对话","导入会话","保存会话","查看会话","查看对话","切换会话","回滚会话","回滚对话"] __plugin_type__ = ("功能",) __plugin_version__ = 1.0 __plugin_author__ = "INSide_734" __plugin_settings__ = { "level": 5, "limit_superuser": False, "default_status": True, "cmd": ["刷新回话","刷新对话","导出对话","导出对话","导入会话","保存会话","查看会话","查看对话","切换会话","回滚会话","回滚对话"], } chat_bot = Chatbot( token=setting.token or config.chatgpt_session_token, account=config.chatgpt_account, password=config.chatgpt_password, api=config.chatgpt_api, proxies=config.chatgpt_proxies, timeout=config.chatgpt_timeout, ) matcher = create_matcher( config.chatgpt_command, config.chatgpt_to_me, config.chatgpt_private, config.chatgpt_priority, config.chatgpt_block, ) session = Session(config.chatgpt_scope) def check_purview(event: MessageEvent) -> bool: return not ( isinstance(event, GroupMessageEvent) and config.chatgpt_scope == "public" and event.sender.role == "member" ) 
@matcher.handle(parameterless=[cooldow_checker(config.chatgpt_cd_time)])
async def ai_chat(event: MessageEvent, state: T_State) -> None:
    """Forward the user's message to ChatGPT and reply with the answer.

    Retries once with the configured session token when the persisted one
    has expired; optionally renders the markdown answer to an image.
    """
    if not chat_bot.content:
        await chat_bot.playwright_start()
    message = _command_arg(state) or event.get_message()
    text = message.extract_plain_text().strip()
    if start := _command_start(state):
        # Strip the command prefix the matcher consumed.
        text = text[len(start):]
    try:
        msg = await chat_bot(**session[event]).get_chat_response(text)
        if (msg == "token失效,请重新设置token") and (
            chat_bot.session_token != config.chatgpt_session_token
        ):
            # Persisted token expired — fall back to the configured one once.
            await chat_bot.set_cookie(config.chatgpt_session_token)
            msg = await chat_bot(**session[event]).get_chat_response(text)
    except PlaywrightAPIError as e:
        error = f"{type(e).__name__}: {e}"
        logger.opt(exception=e).error(f"ChatGPT request failed: {error}")
        if type(e).__name__ == "TimeoutError":
            await matcher.finish("ChatGPT回复已超时。", at_sender=True)
        else:
            # Bugfix: originally only the exact type name "Error" was handled
            # here, so any other PlaywrightAPIError fell through with `msg`
            # unbound and crashed with NameError below.
            msg = "ChatGPT 目前无法回复您的问题。"
            if config.chatgpt_detailed_error:
                msg += f"\n{error}"
            else:
                msg += "可能的原因是同时提问过多,问题过于复杂等。"
            await matcher.finish(msg, at_sender=True)
    if config.chatgpt_image:
        if msg.count("```") % 2 != 0:
            # Close an unbalanced code fence so markdown rendering succeeds.
            msg += "\n```"
        img = await md_to_pic(msg, width=config.chatgpt_image_width)
        msg = MessageSegment.image(img)
    await matcher.send(msg, at_sender=True)
    session[event] = chat_bot.conversation_id, chat_bot.parent_id


refresh = on_command("刷新对话", aliases={"刷新会话"}, block=True, rule=to_me(), priority=1)


@refresh.handle()
async def refresh_conversation(event: MessageEvent) -> None:
    """Drop the current conversation and start a fresh one."""
    if not check_purview(event):
        # Bugfix: was `import_.finish(...)`, finishing via the wrong matcher.
        await refresh.finish("当前为公共会话模式, 仅支持群管理操作")
    del session[event]
    await refresh.send("当前会话已刷新")


export = on_command("导出对话", aliases={"导出会话"}, block=True, rule=to_me(), priority=1)


@export.handle()
async def export_conversation(event: MessageEvent) -> None:
    """Send the current conversation/parent IDs so they can be re-imported."""
    if cvst := session[event]:
        await export.send(
            f"已成功导出会话:\n"
            f"会话ID: {cvst['conversation_id'][-1]}\n"
            f"父消息ID: {cvst['parent_id'][-1]}",
            at_sender=True,
        )
    else:
        await export.finish("你还没有任何会话记录", at_sender=True)


import_ = on_command(
    "导入对话", aliases={"导入会话", "加载对话", "加载会话"}, block=True, rule=to_me(), priority=1
)


@import_.handle()
async def import_conversation(event: MessageEvent, arg: Message = CommandArg()) -> None:
    """Replace the current conversation with an exported one.

    Expects "会话ID [父消息ID]" — one mandatory and one optional argument.
    """
    if not check_purview(event):
        await import_.finish("当前为公共会话模式, 仅支持群管理操作")
    args = arg.extract_plain_text().strip().split()
    if not args:
        await import_.finish("至少需要提供会话ID", at_sender=True)
    if len(args) > 2:
        await import_.finish("提供的参数格式不正确", at_sender=True)
    session[event] = args.pop(0), args[0] if args else None
    await import_.send("已成功导入会话", at_sender=True)


save = on_command("保存对话", aliases={"保存会话"}, block=True, rule=to_me(), priority=1)


@save.handle()
async def save_conversation(event: MessageEvent, arg: Message = CommandArg()) -> None:
    """Persist the current conversation under a user-chosen name."""
    if not check_purview(event):
        await save.finish("当前为公共会话模式, 仅支持群管理操作")
    if session[event]:
        name = arg.extract_plain_text().strip()
        session.save(name, event)
        await save.send(f"已将当前会话保存为: {name}", at_sender=True)
    else:
        await save.finish("你还没有任何会话记录", at_sender=True)


check = on_command("查看对话", aliases={"查看会话"}, block=True, rule=to_me(), priority=1)


@check.handle()
async def check_conversation(event: MessageEvent) -> None:
    """List the names of all conversations saved for this scope."""
    name_list = "\n".join(list(session.find(event).keys()))
    await check.send(f"已保存的会话有:\n{name_list}", at_sender=True)


switch = on_command("切换对话", aliases={"切换会话"}, block=True, rule=to_me(), priority=1)


@switch.handle()
async def switch_conversation(event: MessageEvent, arg: Message = CommandArg()) -> None:
    """Switch to a previously saved conversation by name."""
    if not check_purview(event):
        await switch.finish("当前为公共会话模式, 仅支持群管理操作")
    name = arg.extract_plain_text().strip()
    try:
        session[event] = session.find(event)[name]
        await switch.send(f"已切换到会话: {name}", at_sender=True)
    except KeyError:
        await switch.send(f"找不到会话: {name}", at_sender=True)


@scheduler.scheduled_job("interval", minutes=config.chatgpt_refresh_interval)
async def refresh_session() -> None:
    """Periodically refresh the ChatGPT session token and persist it."""
    await chat_bot.refresh_session()
    setting.token = chat_bot.session_token
    setting.save()


rollback = on_command("回滚对话", aliases={"回滚会话"}, block=True, rule=to_me(), priority=1)


@rollback.handle()
async def rollback_conversation(event: MessageEvent, arg: Message = CommandArg()) -> None:
    """Roll the conversation back by N steps (N given as a plain number)."""
    num = arg.extract_plain_text().strip()
    if num.isdigit():
        num = int(num)
        if session[event]:
            count = session.count(event)
            if num > count:
                await rollback.finish(f"历史会话数不足,当前历史会话数为{count}", at_sender=True)
            else:
                for _ in range(num):
                    session.pop(event)
                await rollback.send(f"已成功回滚{num}条会话", at_sender=True)
        else:
            # Bugfix: was `save.finish(...)`, finishing via the wrong matcher.
            await rollback.finish("你还没有任何会话记录", at_sender=True)
    else:
        await rollback.finish(
            f"请输入有效的数字,最大回滚数为{config.chatgpt_max_rollback}", at_sender=True
        )
zhenxun-plugin-chatgpt
/zhenxun_plugin_chatgpt-0.7.4-py3-none-any.whl/zhenxun_plugin_chatgpt/__init__.py
__init__.py
# 这些年程序员遇到的法律BUG

## 下载

### Docker

```
docker pull apachecn0/zhexienian-chengxuyuan-yudaode-falv-bug
docker run -tid -p <port>:80 apachecn0/zhexienian-chengxuyuan-yudaode-falv-bug
# 访问 http://localhost:<port> 查看文档
```

### PYPI

```
pip install zhexienian-chengxuyuan-yudaode-falv-bug
zhexienian-chengxuyuan-yudaode-falv-bug <port>
# 访问 http://localhost:<port> 查看文档
```

### NPM

```
npm install -g zhexienian-chengxuyuan-yudaode-falv-bug
zhexienian-chengxuyuan-yudaode-falv-bug <port>
# 访问 http://localhost:<port> 查看文档
```
zhexienian-chengxuyuan-yudaode-falv-bug
/zhexienian_chengxuyuan_yudaode_falv_bug-2022.10.10.0-py3-none-any.whl/ZhexienianChengxuyuanYudaodeFalvBug/README.md
README.md
import pickle
import random
import pathlib

# configuration
PATH = pathlib.Path('mynote.pkl')
DEFAULT_NOTE = {'note': '笔记'}


# business logic
def _query(word):
    """查询单词的意思,如果查不到询问是否存储

    Arguments:
        word {str} -- 单词
    """
    if word in note:
        answer = note[word]
        print("单词的中文是:", answer)
    else:
        choice = input("无法找到该单词是否讲此单词加入到单词本中[y/n]:")
        if choice in {"", "y", "Y", "yes", "Yes"}:
            word_meaning = input("请输入单词的中文:")
            note[word] = word_meaning
            print("最新添加的单词单词本为:", note)
            print("添加成功可以复习")


def query():
    """Prompt for a word, then look it up via _query."""
    name = input("请输入单词:")
    _query(name)


def review():
    """随机抽查单词;直接回车退出复习 (gives one second chance per word)."""
    print("如果想退出复习直接回车")
    while True:
        word = random.choice(tuple(note.keys()))
        print("请打出单词 %s 的意思:" % word)
        meaning = input("单词的意思是:")
        if meaning == note[word]:
            print("正确!你真棒")
        elif meaning == "":
            break
        else:
            print("再仔细想想, 再给你一次机会")
            meaning = input("单词的意思:")
            if meaning == note[word]:
                print("这次对啦 ^_^ !")
            elif meaning == "":
                break
            else:
                print("不对哦,单词的意思是:", note[word])


def modify():
    """输入修改的单词和单词意思,更新 note."""
    word = input("输入单词:")
    meaning = input("输入单词意思:")
    note[word] = meaning


def run(save=True):
    """Interactive menu loop; persists the notebook on exit when *save*."""
    print('--进入笔记系统--')
    while True:
        option = input("""
==**==
菜单:
------
1. 查询
2. 复习
3. 退出
4. 修改 (未开通)
------
>>> """)
        if option == "1":
            query()
        elif option == "2":
            review()
        elif option == "3":
            print("谢谢使用!")
            if save:
                print("系统完全退出前笔记会自动保存。")
                with open(PATH, 'wb') as pickle_file:
                    pickle.dump(note, pickle_file)
            break
        elif option == "4":
            modify()
        else:
            print("功能输入错误")
    print('--退出笔记系统--')


# main programming
if PATH.exists():
    # Bugfix: the mode was 'b', which is not a valid open() mode and raised
    # ValueError whenever a saved notebook existed; pickle files must be
    # opened in binary *read* mode.
    with open(PATH, 'rb') as pickle_file:
        note = pickle.load(pickle_file)
else:
    note = DEFAULT_NOTE

if __name__ == '__main__':
    # Guarding run() keeps `import zhfnote` free of interactive side effects.
    run()
zhfnote
/zhfnote-1.0.tar.gz/zhfnote-1.0/zhfnote.py
zhfnote.py
import json
import os
import re
import shutil
from datetime import datetime
from random import random
from time import sleep

import requests
from bs4 import BeautifulSoup


class Zhihu(object):
    """Base class: shared HTTP helpers for the Zhihu web API."""

    # One shared desktop UA instead of a copy per method.
    HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
    }

    def __request__(self, url, payloads=None):
        """GET *url* and return the decoded JSON payload.

        Sleeps a random sub-second interval to avoid hammering the API.
        Raises ConnectionError on 403 (body included) and on any other
        non-200 status (URL included).
        """
        sleep(random())
        response = requests.get(url, payloads, headers=self.HEADERS)
        if response.status_code == 200:
            # Bugfix: json.loads(..., encoding='utf8') raises TypeError on
            # Python >= 3.9 (the keyword was deprecated in 3.1, removed in 3.9).
            return json.loads(response.text)
        elif response.status_code == 403:
            raise ConnectionError(response.status_code, response.text)
        else:
            raise ConnectionError(response.status_code, response.url)

    def __download__(self, url):
        """GET *url* and return the raw body bytes (used for images)."""
        sleep(random())
        response = requests.get(url, headers=self.HEADERS)
        if response.status_code == 200:
            return response.content
        raise ConnectionError(response.status_code, response.url)

    def __ut2date__(self, ut):
        """Format a unix timestamp; falsy input yields a zero placeholder."""
        return datetime.fromtimestamp(ut).strftime('%Y-%m-%d %H:%M:%S') if ut else '0000-00-00 00:00:00'

    def __html2text__(self, html):
        """Strip HTML tags, keeping only the text content."""
        pattern = re.compile(r"<.*?>")
        return pattern.sub('', html)

    def __addattribute__(self):
        """Expose every key of self.__info__ as an attribute, plus `info`."""
        for k, v in self.__info__.items():
            self.__dict__[k] = v
        self.__dict__['info'] = self.__info__


class User(Zhihu):
    """A Zhihu user profile; anonymous users only get a nickname."""

    def __init__(self, uid):
        self.__uid__ = uid
        # Values that denote an anonymous (or missing) user id.
        self.__anonymous__ = ['', '0', 0, None]
        self.__gendermap__ = {
            1: "男",
            0: "女",
            -1: "未知",
        }
        self.__info__ = {
            "customized_id": '',   # user-chosen url token
            "internal_id": '',     # Zhihu-internal id
            "nickname": '',        # display name
            "gender": self.__gendermap__[-1],
            "avatar": '',
            "headline": '',        # one-line bio
            "is_vip": '',          # 盐选会员
            "follower_count": '',
            "following_count": '',
            "answer_count": '',
            "question_count": '',
            "articles_count": '',
            "voteup_count": '',
        }
        self.__getinfo__()
        if self.__uid__ not in self.__anonymous__:
            # Lazy generators: no HTTP requests are made until iterated.
            self.__info__['followers'] = self.__follower__()
            self.__info__['followings'] = self.__following__()
        self.__addattribute__()

    def __getinfo__(self):
        """Populate self.__info__ from the members API (or mark anonymous)."""
        url = f"https://www.zhihu.com/api/v4/members/{self.__uid__}"
        payloads = {
            "include": "follower_count,following_count,answer_count,question_count,articles_count,voteup_count"
        }
        if self.__uid__ in self.__anonymous__:
            self.__info__['nickname'] = '匿名用户'
        else:
            info = self.__request__(url, payloads)
            self.__info__['customized_id'] = info['url_token']
            self.__info__['internal_id'] = info['id']
            self.__info__['nickname'] = info['name']
            self.__info__['avatar'] = info['avatar_url']
            self.__info__['gender'] = self.__gendermap__[info['gender']]
            self.__info__['headline'] = info['headline']
            self.__info__['is_vip'] = info['vip_info']['is_vip']
            self.__info__['follower_count'] = info['follower_count']
            self.__info__['following_count'] = info['following_count']
            self.__info__['answer_count'] = info['answer_count']
            self.__info__['question_count'] = info['question_count']
            self.__info__['articles_count'] = info['articles_count']
            self.__info__['voteup_count'] = info['voteup_count']

    def __members__(self, url):
        """Yield User objects from a paginated members endpoint, one per page.

        Shared by __follower__/__following__, which previously duplicated
        this pagination loop verbatim.
        """
        offset = 0
        while True:
            payloads = {"limit": 1, "offset": offset}
            info = self.__request__(url, payloads)
            yield User(info['data'][0]['url_token'])
            if info['paging']['is_end']:
                return
            offset += 1

    def __follower__(self):
        """Iterate over the users following this account."""
        url = f"https://www.zhihu.com/api/v4/members/{self.info['customized_id']}/followers"
        yield from self.__members__(url)

    def __following__(self):
        """Iterate over the users this account follows."""
        url = f"https://www.zhihu.com/api/v4/members/{self.info['customized_id']}/followees"
        yield from self.__members__(url)

    def __bool__(self):
        """False for anonymous users, True otherwise."""
        return self.__uid__ not in self.__anonymous__


class Answer(Zhihu):
    """A single answer, built from listing kwargs or fetched by id.

    When constructed without kwargs the detail is fetched from the API;
    otherwise the listing data passed by Question.answers() is used as-is.
    """

    def __init__(self, aid, **kwargs):
        self.__info__ = {
            "aid": aid,
            "type": kwargs.get('type', ''),
            "author": User(kwargs.get('author', None)),
            "excerpt": kwargs.get('excerpt', ''),
            "content": kwargs.get('content', ''),
            "text": self.__html2text__(kwargs.get('content', '')),
            "comment_count": kwargs.get('comment_count', ''),
            "voteup_count": kwargs.get('voteup_count', ''),
            "created": self.__ut2date__(kwargs.get('created', None)),
            "updated": self.__ut2date__(kwargs.get('updated', None)),
            "question": kwargs.get('question', '')
        }
        if not kwargs:
            self.__getinfo__()
        self.__addattribute__()

    def __getinfo__(self):
        """Fetch the answer detail from the API and fill self.__info__."""
        url = f"https://www.zhihu.com/api/v4/answers/{self.__info__['aid']}"
        payloads = {
            "include": "content,excerpt,comment_count,voteup_count"
        }
        info = self.__request__(url, payloads)
        self.__info__['type'] = info['answer_type']
        self.__info__['author'] = User(info['author']['id'])
        self.__info__['excerpt'] = info['excerpt']
        self.__info__['content'] = info['content']
        self.__info__['text'] = self.__html2text__(info['content'])
        self.__info__['comment_count'] = info['comment_count']
        self.__info__['voteup_count'] = info['voteup_count']
        self.__info__['created'] = self.__ut2date__(info['created_time'])
        self.__info__['updated'] = self.__ut2date__(info['updated_time'])
        self.__info__['question'] = Question(info['question']['id'])

    def save_media(self, path=None):
        """Download every image embedded in the answer content into *path*.

        Defaults to "<qid>_<question title>/<aid>".  The target directory is
        wiped and recreated on each call.
        """
        if not path:
            path = os.path.join(
                str(self.question.qid) + '_' + self.question.title,
                str(self.aid)
            )
        shutil.rmtree(path, ignore_errors=True)
        os.makedirs(path)
        soup = BeautifulSoup(self.__info__['content'], 'lxml')
        for imgtag in soup.find_all('noscript'):
            # Prefer the full-size original over the inline thumbnail.
            if imgtag.img.has_attr('data-original'):
                url = imgtag.img['data-original']
            else:
                url = imgtag.img['src']
            data = self.__download__(url)
            with open(os.path.join(path, os.path.basename(url)), 'wb') as f:
                f.write(data)


class Question(Zhihu):
    """A question plus a lazy iterator over its answers."""

    def __init__(self, qid):
        self.__info__ = {
            'qid': qid,
            'title': None,
            'detail': None,
            'type': None,
            'created': None,
            'updated': None,
            'author': None,
        }
        self.__getinfo__()
        self.__addattribute__()

    def __getinfo__(self):
        """Fetch the question detail from the API and fill self.__info__."""
        url = f"https://www.zhihu.com/api/v4/questions/{self.__info__['qid']}"
        payloads = {
            "include": "question.detail,author"
        }
        info = self.__request__(url, payloads)
        self.__info__['title'] = info['title']
        self.__info__['detail'] = self.__html2text__(info['detail'])
        self.__info__['type'] = info['question_type']
        self.__info__['created'] = self.__ut2date__(info['created'])
        self.__info__['updated'] = self.__ut2date__(info['updated_time'])
        self.__info__['author'] = User(info['author']['id'])

    @staticmethod
    def __make_answer__(data):
        """Build an Answer from one item of an answers-listing response.

        Factored out of answers(), which previously duplicated this mapping
        for the first page and for every subsequent page.
        """
        return Answer(
            aid=data['id'],
            type=data['answer_type'],
            author=data['author']['id'],
            excerpt=data['excerpt'],
            content=data['content'],
            text=data['content'],
            comment_count=data['comment_count'],
            voteup_count=data['voteup_count'],
            created=data['created_time'],
            updated=data['updated_time'],
            question=Question(data['question']['id'])
        )

    def answers(self, sort_by='default'):
        """Yield the question's answers one by one, following pagination."""
        url = f"https://www.zhihu.com/api/v4/questions/{self.__info__['qid']}/answers"
        payloads = {
            "include": "content,excerpt,comment_count,voteup_count",
            "offset": 0,
            "limit": 1,
            "sort_by": sort_by
        }
        info = self.__request__(url, payloads)
        yield self.__make_answer__(info['data'][0])
        while not info['paging']['is_end']:
            info = self.__request__(info['paging']['next'])
            yield self.__make_answer__(info['data'][0])
zhihu-cli
/zhihu_cli-1.2.0-py3-none-any.whl/zhihu/zhihu.py
zhihu.py
zhihu_crawler ============= 本程序支持关键词搜索、热榜、用户信息、回答、专栏文章、评论等信息的抓取 项目目录 ------- - __init__.py 为程序的对外统一入口 - constants.py 常量 - exceptions.py 自定义异常 - extractors.py 数据清洗 - page_iterators.py 简单的页面处理 - zhihu_scraper.py 页面请求、cookie设置 - zhihu_types.py 类型提示、检查。项目自定义类型 - 注意事项: 项目内有部分异步操作,在模块引用之前需要使用猴子补丁; 同时该项目没有对ip限制、登录做针对性处理 安装 ---- .. code:: bash pip install zhihu_crawler 使用 ---- .. code:: python if __name__ == '__main__': # 设置代理; 如采集量较大,建议每次请求都切换代理 set_proxy({'http': 'http://127.0.0.1:8125', 'https': 'http://127.0.0.1:8125'}) # 设置cookie set_cookie({'d_c0': 'AIBfvRMxmhSPTk1AffR--QLwm-gDM5V5scE=|1646725014'}) # 搜索采集使用案例: for info in search_crawl(key_word='天空', count=10): print(info) # 可传入data_type 指定搜索类型 for info in search_crawl(key_word='天空', count=10, data_type='answer'): print(info) # 用户信息回答列表使用案例(采集该用户信息及50条回答信息,每条回答包含50条评论): for info in user_crawler('wo-men-de-tai-kong', answer_count=50, comment_count=50 ): print(info) # 用户信息提问列表使用案例(采集该用户信息及10条问题信息,每条问题包含10条回答,每条回答包含50条评论): for info in user_crawler('wo-men-de-tai-kong', question_count=10, drill_down_count=10, comment_count=50): print(info) # 热点问题采集使用案例 # 采集 前10个问题, 每个问题采集10条回答 for info in hot_questions_crawl(question_count=10, drill_down_count=10): print(info) # 可传入period 指定热榜性质。如小时榜、日榜、周榜、月榜 # 传入domains 采集指定主题的问题 for info in hot_questions_crawl(question_count=10, period='day', domains=['1001', 1003]): print(info)
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/README.rst
README.rst
BASE_URL = 'https://www.zhihu.com/' # 知乎基础api BASE_API = 'https://api.zhihu.com/' # 知乎搜索综合url SEARCH_URL = 'https://api.zhihu.com/search?t=general&q={key_word}&correction=1&offset=0&limit=20&filter_fields=&' \ 'lc_idx=0&show_all_topics=0&search_source=Filter&vertical={data_type}' # 视频请求url VIDEO_BASE_URL = 'https://lens.zhihu.com/api/v4/videos/' # 文章url ARTICLE_BASE_URL = 'https://zhuanlan.zhihu.com/p/' # 单个问题回答的接口,发布时间排序 QUESTION_ANSWERS_URL = 'https://api.zhihu.com/questions/{question_id}/answers?include=data%5B%2A%5D.is_normal%2' \ 'Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2' \ 'Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2' \ 'Ccontent%2Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2' \ 'Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2C' \ 'question%2Cexcerpt%2Cis_labeled%2Cpaid_info%2Cpaid_info_content%2Creaction_instruction%2' \ 'Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3' \ 'Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cvip_info%2' \ 'Cbadge%5B%2A%5D.topics%3Bdata%5B%2A%5D.settings.table_of_content.enabled&' \ 'offset=0&limit=20&platform=desktop&sort_by=updated' # 用户回答的接口 时间排序 USER_ANSWERS_URL = 'https://api.zhihu.com/members/{user_id}/answers?include=data%5B*%5D.is_normal%2' \ 'Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2' \ 'Ccollapse_reason%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2' \ 'Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2' \ 'Cmark_infos%2Ccreated_time%2Cupdated_time%2Creview_info%2Cexcerpt%2Cis_labeled%2Clabel_info%2' \ 'Crelationship.is_authorized%2Cvoting%2Cis_author%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3' \ 
'Bdata%5B*%5D.vessay_info%3Bdata%5B*%5D.author.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3' \ 'Bdata%5B*%5D.author.vip_info%3Bdata%5B*%5D.question.has_publishing_draft%2Crelationship&' \ 'offset=0&limit=20&sort_by=created' # 用户视频的接口 USER_VIDEO_URL = 'https://www.zhihu.com/api/v4/members/{user_id}/zvideos?offset=0&limit=20' # 用户文章的接口 USER_ARTICLE_URL = 'https://www.zhihu.com/api/v4/members/{user_id}/articles?include=data%5B*%5D.comment_count%2C' \ 'suggest_edit%2Cis_normal%2Cthumbnail_extra_info%2Cthumbnail%2Ccan_comment%2Ccomment_permission%2C' \ 'admin_closed_comment%2Ccontent%2Cvoteup_count%2Ccreated%2Cupdated%2Cupvoted_followees%2Cvoting%2C' \ 'review_info%2Cis_labeled%2Clabel_info%3Bdata%5B*%5D.vessay_info%3Bdata%5B*%5D.author.badge' \ '%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B*%5D.author.vip_info%3B&' \ 'offset=0&limit=20&sort_by=created' # 用户问题列表接口 USER_QUESTION_URL = 'https://api.zhihu.com/members/{user_id}/questions?include=data%5B*%5D.created%2' \ 'Canswer_count%2Cfollower_count%2Cauthor%2Cadmin_closed_comment&offset=0&limit=20' # 用户想法列表接口 USER_PINS_URL = 'https://www.zhihu.com/api/v4/members/{user_id}/pins?offset=0&limit=20&includes=' \ 'data%5B*%5D.upvoted_followees%2Cadmin_closed_comment' # 用户专栏列表接口 USER_COLUMN_URL = 'https://api.zhihu.com/members/{user_id}/column-contributions?include=data%5B*%5D.' 
\ 'column.intro%2Cfollowers%2Carticles_count%2Cvoteup_count%2Citems_count&offset=0&limit=20' # 专栏文章列表接口 COLUMN_ITEMS_URL = 'https://api.zhihu.com/columns/{column_id}/items?offset=0&limit=20' # 用户所关注的人列表接口 USER_FOLLOWEE_URL = 'https://www.zhihu.com/api/v4/members/{user_id}/followees?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F%28type%3Dbest_answerer%29%5D.topics&offset=0&limit=20' # 关注该账号的人列表的接口 USER_FOLLOWERS_URL = 'https://www.zhihu.com/api/v4/members/{user_id}/followers?include=data%5B*%5D.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F%28type%3Dbest_answerer%29%5D.topics&offset=0&limit=20' # 用户关注的专栏列表接口 USER_FOLLOWING_COLUMNS_URL = 'https://www.zhihu.com/api/v4/members/{user_id}/following-columns?include=data%5B*%5D.intro%2Cfollowers%2Carticles_count%2Cvoteup_count%2Citems_count&offset=0&limit=20' # 用户关注的话题列表接口 USER_FOLLOWING_TOPICS_URL = 'https://www.zhihu.com/api/v4/members/{user_id}/following-topic-contributions?include=data%5B*%5D.topic.introduction&offset=0&limit=20' # 用户关注的问题的列表接口 USER_FOLLOWING_QUESTIONS_URL = 'https://www.zhihu.com/api/v4/members/{user_id}/following-questions?include=data%5B*%5D.created%2Canswer_count%2Cfollower_count%2Cauthor&offset=0&limit=20' # 单条回复url ANSWER_BASE_URL = 'https://www.zhihu.com/question/{question_id}/answer/{answer_id}' # 热搜 TOP_SEARCH_URL = 'https://www.zhihu.com/api/v4/search/top_search' # 话题 TOPIC_BASE_URL = 'https://www.zhihu.com/topic/' # 评论url COMMENT_URL = 'https://api.zhihu.com/{data_type}/{id}/comments?order=reverse&offset=0&limit=20&status=open' # 话题下内容列表接口 TOPIC_FEEDS_URL = 'https://api.zhihu.com/topics/{topic_id}/feeds/timeline_activity?offset=0&limit=20' # x-zse-93 X_ZSE_93 = '101_3_2.0' # 类型 ANSWER = 'answer' # 问答 VIDEO_ANSWER = 'videoanswer' # 视频类问答 VIDEO = 'zvideo' # 视频 ARTICLE = 'article' # 文章 GENERAL = 'general' # 杂志文章 需要付费 QUESTION = 'question' # 问题 TOPIC = 'topic' # 话题 USER = 
'user' # 账号 WIKI_BOX = 'wiki_box' # 协程数 ASYNC_COUNT = 5 # 翻页数 DEFAULT_PAGE_LIMIT = 5 # 采集评论数 DEFAULT_COMMENT_COUNT = 50 # 默认请求休眠时间 DEFAULT_REQUESTS_TIMEOUT = 5 # -------- 搜索过滤条件 ---——- #
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/zhihu_crawler/constants.py
constants.py
import json
import re
from urllib.parse import urljoin, quote

from loguru import logger

from .zhihu_types import *
from .constants import *


def iter_search_pages(key_word: str, request_fn: RequestFunction, **kwargs) -> Iterator[Page]:
    """Iterate over search-result pages for *key_word*.

    Optional kwargs: start_url (resume point), data_type, sort, time_interval.
    """
    start_url = kwargs.pop('start_url', None)
    if not start_url:
        data_type = kwargs.pop('data_type', '')
        sort = kwargs.pop('sort', None)
        time_interval = kwargs.pop('time_interval', None)
        key_word = quote(key_word)
        start_url = SEARCH_URL.format(key_word=key_word, data_type=data_type)
        if sort:
            start_url = start_url + f'&sort={sort}'
        if time_interval:
            start_url = start_url + f'&time_interval={time_interval}'
    return generic_iter_pages(start_url, PageParser, request_fn, **kwargs)


def iter_question_pages(question_id: str, request_fn: RequestFunction, **kwargs) -> Iterator[Page]:
    """Iterate over the answer pages of one question (default or pubdate order)."""
    start_url = kwargs.pop('start_url', None)
    pubdate_sort = kwargs.pop('pubdate_sort', False)
    if not start_url:
        start_url = QUESTION_ANSWERS_URL.format(question_id=question_id)
        if not pubdate_sort:
            start_url = start_url.replace('&sort_by=updated', '&sort_by=default')
    return generic_iter_pages(start_url, QuestionPageParser, request_fn, **kwargs)


def iter_article_pages(article_id: str, request_fn: RequestFunction, **kwargs) -> Iterator[Page]:
    """Iterate over a single article resource."""
    start_url = kwargs.pop('start_url', None)
    if not start_url:
        start_url = urljoin(BASE_API, f'articles/{article_id}')
    return generic_iter_pages(start_url, ArticlePageParser, request_fn, **kwargs)


def iter_video_pages(video_id: str, request_fn: RequestFunction, **kwargs) -> Iterator[Page]:
    """Iterate over a single video resource."""
    start_url = kwargs.pop('start_url', None)
    if not start_url:
        start_url = urljoin(BASE_API, f'zvideos/{video_id}')
    return generic_iter_pages(start_url, VideoPageParser, request_fn, **kwargs)


def iter_user_pages(user_id: str, request_fn: RequestFunction, **kwargs) -> Iterator[Page]:
    """Iterate over a user profile resource."""
    start_url = kwargs.pop('start_url', None)
    if not start_url:
        start_url = urljoin(BASE_API, f'people/{user_id}')
    return generic_iter_pages(start_url, UserPageParser, request_fn, **kwargs)


def iter_hot_list_pages(request_fn: RequestFunction, **kwargs) -> Iterator[Page]:
    """Iterate over the site-wide hot list (top 50)."""
    start_url = kwargs.pop('start_url', None)
    if not start_url:
        start_url = urljoin(BASE_API, 'topstory/hot-lists/total?limit=50')
    return generic_iter_pages(start_url, HotListPageParser, request_fn, **kwargs)


def iter_hot_question_pages(domain: str, request_fn: RequestFunction, **kwargs) -> Iterator[Page]:
    """Iterate over hot questions for a topic *domain* (API caps at 200)."""
    start_url = kwargs.pop('start_url', None)
    period = kwargs.pop('period', '')
    question_count = kwargs.pop('question_count', 200)
    question_count = 200 if question_count > 200 else question_count
    if not start_url:
        start_url = BASE_API + f'creators/rank/hot?domain={domain}&limit={question_count}&offset=0&period={period}'
    return generic_iter_pages(start_url, HotQuestionPageParser, request_fn, **kwargs)


def generic_iter_pages(start_url, page_parser_cls, request_fn, **kwargs) -> Iterator[Page]:
    """Request pages starting at *start_url* and yield each parsed page.

    Stops when the API reports the last page, or when a request fails.
    Bugfix: previously a failed request only logged the error and fell
    through — on the first page `response` was None (assertion crash in the
    parser), on later pages the stale response was re-parsed, which could
    loop forever on the same page.
    """
    next_url = start_url
    while next_url:
        try:
            response = request_fn(next_url, **kwargs)
        except Exception as e:
            logger.error(f'error: {e}')
            return
        parser = page_parser_cls(response)
        yield parser.get_pages()
        next_page_info = parser.get_next_page()
        if next_page_info.get('is_end'):
            logger.warning('last page')
            return
        next_url = next_page_info.get('next_url')
        logger.warning(f'request next url {next_url}')


class PageParser:
    """Parse one API/HTML response into page data (JSON cleaning).

    Some endpoints return an HTML page that embeds the JSON payload in a
    <script id="js-initialData"> tag; json_regex extracts it in that case.
    """

    json_prefix = 'js-initialData'
    json_regex = re.compile(r'id="js-initialData".*>(\{"initialState.*subAppName.*?})</script>')

    def __init__(self, response):
        self.response = response
        self.html = None
        self.json_data = None
        self._parse()

    def _parse(self):
        """Extract json_data (embedded or plain JSON) and keep the raw html."""
        jsons = []
        assert self.response is not None, 'response is null'
        if self.json_prefix in self.response.text:
            jsons = self.json_regex.findall(self.response.text)
        self.json_data = json.loads(self.response.text) if not jsons else json.loads(jsons[0])
        self.html = self.response.html

    def get_raw_page(self) -> RawPage:
        return self.html

    def get_next_page(self) -> Dict[str, Union[str, bool]]:
        """Return {'next_url': ..., 'is_end': ...} from the paging block."""
        assert self.json_data is not None, 'json_data is null'
        paging = self.json_data.get('paging', {})
        return {'next_url': paging.get('next'), 'is_end': paging.get('is_end', '')}

    def get_pages(self) -> Page:
        """Return the list of data items (the whole payload if no 'data' key)."""
        assert self.json_data is not None, 'json_data is null'
        data_list = self.json_data.get('data', [])
        if not data_list:
            data_list = [self.json_data]
        assert data_list is not None, 'data_list is null'
        return data_list


class HotListPageParser(PageParser):
    """Flatten hot-list entries to their `target` payload, keeping heat text."""

    def get_pages(self) -> Page:
        assert self.json_data is not None, 'json_data is null'
        answers = []
        for data in self.json_data.get('data', []):
            if data:
                target = data.get('target') or {}
                target['heat_text'] = data.get('detail_text', '')
                answers.append(target)
        return answers


class HotQuestionPageParser(PageParser):
    """Merge each entry's `question` dict with its `reaction` stats."""

    def get_pages(self) -> Page:
        assert self.json_data is not None, 'json_data is null'
        questions = []
        for data in self.json_data.get('data', []):
            question_info = {}
            question_info.update(data.get('question', {}))
            question_info['reaction'] = data.get('reaction', {})
            questions.append(question_info)
        return questions


class QuestionPageParser(PageParser):
    """Question JSON cleaning (default behaviour)."""
    pass


class VideoPageParser(PageParser):
    """Video JSON cleaning (default behaviour)."""
    pass


class ArticlePageParser(PageParser):
    """Article JSON cleaning (default behaviour)."""
    pass


class UserPageParser(PageParser):
    """User JSON cleaning (default behaviour)."""
    pass


class UserAnswerPageParser(PageParser):
    """User-answer JSON cleaning (default behaviour)."""
    pass


class UserVideoPageParser(PageParser):
    """User-video JSON cleaning (default behaviour)."""
    pass


class UserArticlePageParser(PageParser):
    """User-article JSON cleaning (default behaviour)."""
    pass
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/zhihu_crawler/page_iterators.py
page_iterators.py
from .zhihu_types import *
from .constants import *
from requests_html import HTML
from loguru import logger
import re
import copy
import json
from ..utils import extract_time, generating_page_links
from urllib.parse import urljoin


def extract_data(raw_html, options: Options, request_fn: RequestFunction,
                 full_html=None) -> Union[QuestionType, AnswerType, ArticleType]:
    """Clean one answer/article/video element via BaseExtractor."""
    return BaseExtractor(raw_html, options, request_fn, full_html).extract_data()


def extract_user(raw_html, options: Options, request_fn: RequestFunction,
                 full_html=None) -> UserType:
    """Clean one user element via UserExtractor."""
    return UserExtractor(raw_html, options, request_fn, full_html).extract_data()


def extract_question_data(raw_html, options: Options, request_fn: RequestFunction,
                          full_html=None) -> QuestionType:
    """Clean one question element via QuestionExtractor."""
    return QuestionExtractor(raw_html, options, request_fn, full_html).extract_data()


def init_question_reaction():
    """Return zeroed interaction metrics for a question (merged with API data later)."""
    return {
        "new_pv": 0,                 # page-view delta
        "new_pv_7_days": 0,          # page-view delta over 7 days
        "new_follow_num": 0,         # follower delta
        "new_follow_num_7_days": 0,  # follower delta over 7 days
        "new_answer_num": 0,         # answer delta
        "new_answer_num_7_days": 0,  # answer delta over 7 days
        "new_upvote_num": 0,         # upvote delta
        "new_upvote_num_7_days": 0,  # upvote delta over 7 days
        "pv": 0,                     # total page views
        "follow_num": 0,             # total followers
        "answer_num": 0,             # total answers
        "upvote_num": 0,             # total upvotes
        "new_pv_yesterday": 0,
        "new_pv_t_yesterday": 0,
        "score": 0,                  # heat score
        "score_level": 0             # heat level
    }


class BaseExtractor:
    """Cleans one raw element (answer / article / video / question) into a flat dict."""

    video_regex = re.compile(r'play_url\".*\"(http.*\.mp4\?.*=hw?)"')
    ele_regex = re.compile(r'<.*</.*>')

    def __init__(self, element, options, request_fn, full_html=None):
        self.element = element
        self.options = options
        self.request_fn = request_fn
        self._type = None
        self._content_html = None
        self.info = {}
        # lazily fetched detail-page response (see detail_response)
        self._detail_response = None

    @property
    def type(self):
        """Element type string, cached after first access."""
        if self._type is None:
            self._type = self.element.get('type', '')
        return self._type

    def content_html(self, content=None):
        """Parse the element content (or `content`) into an HTML object, cached."""
        if self._content_html is None:
            html = content if content else self.element.get('content', '')
            self._content_html = HTML(html=html)
        return self._content_html

    def detail_response(self):
        """Request the detail page once and cache the response."""
        if self._detail_response is None:
            self._detail_response = self.request_fn(self.info.get('source_url'))
        return self._detail_response

    def extract_data(self):
        """Entry point: run every extract_* method and assemble self.info."""
        if self.element.get('tab_type', '') == GENERAL:
            # paywalled magazine article: skip cleaning
            return {}
        methods = [
            self.extract_id,
            self.extract_video_id,
            self.extract_title,
            self.extract_content,
            self.extract_pub_time,
            self.extract_edit_time,
            self.extract_question,
            self.extract_url,
            self.extract_pictures,
            self.extract_video_url,
            self.extract_author,
            self.extract_labels,
            self.extract_up_count,
            self.extract_appreciate_count,
            self.extract_comment_count,
            self.extract_title_pictures,
            self.extract_title_description,
            self.extract_like_count,
            self.extract_play_count
        ]
        for method in methods:
            try:
                partial_info = method()
                if partial_info is None:
                    continue
                self.info.update(partial_info)
            except Exception as ex:
                logger.debug(f'method: {method.__name__} error:{ex}')
        temp_info = copy.deepcopy(self.info)
        # for questions keep only question-related fields; hoist question_info up
        for key in temp_info.keys():
            if self.type == QUESTION:
                pop_info = self.info.pop(key)
                if key in ('author_info', 'content', 'pictures'):
                    self.info[key] = pop_info
                if key == 'question_info':
                    self.info.update(pop_info)
        # ********* drill down into a question's answers when requested ********* #
        # `or 0` / default -1: both values may be absent -> avoid TypeError on `>`
        total_count = self.info.get('question_answer_count') or 0
        question_id = self.info.get('question_id', '')
        answer_count = self.options.get('drill_down_count', -1)
        if self.type == QUESTION and answer_count > -1 and total_count > 0:
            total_count = answer_count if 0 < answer_count < total_count else total_count
            url = QUESTION_ANSWERS_URL.format(question_id=question_id)
            # bug fix: the fetched answers were previously discarded
            self.info.update(
                self.extract_meta_data(url, type_name='answers', total_count=total_count) or {})
        self.info['type'] = self.type
        # collect comments
        self.info.update(self.extract_comments())
        del temp_info
        return self.info

    def extract_id(self) -> PartialType:
        """Element id (falls back to zvideo_id for videos)."""
        data_id = self.element.get('id', '') or self.element.get('zvideo_id', '')
        return {f'{self.type}_id': data_id}

    def extract_video_id(self) -> PartialType:
        """Video id, probed in the three places the API may put it."""
        video_id = self.element.get('attachment', {}).get('attachment_id', '')
        if not video_id:
            video_id = self.element.get('video_id', '')
        if not video_id:
            video_id = self.element.get('video', {}).get('video_id', '')
        return {'video_id': video_id}

    def extract_title(self) -> PartialType:
        """Title, with search-highlight <em> markers stripped."""
        title = self.element.get('title', '')
        if not title:
            question = self.element.get('question', {})
            title = question.get('title', '') or question.get('name', '')
        if not title:
            title = self.element.get('attachment', {}).get('video', {}).get('title', '')
        return {'title': re.sub('<em>|</em>', '', title)}

    def extract_content(self, content=None) -> PartialType:
        """Plain-text content: extract text nodes when HTML, then strip leftover tags."""
        content = self.element.get('content', '') if content is None else content
        if content and self.ele_regex.findall(content):
            contents = []
            for ele in self.content_html(content).find('p,h1,h2,h3,h4,h5,h6,pre>code,li'):
                if ele and ele.text and ele.text not in contents:
                    contents.append(ele.text)
            content = '\n'.join(contents)
        content = re.sub(r'<.*>', '', content)
        return {'content': content}

    def extract_pub_time(self) -> PartialType:
        """Publish time (the original docstring wrongly said edit time)."""
        return {'pub_time': extract_time(self.element).get('pub_time')}

    def extract_edit_time(self) -> PartialType:
        """Last-edit time."""
        return {'edit_time': extract_time(self.element).get('edit_time')}

    def extract_question(self) -> PartialType:
        """Clean the question block (the element itself when type == QUESTION)."""
        question_info = self.element if self.type == QUESTION else self.element.get('question', {})
        question_id = question_info.get('id', '')
        question_title = question_info.get('title', '') or question_info.get('name', '')
        question_reaction = init_question_reaction()
        question_reaction.update(question_info.get('reaction', {}))
        answers = question_reaction.get('answer_num')
        return {
            'question_info': {
                'question_id': question_id,
                'created_time': extract_time(question_info).get('pub_time'),
                'question_title': question_title.replace('<em>', '').replace('</em>', ''),
                'question_type': question_info.get('type', '') or question_info.get('question_type', ''),
                'edit_time': extract_time(question_info).get('edit_time'),
                'question_url': BASE_URL + f'question/{question_id}' if question_id else '',
                'question_follower_count': question_info.get('follower_count', 0) or question_info.get('followerCount', 0),
                'comment_count': question_info.get('comment_count', 0) or question_info.get('commentCount', 0),
                'question_answer_count': question_info.get('answer_count', 0) or question_info.get('answerCount', 0) or answers,
                'question_visits_count': question_info.get('visits_count', 0) or question_info.get('visitCount', 0),
                'question_collapsed_count': question_info.get('collapsedAnswerCount', 0),
                'question_up_count': question_info.get('voteupCount', 0),
                'question_description': question_info.get('description', ''),
                'question_labels': [topic_info.get('name')
                                    for topic_info in question_info.get('topics', []) if topic_info],
                'question_reaction': question_reaction
            }
        }

    def extract_url(self) -> PartialType:
        """Canonical source URL, rebuilt for answers and normalized for articles/videos."""
        url = self.element.get('url', '')
        if ANSWER in url or self.type == VIDEO_ANSWER:
            question_id = self.element.get('question', {}).get('id', '')
            url = urljoin(BASE_URL,
                          f"question/{question_id}/answer/{self.info.get(f'{self.type}_id', '')}")
        if not url:
            url = self.element.get('video_url', '')
        url = re.sub('.*/articles/', ARTICLE_BASE_URL, url).replace('api/v4/zvideos', 'zvideo')
        return {'source_url': url}

    def extract_pictures(self) -> PartialType:
        """'#'-joined picture URLs from the content, else cover/thumbnail fields."""
        pic = ''
        if self._content_html:
            pic = '#'.join([img.attrs.get('src') for img in self._content_html.find('img')
                            if img and img.attrs.get('src', '').startswith('http')])
        if not pic:
            pic = self.element.get('cover_url', '') or self.element.get('thumbnail', '')
        if not pic:
            pic = self.element.get('image_url', '')
        return {'pictures': pic}

    def extract_video_url(self) -> PartialType:
        """Resolve the mp4 play URL through the video API (videos only)."""
        video_url = ''
        if self.type in (VIDEO, VIDEO_ANSWER) and self.info.get('video_id'):
            response = self.request_fn(VIDEO_BASE_URL + f'{self.info.get("video_id")}')
            if response:
                result = self.video_regex.findall(response.text)
                video_url = result[0] if result else ''
        return {'video_url': video_url}

    def extract_author(self, author_info=None) -> PartialType:
        """Author info, from `author_info` when given, else from the element."""
        author_info = self.element.get('author', {}) if not author_info else author_info
        user_url_token = author_info.get('url_token', '') or author_info.get('urlToken', '')
        user_type = author_info.get('type', '')
        badge = author_info.get('badge', []) or []  # guard: API may send null
        auth_infos = [badge_dict.get('description') for badge_dict in badge if badge_dict]
        return {
            'author_info': {
                'user_name': author_info.get('name', '').replace('<em>', '').replace('</em>', ''),
                'user_url_token': user_url_token,
                'user_id': author_info.get('id', ''),
                'user_url': urljoin(BASE_URL, f'{user_type}/{user_url_token}') if user_url_token else '',
                'user_headline': author_info.get('headline', ''),
                'user_avatar_url': author_info.get('avatar_url', '') or author_info.get('avatarUrl', ''),
                'user_is_vip': author_info.get('vip_info', {}).get('is_vip', '')
                               or author_info.get('vipInfo', {}).get('isVip', False),
                'user_follower_count': author_info.get('follower_count', 0) or author_info.get('followerCount', 0),
                'user_up_count': author_info.get('voteup_count', 0) or author_info.get('voteupCount', 0),
                'user_auth_infos': auth_infos,
            }
        }

    def extract_up_count(self) -> PartialType:
        """Upvote count."""
        return {'up_count': self.element.get('voteup_count', 0)}

    def extract_appreciate_count(self) -> PartialType:
        """Reward (tipping) count."""
        return {'appreciate_count': self.element.get('reward_info', {}).get('reward_member_count', 0)}

    def extract_comment_count(self) -> PartialType:
        """Comment count."""
        return {'comment_count': self.element.get('comment_count', 0) or self.element.get('commentCount', 0)}

    def extract_labels(self) -> PartialType:
        """Topic labels, falling back to the detail page's keywords meta tag."""
        labels = [topic.get('name', '') for topic in self.element.get('topics', []) if topic]
        if not labels:
            if self._detail_response is None:
                self.detail_response()
            label_ele = self._detail_response.html.find('meta[name="keywords"]', first=True)
            labels = label_ele.attrs.get('content').split(',') if label_ele else []
        return {'labels': labels}

    def extract_title_pictures(self) -> PartialType:
        """Title image URL(s), from the element or the detail page."""
        img_str = '#'.join([self.element.get('titleImage', '') or self.element.get('title_image', '')])
        if not img_str and self._detail_response:
            images = self._detail_response.html.find('div.QuestionHeader-detail img')
            img_str = '#'.join(img.attrs.get('src') for img in images if img)
        return {'title_pictures': img_str}

    def extract_title_description(self) -> PartialType:
        """Title description (question excerpt), falling back to the detail page."""
        desc = self.element.get('excerpt', '')
        if self._detail_response and not desc:
            divs = self._detail_response.html.find('div.QuestionHeader-detail')
            desc = ''.join([div.text.replace('显示全部', '').replace('\u200b', '')
                            for div in divs if div])
        desc = re.sub(r'<.*>|<em>|</em>', '', desc)
        return {'title_description': desc}

    def extract_relevant_query(self) -> PartialType:
        """Related search keywords."""
        query_list = self.element.get('query_list', [])
        return {
            # bug fix: was get('query' '') — implicit string concat swallowed
            # the default; get('query', '') is what was intended
            'relevant_query': [query_dict.get('query', '') for query_dict in query_list
                               if isinstance(query_dict, dict)]
        }

    def extract_play_count(self) -> PartialType:
        """Video play count."""
        return {'play_count': self.element.get('play_count', 0) or self.element.get('playCount', 0)}

    def extract_like_count(self) -> PartialType:
        """Like count."""
        return {'like_count': self.element.get('liked_count', 0)}

    def extract_comments(self, comment_count=0, comment_url=None) -> CommentType:
        """Collect comments for this element (arguably an independent crawl step).

        :param comment_count: known total; defaults to the cleaned comment_count
        :param comment_url: override for the comments API URL
        :return: {'comments': [...]}
        """
        comments = []
        # `or 0` / default -1: guard missing values against TypeError below
        comment_count = comment_count if comment_count else (self.info.get('comment_count') or 0)
        count = self.options.get('comment_count', -1)
        if count == -1 or comment_count == 0:
            return {'comments': comments}
        comment_count = comment_count if count == 0 and comment_count > count else count
        data_type = self.type.replace(VIDEO_ANSWER, ANSWER)
        if comment_url is None:
            comment_url = COMMENT_URL.format(data_type=f'{data_type}s',
                                             id=self.info.get(f'{self.type}_id'))
        page_urls = generating_page_links(comment_url, total_num=comment_count)
        logger.info(f'start request links: {page_urls}')
        for responses in self.request_fn(page_urls):
            for response in responses:
                if response is None or response.status_code != 200:
                    continue
                response_json = json.loads(response.text) or {}
                comment_infos = response_json.get('data', [])
                for comment_info in comment_infos:
                    if comment_count <= len(comments):
                        return {'comments': comments}
                    comment_content = comment_info.get('content', '')
                    if '<' in comment_content and '>' in comment_content:
                        parsed = HTML(html=comment_content)
                        comment_content = ''.join([ele.text for ele in parsed.find('p,a')
                                                   if ele and ele.text])
                    info = {
                        'comment_id': comment_info.get('id', ''),
                        'comment_content': comment_content,
                        'comment_pub_time': comment_info.get('created_time', 0),
                        'comment_vote_count': comment_info.get('vote_count', 0),
                        'author_info': {},
                    }
                    # `or {}` guards: the API sends JSON null for absent members
                    reply_to_author = self.extract_author(
                        (comment_info.get('reply_to_author') or {}).get('member', {}))
                    info['reply_to_author'] = reply_to_author.get('author_info', {})
                    info.update(self.extract_author(
                        (comment_info.get('author') or {}).get('member', {})))
                    comments.append(info)
                if response_json.get('paging', {}).get('is_end', False):
                    break
        return {'comments': comments}

    def extract_meta_data(self, start_url, type_name, **kwargs) -> PartialType:
        """Page through an account's answers / articles / videos / columns / questions.

        :param start_url: API endpoint for the list
        :param type_name: list kind; selects the extractor and the result key
        :return: {type_name: [cleaned items]}
        """
        total_count = kwargs.get('total_count', 0)
        if not total_count:
            return {}
        page_urls = generating_page_links(start_url, total_count)
        # dispatch table replaces the original if/elif extractor selection
        dispatch = {
            'questions': UserQuestionExtractor,
            'following_questions': UserQuestionExtractor,
            'pins': UserPinExtractor,
            'columns': UserColumnExtractor,
            'following_columns': UserColumnExtractor,
            'following': UserFriendExtractor,
            'followers': UserFriendExtractor,
            'following_topics': UserFollowingTopicExtractor,
        }
        extractor_cls = dispatch.get(type_name, BaseExtractor)
        data_list = []
        for responses in self.request_fn(page_urls, **kwargs):
            for response in responses:
                if response is None or response.status_code != 200:
                    continue
                response_json = json.loads(response.text) if response else {}
                infos = response_json.get('data', [])
                for info in infos:
                    if total_count <= len(data_list):
                        return {type_name: data_list}
                    extractor = extractor_cls(info, self.options, self.request_fn, full_html=None)
                    data_list.append(extractor.extract_data())
                is_end = response_json.get('paging', {}).get('is_end', False)
                if not infos or is_end:
                    break
        return {type_name: data_list}


class QuestionExtractor(BaseExtractor):
    """Hot-list question: clean the question and optionally drill into its answers."""

    def extract_data(self):
        question_info = self.extract_question().get('question_info') or {}
        count = self.options.get('drill_down_count', -1)
        total_count = question_info.get('question_answer_count') or 0
        if count > -1 and total_count > 0:
            total_count = count if 0 < count < total_count else total_count
            url = QUESTION_ANSWERS_URL.format(question_id=question_info.get('question_id', ''))
            result = self.extract_meta_data(start_url=url, total_count=total_count,
                                            type_name='answers') or {}
            question_info.update(result)
        return question_info
class UserExtractor(BaseExtractor):
    """Cleans a user element and optionally drills into the user's lists."""

    def extract_data(self):
        """Entry point: clean profile fields, then fetch each requested list."""
        try:
            self.info.update(self.extract_user() or {})
        except Exception as ex:
            logger.debug(f'method: extract_user error:{ex}')
        user_id = self.info.get('user_url_token', '')
        sort = self.options.get('sort', '')

        # ********* answers (sort-aware URL) ********* #
        start_url = USER_ANSWERS_URL.format(user_id=user_id)
        if sort == 'included':
            start_url = re.sub(r'/answers\?', '/marked-answers?', start_url)
        else:
            start_url = re.sub('sort_by=created', f'sort_by={sort}', start_url)
        self.info.update(self._drill_down('answer_count', 'user_answer_count',
                                          start_url, 'answers'))
        # ********* videos ********* #
        self.info.update(self._drill_down('zvideo_count', 'user_zvideo_count',
                                          USER_VIDEO_URL.format(user_id=user_id), 'zvideos'))
        # ********* articles (sort-aware URL) ********* #
        start_url = USER_ARTICLE_URL.format(user_id=user_id)
        if sort == 'included':
            start_url = start_url.replace('/articles?', '/included-articles?') \
                                 .replace('sort_by=created', 'sort_by=included')
        else:
            start_url = re.sub('sort_by=created', f'sort_by={sort}', start_url)
        self.info.update(self._drill_down('article_count', 'user_articles_count',
                                          start_url, 'articles', x_zse_96=True))
        # ********* questions ********* #
        self.info.update(self._drill_down('question_count', 'user_question_count',
                                          USER_QUESTION_URL.format(user_id=user_id), 'questions'))
        # ********* pins ********* #
        self.info.update(self._drill_down('pin_count', 'user_pins_count',
                                          USER_PINS_URL.format(user_id=user_id), 'pins',
                                          x_zse_96=True))
        # ********* columns ********* #
        self.info.update(self._drill_down('column_count', 'user_columns_count',
                                          USER_COLUMN_URL.format(user_id=user_id), 'columns'))
        # ********* who this user follows ********* #
        self.info.update(self._drill_down('following', 'user_following_count',
                                          USER_FOLLOWEE_URL.format(user_id=user_id), 'following',
                                          x_zse_96=True))
        # ********* followers of this user ********* #
        self.info.update(self._drill_down('followers', 'user_follower_count',
                                          USER_FOLLOWERS_URL.format(user_id=user_id), 'followers',
                                          x_zse_96=True))
        # ********* followed columns ********* #
        self.info.update(self._drill_down('following_columns', 'user_following_columns_count',
                                          USER_FOLLOWING_COLUMNS_URL.format(user_id=user_id),
                                          'following_columns', x_zse_96=True))
        # ********* followed questions ********* #
        self.info.update(self._drill_down('following_questions', 'user_following_question_count',
                                          USER_FOLLOWING_QUESTIONS_URL.format(user_id=user_id),
                                          'following_questions', x_zse_96=True))
        # ********* followed topics ********* #
        self.info.update(self._drill_down('following_topics', 'user_following_topic_count',
                                          USER_FOLLOWING_TOPICS_URL.format(user_id=user_id),
                                          'following_topics', x_zse_96=True))
        return self.info

    def _drill_down(self, option_key, total_key, start_url, type_name, **request_kwargs):
        """Fetch up to options[option_key] items of `type_name`; {} when disabled/empty.

        Replaces eleven copy-pasted blocks from the original implementation;
        also guards missing option/count values (original crashed with KeyError
        / TypeError when extract_user had failed or the option was absent).
        """
        count = self.options.get(option_key, -1)
        total_count = self.info.get(total_key, 0) or 0
        if count is None or count <= -1 or total_count <= 0:
            return {}
        total_count = count if 0 < count < total_count else total_count
        return self.extract_meta_data(start_url=start_url, type_name=type_name,
                                      total_count=total_count, **request_kwargs) or {}

    def extract_user(self) -> Dict[str, Dict[str, Union[List, int, str]]]:
        """Flatten the raw user element into user_* fields."""
        badges = self.element.get('badge', []) or []
        return {
            'user_id': self.element.get('id', ''),
            'user_name': self.element.get('name'),
            'user_url_token': self.element.get('url_token', ''),
            'user_head_img': self.element.get('avatar_url', ''),
            'user_is_org': self.element.get('is_org', False),
            'user_headline': self.element.get('headline', ''),
            'user_type': self.element.get('type', ''),
            'user_is_active': self.element.get('is_active', True),
            'user_description': re.sub(r'<.*>|</.*>', '', self.element.get('description', '')),
            'user_is_advertiser': self.element.get('is_advertiser', False),
            'user_is_vip': self.element.get('vip_info', {}).get('is_vip', False),
            'user_badges': [badge.get('description', '') for badge in badges
                            if badge and isinstance(badge, dict)],
            'user_follower_count': self.element.get('follower_count', 0),
            'user_following_count': self.element.get('following_count', 0),
            'user_answer_count': self.element.get('answer_count', 0),
            'user_question_count': self.element.get('question_count', 0),
            'user_articles_count': self.element.get('articles_count', 0),
            'user_columns_count': self.element.get('columns_count', 0),
            'user_zvideo_count': self.element.get('zvideo_count', 0),
            'user_pins_count': self.element.get('pins_count', 0),
            'user_favorite_count': self.element.get('favorite_count', 0),      # favorites created
            'user_favorited_count': self.element.get('favorited_count', 0),    # times favorited by others
            'user_reactions_count': self.element.get('reactions_count', 0),
            'user_shared_count': self.element.get('shared_count', 0),
            'user_voteup_count': self.element.get('voteup_count', 0),          # upvotes received
            'user_thanked_count': self.element.get('thanked_count', 0),        # thanks received
            'user_following_columns_count': self.element.get('following_columns_count', 0),    # followed columns
            'user_following_topic_count': self.element.get('following_topic_count', 0),        # followed topics
            'user_following_question_count': self.element.get('following_question_count', 0),  # followed questions
            'user_following_favlists_count': self.element.get('following_favlists_count', 0),
            'user_participated_live_count': self.element.get('participated_live_count', 0),    # lives joined
            'user_included_answers_count': self.element.get('included_answers_count', 0),      # editor-picked answers
            'user_included_articles_count': self.element.get('included_articles_count', 0),    # editor-picked articles
            'user_recognized_count': self.element.get('recognized_count', 0),                  # professional recognitions
            'user_cover_url': self.element.get('cover_url', ''),
            'user_org_name': self.element.get('org_detail', {}).get('organization_name', ''),
            'user_org_industry': self.element.get('org_detail', {}).get('industry', ''),
            'user_org_url': self.element.get('home_page', ''),
            'user_org_lic_code': self.element.get('business_lic_code', '')
                                 or self.element.get('org_detail', {}).get('social_credit_code', '')
        }


class UserQuestionExtractor(UserExtractor):
    """A question asked by the user, with optional answer drill-down."""

    def extract_data(self):
        question_info = self.extract_question().get('question_info', {})
        count = self.options.get('drill_down_count', -1)
        total_count = question_info.get('question_answer_count') or 0
        if total_count > 0 and count > -1:
            total_count = count if 0 < count < total_count else total_count
            url = QUESTION_ANSWERS_URL.format(question_id=question_info.get('question_id', ''))
            answer_info = self.extract_meta_data(url, type_name='answers',
                                                 total_count=total_count) or {}
            question_info.update(answer_info)
        return question_info


class UserPinExtractor(UserExtractor):
    """A pin (short 'idea' post) by the user."""

    def extract_data(self):
        methods = [
            self.extract_author,
            self.extract_pin,
            self.extract_pin_content,
            self.extract_pin_pictures,
        ]
        for method in methods:
            try:
                partial_info = method()
                if partial_info is None:
                    continue
                self.info.update(partial_info)
            except Exception as ex:
                logger.debug(f'method: {method.__name__} error:{ex}')
        # ********* collect the pin's comments ********* #
        self.info.update(self.extract_comments())
        return self.info

    def extract_pin(self) -> Dict[str, Union[str, int]]:
        """Pin metadata."""
        return {
            'pin_title': self.element.get('excerpt_title', ''),
            'pin_type': self.element.get('type', ''),
            'pin_reaction_count': self.element.get('reaction_count', 0),
            'pin_like_count': self.element.get('like_count', 0),
            'pin_id': self.element.get('id', ''),
            # bug fix: pub/edit were swapped in the original
            'pin_pub_time': extract_time(self.element).get('pub_time', ''),
            'pin_edit_time': extract_time(self.element).get('edit_time', ''),
            'pin_source_url': urljoin(BASE_URL, self.element.get('url', '')),
            'comment_count': self.element.get('comment_count', 0),
        }

    def extract_pin_content(self) -> Dict[str, str]:
        """Concatenated plain-text content of the pin."""
        content = ''.join([part.get('content', '') for part in self.element.get('content', [])
                           if part and part.get('content', '')])
        content = re.sub(r'<.*>', '', content)
        return self.extract_content(content)

    def extract_pin_pictures(self) -> Dict[str, str]:
        """'#'-joined picture URLs attached to the pin."""
        pic = '#'.join([part.get('url', '') for part in self.element.get('content', [])
                        if part and part.get('url', '')])
        return {'pin_pictures': pic}


class UserColumnExtractor(UserExtractor):
    """A column (owned or followed), with optional item drill-down."""

    def extract_data(self):
        column = self.element.get('column', {}) or self.element
        author = self.extract_author(column.get('author', {}))
        column_info = {
            'column_title': column.get('title', ''),
            'column_url': column.get('url', '').replace('api.', '').replace('columns', 'column'),
            'column_image_url': column.get('image_url', ''),
            'column_edit_time': column.get('updated', ''),
            'column_followers': column.get('followers', 0),
            'column_articles_count': column.get('articles_count', 0),
            'column_intro': column.get('intro', ''),
            'column_id': column.get('id', ''),
            'column_voteup_count': column.get('voteup_count', 0),
            'column_all_count': column.get('items_count', 0),
            'column_author': author
        }
        total_count = column_info['column_all_count'] or 0
        count = self.options.get('drill_down_count', -1)
        if count > -1 and total_count > 0:
            total_count = count if 0 < count < total_count else total_count
            url = COLUMN_ITEMS_URL.format(column_id=column_info.get('column_id', ''))
            items = self.extract_meta_data(start_url=url, type_name='column_articles',
                                           total_count=total_count) or {}
            column_info.update(items)
        return column_info


class UserFriendExtractor(UserExtractor):
    """A followee or follower: just the profile fields."""

    def extract_data(self):
        return self.extract_user()


class UserFollowingTopicExtractor(UserExtractor):
    """A topic the user follows, with optional feed drill-down."""

    def extract_data(self):
        topic = self.extract_topic()
        topic['feeds'] = self.extract_feed(topic)
        return topic

    def extract_topic(self):
        """Fetch the topic via its API URL (when reachable) and flatten it."""
        topic = self.element.get('topic', {}) or self.element or {}
        topic_api = topic.get('url', '')
        response = self.request_fn(topic_api)
        if response and response.status_code == 200:
            topic = json.loads(response.text)
        return {
            'topic_id': topic.get('id', ''),
            'topic_name': topic.get('name', ''),
            'topic_url': topic.get('url', '').replace('api/v4/topics', 'topic'),
            'topic_avatar_url': topic.get('avatar_url', ''),
            'topic_excerpt': topic.get('excerpt', ''),
            'topic_followers_count': topic.get('followers_count', 0),
            'topic_introduction': topic.get('introduction', ''),
            'topic_father_count': topic.get('father_count', 0),
            'topic_feed_count': topic.get('questions_count', 0),
            'topic_unanswered_count': topic.get('unanswered_count', 0),
            'is_super_topic': topic.get('is_super_topic_vote', False),
            'is_vote': topic.get('is_vote', False),
            'is_black': topic.get('is_black', False)
        }

    def extract_feed(self, topic):
        """Collect up to drill_down_count feed items of the topic.

        Always returns a list (original returned None when drilling was off).
        """
        feeds = []
        topic_feed_count = topic.get('topic_feed_count') or 0
        topic_id = topic.get('topic_id')
        count = self.options.get('drill_down_count', -1)
        if count <= -1 or topic_feed_count <= 0:
            return feeds
        topic_feed_count = count if 0 < count < topic_feed_count else topic_feed_count
        url = TOPIC_FEEDS_URL.format(topic_id=topic_id)
        page_urls = generating_page_links(url, total_num=topic_feed_count)
        for responses in self.request_fn(page_urls):
            for response in responses:
                if not response or response.status_code != 200:
                    continue
                data = json.loads(response.text).get('data', [])
                for feed in data:
                    if len(feeds) >= topic_feed_count:
                        return feeds
                    target = feed.get('target', {})
                    feeds.append(BaseExtractor(target, self.options,
                                               self.request_fn).extract_data())
        return feeds
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/zhihu_crawler/extractors.py
extractors.py
import asyncio
from requests_html import HTMLSession, AsyncHTMLSession
from functools import partial
from .page_iterators import *
from .zhihu_types import *
from .extractors import extract_data, extract_user, extract_question_data
from ..utils import get_useragent, get_headers, get_proxy
import itertools
from loguru import logger
import json
import time


class ZhiHuScraper:
    """Zhihu scraper: owns the HTTP sessions and dispatches every crawl.

    A synchronous ``HTMLSession`` serves single-URL requests; an
    ``AsyncHTMLSession`` fans out batched page requests.  Note that
    ``default_headers`` is a class attribute, so cookies written to it are
    shared by every instance.
    """

    default_headers = {
        # 'connection': 'close',
        "user-agent": get_useragent()
    }

    def __init__(self, session=None, async_session=None, requests_kwargs=None):
        # Sessions may be injected (e.g. for testing); otherwise build
        # defaults carrying the shared headers.
        if session is None:
            session = HTMLSession()
            session.headers.update(self.default_headers)
        if requests_kwargs is None:
            requests_kwargs = {}
        if async_session is None:
            async_session = AsyncHTMLSession(workers=ASYNC_COUNT)
            async_session.headers.update(self.default_headers)
        self.async_session = async_session
        self.session = session
        # Extra kwargs (proxies, timeout, ...) applied to every request.
        self.requests_kwargs = requests_kwargs

    def set_proxy(self, proxy: Optional[Dict[str, str]] = None):
        """Set the proxy used for every request.

        :param proxy: proxy = {'http': 'http://ip:port', 'https': 'http://ip:port'}
        :return:
        """
        proxies = {
            'proxies': proxy
        }
        self.requests_kwargs.update(proxies)

    def set_timeout(self, timeout: int):
        """Set the per-request timeout, in seconds."""
        self.requests_kwargs['timeout'] = timeout

    def search_crawler(self, key_word: Union[str], **kwargs) -> Union[Iterator[ArticleType], Iterator[AnswerType], Iterator[VideoType]]:
        """Crawl keyword-search results.

        :param key_word: the keyword to search for
        :return: iterator of extracted result dicts
        """
        kwargs['scraper'] = self
        iter_search_pages_fn = partial(iter_search_pages, key_word=key_word, request_fn=self.send, **kwargs)
        return self._generic_crawler(extract_data, iter_search_pages_fn, **kwargs)

    def top_search_crawler(self, top_search_url, **kwargs) -> Keyword:
        """Fetch the hot-search endpoint and return its keyword list."""
        response = self.send(top_search_url)
        data = json.loads(response.text)
        keywords = []
        for info in data.get('top_search', {}).get('words', []):
            keywords.append(info.get('query', ''))
        del response, data
        return {
            'keywords': keywords
        }

    def question_crawler(self, question_id: Union[str], **kwargs) -> Iterator[QuestionType]:
        """Crawl one question (and its answers) by question id."""
        kwargs['scraper'] = self
        iter_question_pages_fn = partial(iter_question_pages, question_id=question_id, request_fn=self.send, **kwargs)
        # Answers to pull are capped by drill_down_count (0 = no cap).
        kwargs['total_count'] = kwargs.get('drill_down_count', 0)
        return self._generic_crawler(extract_data, iter_question_pages_fn, **kwargs)

    def article_crawler(self, article_id: Union[str], **kwargs) -> Iterator[ArticleType]:
        """Crawl one article page by article id."""
        kwargs['scraper'] = self
        iter_article_pages_fn = partial(iter_article_pages, article_id=article_id, request_fn=self.send, **kwargs)
        return self._generic_crawler(extract_data, iter_article_pages_fn, **kwargs)

    def video_crawler(self, video_id: Union[str], **kwargs) -> Iterator[VideoType]:
        """Crawl one video page by video id."""
        kwargs['scraper'] = self
        iter_video_pages_fn = partial(iter_video_pages, video_id=video_id, request_fn=self.send, **kwargs)
        return self._generic_crawler(extract_data, iter_video_pages_fn, **kwargs)

    def user_crawler(self, user_id: Union[str], **kwargs) -> Iterator[UserType]:
        """Crawl a user's profile page by account id."""
        kwargs['scraper'] = self
        iter_user_page_fn = partial(iter_user_pages, user_id=user_id, request_fn=self.send, **kwargs)
        return self._generic_crawler(extract_user, iter_user_page_fn, **kwargs)

    def hot_list_crawler(self, **kwargs) -> Iterator[QuestionType]:
        """Crawl the front-page hot list."""
        kwargs['scraper'] = self
        iter_hot_page_fn = partial(iter_hot_list_pages, request_fn=self.send, **kwargs)
        return self._generic_crawler(extract_question_data, iter_hot_page_fn, **kwargs)

    def hot_question_crawler(self, domains, **kwargs) -> Iterator[QuestionType]:
        """Crawl the hot-question ranking for each channel id in *domains*."""
        kwargs['scraper'] = self
        kwargs['total_count'] = kwargs.pop('question_count', 0)
        for domain in domains:
            iter_hot_question_page_fn = partial(iter_hot_question_pages, domain=domain, request_fn=self.send, **kwargs)
            for result in self._generic_crawler(extract_question_data, iter_hot_question_page_fn, **kwargs):
                yield result

    def send(self, url, **kwargs):
        """Dispatch to :meth:`get` or :meth:`post` based on ``method``."""
        # NOTE(review): an empty url is only logged, not rejected -- the
        # request is still attempted (get() then asserts url is not None).
        if not url:
            logger.error('url is null')
        method = kwargs.get('method', 'GET')
        return self.post(url, **kwargs) if method == 'POST' else self.get(url, **kwargs)

    def get(self, url, **kwargs):
        """GET request; performs the x_zse_96 signing when requested.

        @ x_zse_96: whether the x_zse_96 signature parameter is required
        """
        assert url is not None, 'url is null'
        x_zse_96 = kwargs.get('x_zse_96', False)
        # d_c0 is extracted from the shared cookie header; it feeds the
        # x-zse-96 signature.
        d_c0 = re.sub('d_c0=|;.*', '', self.default_headers.get('cookie', '')) or ''
        kwargs['d_c0'] = d_c0
        if isinstance(url, str):
            if x_zse_96:
                self.default_headers.update(get_headers(url, d_c0) or {})
                self.session.headers.update(self.default_headers)
            retry_limit = 6
            response = None
            # Retry with linear backoff (2s, 4s, ...); returns on first
            # successful (2xx/3xx) response.
            for retry in range(1, retry_limit + 1):
                try:
                    response = self.session.get(url, **self.requests_kwargs)
                    response.raise_for_status()
                    return response
                except Exception as e:
                    # NOTE(review): the exception from the FINAL attempt is
                    # swallowed; after the loop the method falls through and
                    # returns None for a str url, which callers must handle.
                    if retry < retry_limit:
                        sleep_time = retry * 2
                        logger.debug(f'重连第{retry}次,休眠{sleep_time}秒, 异常:{e}')
                        time.sleep(sleep_time)
                        # re-acquire a proxy here if rotation is enabled
                        # proxies = {'http': get_proxy(), 'https': get_proxy()}
            # Only guards against never having received any response object.
            assert response is not None, f'重新请求{retry_limit}次, response为空'
        if isinstance(url, list):
            # batch the URLs through the coroutine-based session
            return self.generic_response(url, **kwargs)

    def generic_response(self, urls, **kwargs):
        """Yield batches of responses for *urls*, ASYNC_COUNT at a time."""
        urls = [urls[i: i + ASYNC_COUNT] for i in range(0, len(urls), ASYNC_COUNT)]
        for sub_urls in urls:
            # Bind url as a default arg so each task keeps its own URL.
            tasks = [lambda url=url: self.async_get(url, **kwargs) for url in sub_urls]
            results = self.async_session.run(*tasks)
            yield results

    async def async_get(self, url, **kwargs):
        """Async GET for one url; signs with x_zse_96 when requested."""
        if kwargs.get('x_zse_96', False):
            self.default_headers.update(get_headers(url, kwargs.get('d_c0')) or {})
            self.async_session.headers.update(self.default_headers)
        response = await self.async_session.get(url, **self.requests_kwargs)
        # Non-200 responses are logged and returned as-is after a pause.
        if response and response.status_code != 200:
            logger.error(f'request url: {url}, response code: {response.status_code}')
            await asyncio.sleep(2)
        return response

    def post(self, url, **kwargs):
        # POST support is not implemented yet.
        pass

    def _generic_crawler(self, extract_fn, iter_pages_fn, options=None, **kwargs):
        """Relay generator shared by every crawler.

        @extract_fn    extraction/cleaning function applied to each element
        @iter_pages_fn callable producing an iterator of result pages
        @options       extraction options (a set is coerced to a dict)
        """
        page_limit = kwargs.get('page_limit') if kwargs.get('page_limit', 0) else DEFAULT_PAGE_LIMIT
        # page_limit=None means "no page limit" -> unbounded counter.
        counter = itertools.count(0) if page_limit is None else range(page_limit)
        if options is None:
            options = {}
        elif isinstance(options, set):
            options = {k: True for k in options}
        # total_count caps the number of yielded items (0 = no cap).
        total_count = kwargs.get('total_count', 0)
        count = 0
        for i, page in zip(counter, iter_pages_fn()):
            for element in page:
                if 0 < total_count <= count:
                    return None
                count += 1
                info = extract_fn(element, options=options, request_fn=self.send)
                yield info
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/zhihu_crawler/zhihu_scraper.py
zhihu_scraper.py
from .zhihu_types import *
from .zhihu_scraper import ZhiHuScraper
from .constants import *

# Module-level scraper shared by every public crawler function below.
_scraper = ZhiHuScraper()


def set_cookie(cookie: Optional[Dict[str, str]] = None):
    """Install login cookies on the shared scraper.

    :param cookie: cookie dict such as ``{'d_c0': '...'}``; ``d_c0`` is
        mandatory because it feeds the x-zse-96 request signature.
    """
    # FIX: the original used a mutable default argument (``cookie={}``).
    cookie = cookie or {}
    assert 'd_c0' in cookie.keys(), 'd_c0 为必要参数!'
    cookies = []
    for key in cookie.keys():
        value = cookie[key]
        # d_c0 must keep its surrounding double quotes in the header value.
        cookie_str = f'{key}="{value}"' if key == 'd_c0' else f'{key}={value}'
        cookies.append(cookie_str)
    _scraper.default_headers['cookie'] = ';'.join(cookies)


def set_proxy(proxy: Optional[Dict[str, str]] = None):
    """Set the request proxy; rotating it per request works best."""
    _scraper.set_proxy(proxy)


def _set_timeout(timeout: int):
    """Validate *timeout* and clamp unreasonably small values to the default."""
    assert isinstance(timeout, int), 'timeout值应为大于0的整数'
    # The original tested ``timeout < 5 or timeout < 0``; the second clause is
    # subsumed by the first, so one comparison is equivalent.
    if timeout < 5:
        timeout = DEFAULT_REQUESTS_TIMEOUT
    _scraper.set_timeout(timeout=timeout)


def search_crawler(key_word: Optional[str] = None,
                   comment_count: Optional[int] = -1,
                   count: Optional[int] = 0,
                   **kwargs) -> Union[Iterator[AnswerType],
                                      Iterator[ArticleType],
                                      Iterator[VideoType]]:
    """Crawl keyword-search results (answers, articles, videos).

    :param key_word: keyword to search for.
    :param count: number of results to collect: -1 none, 0 all (default),
        >0 that many. Large values cause many requests.
    :param comment_count: comments per result: -1 none (default), 0 all,
        >0 that many.

    Supported ``kwargs``:
        page_limit (int): pages to crawl, defaults to DEFAULT_PAGE_LIMIT.
        data_type (str|list|set|tuple): any of answer / article / zvideo;
            all three by default.
        sort: None comprehensive (default), 'created_time' newest first,
            'upvoted_count' most upvoted.
        time_interval: a_day / a_week / a_month / three_months / half_a_year /
            a_year / None (default, unlimited).
        drill_down_count (int): answers to pull under each question:
            -1 none (default), 0 all, >0 that many.
    :return: iterator of extracted result dicts.
    """
    _set_timeout(kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT))
    options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
    if isinstance(options, set):
        # FIX: the converted dict must be written back into kwargs, otherwise
        # the scraper still receives the original set.
        options = {k: True for k in options}
        kwargs['options'] = options
    options.setdefault('key_word', key_word)
    options['drill_down_count'] = kwargs.pop('drill_down_count', -1)
    options['comment_count'] = comment_count
    data_types = kwargs.get('data_type', [ANSWER, ARTICLE, VIDEO])
    kwargs['data_type'] = options['data_type'] = data_types
    kwargs['sort'] = options['sort'] = kwargs.get('sort', None)
    kwargs['time_interval'] = options['time_interval'] = kwargs.get('time_interval', None)
    kwargs.pop('cookies', None)  # reserved parameter, currently unused
    kwargs['total_count'] = count
    data_types = [data_types] if isinstance(data_types, str) else data_types
    if isinstance(data_types, list):
        # One scraper pass per requested data type.
        for data_type in data_types:
            kwargs['data_type'] = data_type
            for result in _scraper.search_crawler(key_word, **kwargs):
                yield result


def top_search_crawl(top_search_url: Optional[str] = TOP_SEARCH_URL, **kwargs) -> Keyword:
    """Crawl the hot-search keyword list.

    :param top_search_url: fixed hot-search endpoint.
    :return: ``{'keywords': [...]}``.
    """
    _set_timeout(kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT))
    return _scraper.top_search_crawler(top_search_url=top_search_url, **kwargs)


def hot_questions_crawler(period: Union[str] = 'hour',
                          domains: Union[str, list, tuple] = 0,
                          **kwargs) -> Iterator[QuestionType]:
    """Crawl the hot-question rankings.

    url = https://www.zhihu.com/knowledge-plan/hot-question/hot/0/hour
    api = https://api.zhihu.com/creators/rank/hot?domain=0&limit=20&offset=0&period=hour

    :param period: ranking type: 'hour' (default), 'day', or 'week'.
    :param domains: channel id(s); a single int/str or a list/tuple of them.
        0 = all channels (default); see the channel-id table in the project docs.

    Supported ``kwargs``:
        question_count (int): questions to collect; 0 all (default), >0 that many.
        drill_down_count (int): answers per question; -1 none (default),
            0 all, >0 that many.
        comment_count (int): comments per item; -1 none (default), 0 all,
            >0 that many.
    """
    _set_timeout(kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT))
    # FIX: the original tested ``isinstance(domains, int or str)``, which is
    # just ``isinstance(domains, int)`` -- a str domain was then iterated
    # character by character downstream.
    if isinstance(domains, (int, str)):
        domains = [domains]
    options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
    kwargs['question_count'] = kwargs.pop('question_count', 0)
    options['drill_down_count'] = kwargs.pop('drill_down_count', -1)
    options['comment_count'] = kwargs.pop('comment_count', -1)
    kwargs['period'] = period
    return _scraper.hot_question_crawler(domains=domains, **kwargs)


def hot_list_crawler(drill_down_count: Union[int] = -1, **kwargs) -> Iterator[QuestionType]:
    """Crawl the front-page hot list.

    https://www.zhihu.com/hot
    api: https://api.zhihu.com/topstory/hot-lists/total?limit=50

    :param drill_down_count: answers per hot entry; -1 none (default),
        0 all, >0 that many (the hot list drills into a lot of data).

    Supported ``kwargs``:
        comment_count (int): comments per item; -1 none (default), 0 all,
            >0 that many.
    """
    _set_timeout(kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT))
    options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
    options['comment_count'] = kwargs.pop('comment_count', -1)
    options['drill_down_count'] = drill_down_count
    return _scraper.hot_list_crawler(**kwargs)


def common_crawler(task_id: Union[str],
                   data_type: Optional[str] = None,
                   drill_down_count: Optional[int] = -1,
                   comment_count: Optional[int] = -1,
                   similar_questions: Optional[bool] = False,
                   similar_recommends: Optional[bool] = False,
                   **kwargs) -> Union[Iterator[AnswerType], Iterator[QuestionType],
                                      Iterator[VideoType], Iterator[ArticleType]]:
    """Generic crawler for a single question / video / article.

    :param task_id: question id, video id, or article id.
    :param data_type: which crawler to use: 'question', 'article', or 'zvideo'.
    :param drill_down_count: answers to drill into; -1 none (default), 0 all,
        >0 that many.
    :param comment_count: comments per item; -1 none (default), 0 all,
        >0 that many.
    :param similar_questions: also collect similar questions (default False).
    :param similar_recommends: also collect similar recommendations (default False).

    Supported ``kwargs``:
        pubdate_sort (bool): sort by newest publication date (default True).
    :raises AssertionError: when *data_type* is not a supported type.
    """
    _set_timeout(kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT))
    options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
    if isinstance(options, set):
        # FIX: write the converted dict back (see search_crawler).
        options = {k: True for k in options}
        kwargs['options'] = options
    if data_type:
        options['data_type'] = data_type
    kwargs['pubdate_sort'] = kwargs.get('pubdate_sort', True)
    options['drill_down_count'] = kwargs['drill_down_count'] = drill_down_count
    options['comment_count'] = comment_count
    if similar_questions:
        options['similar_questions'] = similar_questions
    if similar_recommends:
        options['similar_recommends'] = similar_recommends
    if data_type == QUESTION:
        return _scraper.question_crawler(question_id=task_id, **kwargs)
    elif data_type == ARTICLE:
        return _scraper.article_crawler(article_id=task_id, **kwargs)
    elif data_type == VIDEO:
        return _scraper.video_crawler(video_id=task_id, **kwargs)
    # FIX: the original ended with ``assert data_type not in (ARTICLE, VIDEO,
    # QUESTION)``, which can never fail once this point is reached, so an
    # unsupported data_type silently returned None instead of erroring.
    raise AssertionError('匹配不到可以采集的数据类型,请校对data_type的值')


def user_crawler(user_id: Union[str],
                 following: Union[int] = -1,
                 followers: Union[int] = -1,
                 following_topics: Union[int] = -1,
                 following_columns: Union[int] = -1,
                 following_questions: Union[int] = -1,
                 following_collections: Union[int] = -1,
                 **kwargs) -> Iterator[UserType]:
    """Crawl a user profile.

    :param user_id: account id, e.g. ``kenneth-pan`` in
        https://www.zhihu.com/people/kenneth-pan/answers;
        data api: https://api.zhihu.com/people/user_id
    :param following: followees to collect; -1 none (default),
        0 all (may cause many requests), >0 that many.
    :param followers: followers to collect; same value rules.
    :param following_topics: followed topics; same value rules.
    :param following_columns: followed columns; same value rules.
    :param following_questions: followed questions; same value rules.
    :param following_collections: followed collections; same value rules.

    Supported ``kwargs`` (all follow the same -1/0/>0 value rules):
        answer_count, zvideo_count, question_count, article_count,
        column_count, pin_count, collection_count, drill_down_count,
        comment_count.
        sort (str): 'created' by time (default), 'included' by inclusion,
            'voteups' by most upvoted.
    """
    _set_timeout(kwargs.pop('timeout', DEFAULT_REQUESTS_TIMEOUT))
    options: Union[Dict[str, Any], Set[str]] = kwargs.setdefault('options', {})
    if isinstance(options, set):
        # FIX: write the converted dict back (see search_crawler).
        options = {k: True for k in options}
        kwargs['options'] = options
    options['comment_count'] = kwargs.pop('comment_count', -1)
    options['answer_count'] = kwargs.pop('answer_count', -1)
    options['zvideo_count'] = kwargs.pop('zvideo_count', -1)
    options['question_count'] = kwargs.pop('question_count', -1)
    options['article_count'] = kwargs.pop('article_count', -1)
    options['column_count'] = kwargs.pop('column_count', -1)
    options['pin_count'] = kwargs.pop('pin_count', -1)
    options['drill_down_count'] = kwargs.pop('drill_down_count', -1)
    options['collection_count'] = kwargs.pop('collection_count', -1)
    options['following'] = following
    options['followers'] = followers
    options['following_topics'] = following_topics
    options['following_columns'] = following_columns
    options['following_questions'] = following_questions
    options['following_collections'] = following_collections
    kwargs['sort'] = options['sort'] = kwargs.get('sort', 'created')
    return _scraper.user_crawler(user_id, **kwargs)
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/zhihu_crawler/__init__.py
__init__.py
// -----------------------------------------------------------------------------
// Zhihu "x-zse-96" signing routine (deliberately obfuscated upstream code --
// DO NOT hand-edit the logic; regenerate from the site if it changes).
//
// Structure, as far as the code itself shows:
//  * jsdom provides `window`/`document` so the browser-targeted payload runs
//    under Node (the VM uses `window.atob` to decode its bytecode blob).
//  * The numbered constructor functions (s, i, h, a, c, n, e, o, r, Q, C, B,
//    f, g, u, w) are opcode handlers for a small register/stack virtual
//    machine; `G` is the VM state (registers `r`, call stacks `Q`/`B`,
//    operand stack `f`, instruction pointer `C`).
//  * `(new G).v('Axjg...')` decodes and executes the embedded bytecode, which
//    installs `_encrypt` onto `__g`.
//  * The exported function `b` wraps `__g._encrypt(encodeURIComponent(e))`;
//    the trailing console.log runs it on a sample 32-char hex digest.
// -----------------------------------------------------------------------------
const jsdom = require("jsdom"); const { JSDOM } = jsdom; const dom = new JSDOM(`<!DOCTYPE html><p>Hello world</p>`); window = dom.window; document = window.document; function t(e) { return (t = 'function' == typeof Symbol && 'symbol' == typeof Symbol.A ? function (e) { return typeof e; } : function (e) { return e && 'function' == typeof Symbol && e.constructor === Symbol && e !== Symbol.prototype ? 'symbol' : typeof e; } )(e); } Object.defineProperty(exports, '__esModule', { value: !0, }); var A = '2.0' , __g = {}; function s() { } function i(e) { this.t = (2048 & e) >> 11, this.s = (1536 & e) >> 9, this.i = 511 & e, this.h = 511 & e; } function h(e) { this.s = (3072 & e) >> 10, this.h = 1023 & e; } function a(e) { this.a = (3072 & e) >> 10, this.c = (768 & e) >> 8, this.n = (192 & e) >> 6, this.t = 63 & e; } function c(e) { this.s = e >> 10 & 3, this.i = 1023 & e; } function n() { } function e(e) { this.a = (3072 & e) >> 10, this.c = (768 & e) >> 8, this.n = (192 & e) >> 6, this.t = 63 & e; } function o(e) { this.h = (4095 & e) >> 2, this.t = 3 & e; } function r(e) { this.s = e >> 10 & 3, this.i = e >> 2 & 255, this.t = 3 & e; } s.prototype.e = function (e) { e.o = !1; } , i.prototype.e = function (e) { switch (this.t) { case 0: e.r[this.s] = this.i; break; case 1: e.r[this.s] = e.k[this.h]; } } , h.prototype.e = function (e) { e.k[this.h] = e.r[this.s]; } , a.prototype.e = function (e) { switch (this.t) { case 0: e.r[this.a] = e.r[this.c] + e.r[this.n]; break; case 1: e.r[this.a] = e.r[this.c] - e.r[this.n]; break; case 2: e.r[this.a] = e.r[this.c] * e.r[this.n]; break; case 3: e.r[this.a] = e.r[this.c] / e.r[this.n]; break; case 4: e.r[this.a] = e.r[this.c] % e.r[this.n]; break; case 5: e.r[this.a] = e.r[this.c] == e.r[this.n]; break; case 6: e.r[this.a] = e.r[this.c] >= e.r[this.n]; break; case 7: e.r[this.a] = e.r[this.c] || e.r[this.n]; break; case 8: e.r[this.a] = e.r[this.c] && e.r[this.n]; break; case 9: e.r[this.a] = e.r[this.c] !== e.r[this.n]; break;
// (continuation of the arithmetic/comparison opcode switch)
case 10: e.r[this.a] = t(e.r[this.c]); break; case 11: e.r[this.a] = e.r[this.c] in e.r[this.n]; break; case 12: e.r[this.a] = e.r[this.c] > e.r[this.n]; break; case 13: e.r[this.a] = -e.r[this.c]; break; case 14: e.r[this.a] = e.r[this.c] < e.r[this.n]; break; case 15: e.r[this.a] = e.r[this.c] & e.r[this.n]; break; case 16: e.r[this.a] = e.r[this.c] ^ e.r[this.n]; break; case 17: e.r[this.a] = e.r[this.c] << e.r[this.n]; break; case 18: e.r[this.a] = e.r[this.c] >>> e.r[this.n]; break; case 19: e.r[this.a] = e.r[this.c] | e.r[this.n]; break; case 20: e.r[this.a] = !e.r[this.c]; } } , c.prototype.e = function (e) { e.Q.push(e.C), e.B.push(e.k), e.C = e.r[this.s], e.k = []; for (var t = 0; t < this.i; t++) e.k.unshift(e.f.pop()); e.g.push(e.f), e.f = []; } , n.prototype.e = function (e) { e.C = e.Q.pop(), e.k = e.B.pop(), e.f = e.g.pop(); } , e.prototype.e = function (e) { switch (this.t) { case 0: e.u = e.r[this.a] >= e.r[this.c]; break; case 1: e.u = e.r[this.a] <= e.r[this.c]; break; case 2: e.u = e.r[this.a] > e.r[this.c]; break; case 3: e.u = e.r[this.a] < e.r[this.c]; break; case 4: e.u = e.r[this.a] == e.r[this.c]; break; case 5: e.u = e.r[this.a] != e.r[this.c]; break; case 6: e.u = e.r[this.a]; break; case 7: e.u = !e.r[this.a]; } } , o.prototype.e = function (e) { switch (this.t) { case 0: e.C = this.h; break; case 1: e.u && (e.C = this.h); break; case 2: e.u || (e.C = this.h); break; case 3: e.C = this.h, e.w = null; } e.u = !1; } , r.prototype.e = function (e) { switch (this.t) { case 0: for (var t = [], n = 0; n < this.i; n++) t.unshift(e.f.pop()); e.r[3] = e.r[this.s](t[0], t[1]); break; case 1: for (var r = e.f.pop(), i = [], o = 0; o < this.i; o++) i.unshift(e.f.pop()); e.r[3] = e.r[this.s][r](i[0], i[1]); break; case 2: for (var a = [], s = 0; s < this.i; s++) a.unshift(e.f.pop()); e.r[3] = new e.r[this.s](a[0], a[1]); } } ; var k = function (e) { for (var t = 66, n = [], r = 0; r < e.length; r++) { var i = 24 ^ e.charCodeAt(r) ^ t;
// (k: rolling-XOR string decoder for the VM's embedded string table)
n.push(String.fromCharCode(i)), t = i; } return n.join(''); }; function Q(e) { this.t = (4095 & e) >> 10, this.s = (1023 & e) >> 8, this.i = 1023 & e, this.h = 63 & e; } function C(e) { this.t = (4095 & e) >> 10, this.a = (1023 & e) >> 8, this.c = (255 & e) >> 6; } function B(e) { this.s = (3072 & e) >> 10, this.h = 1023 & e; } function f(e) { this.h = 4095 & e; } function g(e) { this.s = (3072 & e) >> 10; } function u(e) { this.h = 4095 & e; } function w(e) { this.t = (3840 & e) >> 8, this.s = (192 & e) >> 6, this.i = 63 & e; } function G() { this.r = [0, 0, 0, 0], this.C = 0, this.Q = [], this.k = [], this.B = [], this.f = [], this.g = [], this.u = !1, this.G = [], this.b = [], this.o = !1, this.w = null, this.U = null, this.F = [], this.R = 0, this.J = { 0: s, 1: i, 2: h, 3: a, 4: c, 5: n, 6: e, 7: o, 8: r, 9: Q, 10: C, 11: B, 12: f, 13: g, 14: u, 15: w, }; } Q.prototype.e = function (e) { switch (this.t) { case 0: e.f.push(e.r[this.s]); break; case 1: e.f.push(this.i); break; case 2: e.f.push(e.k[this.h]); break; case 3: e.f.push(k(e.b[this.h])); } } , C.prototype.e = function (A) { switch (this.t) { case 0: var t = A.f.pop(); A.r[this.a] = A.r[this.c][t]; break; case 1: var s = A.f.pop() , i = A.f.pop(); A.r[this.c][s] = i; break; case 2: var h = A.f.pop(); A.r[this.a] = eval(h); } } , B.prototype.e = function (e) { e.r[this.s] = k(e.b[this.h]); } , f.prototype.e = function (e) { e.w = this.h; } , g.prototype.e = function (e) { throw e.r[this.s]; } , u.prototype.e = function (e) { var t = this , n = [0]; e.k.forEach(function (e) { n.push(e); }); var r = function (r) { var i = new G; return i.k = n, i.k[0] = r, i.v(e.G, t.h, e.b, e.F), i.r[3]; }; r.toString = function () { return '() { [native code] }'; } , e.r[3] = r; } , w.prototype.e = function (e) { switch (this.t) { case 0: for (var t = {}, n = 0; n < this.i; n++) { var r = e.f.pop(); t[e.f.pop()] = r; } e.r[this.s] = t; break; case 1: for (var i = [], o = 0; o < this.i; o++) i.unshift(e.f.pop());
// (G.D decodes the base64 bytecode blob; G.v is the interpreter main loop.
//  The long base64 literal below is the VM program -- keep it intact.)
e.r[this.s] = i; } } , G.prototype.D = function (e) { for (var t = window.atob(e), n = t.charCodeAt(0) << 8 | t.charCodeAt(1), r = [], i = 2; i < n + 2; i += 2) r.push(t.charCodeAt(i) << 8 | t.charCodeAt(i + 1)); this.G = r; for (var o = [], a = n + 2; a < t.length;) { var s = t.charCodeAt(a) << 8 | t.charCodeAt(a + 1) , c = t.slice(a + 2, a + 2 + s); o.push(c), a += s + 2; } this.b = o; } , G.prototype.v = function (e, t, n) { for (t = t || 0, n = n || [], this.C = t, 'string' == typeof e ? this.D(e) : (this.G = e, this.b = n), this.o = !0, this.R = Date.now(); this.o;) { var r = this.G[this.C++]; if ('number' != typeof r) break; var i = Date.now(); if (500 < i - this.R) return; this.R = i; try { this.e(r); } catch (e) { this.U = e, this.w && (this.C = this.w); } } } , G.prototype.e = function (e) { var t = (61440 & e) >> 12; new this.J[t](e).e(this); } , (new G).v('AxjgB5MAnACoAJwBpAAAABAAIAKcAqgAMAq0AzRJZAZwUpwCqACQACACGAKcBKAAIAOcBagAIAQYAjAUGgKcBqFAuAc5hTSHZAZwqrAIGgA0QJEAJAAYAzAUGgOcCaFANRQ0R2QGcOKwChoANECRACQAsAuQABgDnAmgAJwMgAGcDYwFEAAzBmAGcSqwDhoANECRACQAGAKcD6AAGgKcEKFANEcYApwRoAAxB2AGcXKwEhoANECRACQAGAKcE6AAGgKcFKFANEdkBnGqsBUaADRAkQAkABgCnBagAGAGcdKwFxoANECRACQAGAKcGKAAYAZx+rAZGgA0QJEAJAAYA5waoABgBnIisBsaADRAkQAkABgCnBygABoCnB2hQDRHZAZyWrAeGgA0QJEAJAAYBJwfoAAwFGAGcoawIBoANECRACQAGAOQALAJkAAYBJwfgAlsBnK+sCEaADRAkQAkABgDkACwGpAAGAScH4AJbAZy9rAiGgA0QJEAJACwI5AAGAScH6AAkACcJKgAnCWgAJwmoACcJ4AFnA2MBRAAMw5gBnNasCgaADRAkQAkABgBEio0R5EAJAGwKSAFGACcKqAAEgM0RCQGGAYSATRFZAZzshgAtCs0QCQAGAYSAjRFZAZz1hgAtCw0QCQAEAAgB7AtIAgYAJwqoAASATRBJAkYCRIANEZkBnYqEAgaBxQBOYAoBxQEOYQ0giQKGAmQABgAnC6ABRgBGgo0UhD/MQ8zECALEAgaBxQBOYAoBxQEOYQ0gpEAJAoYARoKNFIQ/zEPkAAgChgLGgkUATmBkgAaAJwuhAUaCjdQFAg5kTSTJAsQCBoHFAE5gCgHFAQ5hDSCkQAkChgBGgo0UhD/MQ+QACAKGAsaCRQCOYGSABoAnC6EBRoKN1AUEDmRNJMkCxgFGgsUPzmPkgAaCJwvhAU0wCQFGAUaCxQGOZISPzZPkQAaCJwvhAU0wCQFGAUaCxQMOZISPzZPkQAaCJwvhAU0wCQFGAUaCxQSOZISPzZPkQAaCJwvhAU0wCQFGAkSAzRBJAlz/B4FUAAAAwUYIAAIBSITFQkTERwABi0GHxITAAAJLwMSGRsXHxMZAAk0Fw8HFh4NAwUABhU1EBceDwAENBcUEAAGNBkTGRcBAAFKAAkvHg4PKz4aEwIAAUsACDIVHB0QEQ4YAAsuAzs7AAoPKToKDgAHMx8SGQUvMQABSAALORoVGCQgERcCAxoACAU3ABEXAgMaAAsFGDcAERcCAxoUCgABSQAGOA8LGBsPAAYYLwsYGw8AAU4ABD8QHAUAAU8ABSkbCQ4BAAFMAAktCh8eDgMHCw8AAU0ADT4TGjQsGQMaFA0FHhkAFz4TGjQsGQMaFA0FHhk1NBkCHgUbGBEPAAFCABg9GgkjIAEmOgUHDQ8eFSU5DggJAwEcAwUAAUMAAUAAAUEADQEtFw0FBwtdWxQTGSAACBwrAxUPBR4ZAAkqGgUDAwMVEQ0ACC4DJD8eAx8RAAQ5GhUYAAFGAAAABjYRExELBAACWhgAAVoAQAg/PTw0NxcQPCQ5C3JZEBs9fkcnDRcUAXZia0Q4EhQgXHojMBY3MWVCNT0uDhMXcGQ7AUFPHigkQUwQFkhaAkEACjkTEQspNBMZPC0ABjkTEQsrLQ=='); var b = function (e) { return __g._encrypt(encodeURIComponent(e)); }; exports.ENCRYPT_VERSION = A, exports.default = b; console.log(__g._encrypt(encodeURIComponent('fe5c2f6c48886fb742848fc24e330a5e')))
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/encrypt.js
encrypt.js
[RFC6265](https://tools.ietf.org/html/rfc6265) Cookies and CookieJar for Node.js [![npm package](https://nodei.co/npm/tough-cookie.png?downloads=true&downloadRank=true&stars=true)](https://nodei.co/npm/tough-cookie/) [![Build Status](https://travis-ci.org/salesforce/tough-cookie.png?branch=master)](https://travis-ci.org/salesforce/tough-cookie) # Synopsis ``` javascript var tough = require('tough-cookie'); var Cookie = tough.Cookie; var cookie = Cookie.parse(header); cookie.value = 'somethingdifferent'; header = cookie.toString(); var cookiejar = new tough.CookieJar(); cookiejar.setCookie(cookie, 'http://currentdomain.example.com/path', cb); // ... cookiejar.getCookies('http://example.com/otherpath',function(err,cookies) { res.headers['cookie'] = cookies.join('; '); }); ``` # Installation It's _so_ easy! `npm install tough-cookie` Why the name? NPM modules `cookie`, `cookies` and `cookiejar` were already taken. ## Version Support Support for versions of node.js will follow that of the [request](https://www.npmjs.com/package/request) module. # API ## tough Functions on the module you get from `require('tough-cookie')`. All can be used as pure functions and don't need to be "bound". **Note**: prior to 1.0.x, several of these functions took a `strict` parameter. This has since been removed from the API as it was no longer necessary. ### `parseDate(string)` Parse a cookie date string into a `Date`. Parses according to RFC6265 Section 5.1.1, not `Date.parse()`. ### `formatDate(date)` Format a Date into a RFC1123 string (the RFC6265-recommended format). ### `canonicalDomain(str)` Transforms a domain-name into a canonical domain-name. The canonical domain-name is a trimmed, lowercased, stripped-of-leading-dot and optionally punycode-encoded domain-name (Section 5.1.2 of RFC6265). For the most part, this function is idempotent (can be run again on its output without ill effects). 
### `domainMatch(str,domStr[,canonicalize=true])`

Answers "does this real domain match the domain in a cookie?". The `str` is the "current" domain-name and the `domStr` is the "cookie" domain-name. Matches according to RFC6265 Section 5.1.3, but it helps to think of it as a "suffix match".

The `canonicalize` parameter will run the other two parameters through `canonicalDomain` or not.

### `defaultPath(path)`

Given a current request/response path, gives the Path appropriate for storing in a cookie. This is basically the "directory" of a "file" in the path, but is specified by Section 5.1.4 of the RFC.

The `path` parameter MUST be _only_ the pathname part of a URI (i.e. excludes the hostname, query, fragment, etc.). This is the `.pathname` property of node's `uri.parse()` output.

### `pathMatch(reqPath,cookiePath)`

Answers "does the request-path path-match a given cookie-path?" as per RFC6265 Section 5.1.4. Returns a boolean.

This is essentially a prefix-match where `cookiePath` is a prefix of `reqPath`.

### `parse(cookieString[, options])`

alias for `Cookie.parse(cookieString[, options])`

### `fromJSON(string)`

alias for `Cookie.fromJSON(string)`

### `getPublicSuffix(hostname)`

Returns the public suffix of this hostname. The public suffix is the shortest domain-name upon which a cookie can be set. Returns `null` if the hostname cannot have cookies set for it.

For example: `www.example.com` and `www.subdomain.example.com` both have public suffix `example.com`.

For further information, see http://publicsuffix.org/. This module derives its list from that site. This call is currently a wrapper around [`psl`](https://www.npmjs.com/package/psl)'s [get() method](https://www.npmjs.com/package/psl#pslgetdomain).

### `cookieCompare(a,b)`

For use with `.sort()`, sorts a list of cookies into the recommended order given in the RFC (Section 5.4 step 2).
The sort algorithm is, in order of precedence:

* Longest `.path`
* oldest `.creation` (which has a 1ms precision, same as `Date`)
* lowest `.creationIndex` (to get beyond the 1ms precision)

``` javascript
var cookies = [ /* unsorted array of Cookie objects */ ];
cookies = cookies.sort(cookieCompare);
```

**Note**: Since JavaScript's `Date` is limited to a 1ms precision, cookies within the same millisecond are entirely possible. This is especially true when using the `now` option to `.setCookie()`. The `.creationIndex` property is a per-process global counter, assigned during construction with `new Cookie()`. This preserves the spirit of the RFC sorting: older cookies go first. This works great for `MemoryCookieStore`, since `Set-Cookie` headers are parsed in order, but may not be so great for distributed systems. Sophisticated `Store`s may wish to set this to some other _logical clock_ such that if cookies A and B are created in the same millisecond, but cookie A is created before cookie B, then `A.creationIndex < B.creationIndex`. If you want to alter the global counter, which you probably _shouldn't_ do, it's stored in `Cookie.cookiesCreated`.

### `permuteDomain(domain)`

Generates a list of all possible domains that `domainMatch()` the parameter. May be handy for implementing cookie stores.

### `permutePath(path)`

Generates a list of all possible paths that `pathMatch()` the parameter. May be handy for implementing cookie stores.

## Cookie

Exported via `tough.Cookie`.

### `Cookie.parse(cookieString[, options])`

Parses a single Cookie or Set-Cookie HTTP header into a `Cookie` object. Returns `undefined` if the string can't be parsed.

The options parameter is not required and currently has only one property:

* _loose_ - boolean - if `true` enable parsing of key-less cookies like `=abc` and `=`, which are not RFC-compliant.

If options is not an object, it is ignored, which means you can use `Array#map` with it.
Here's how to process the Set-Cookie header(s) on a node HTTP/HTTPS response: ``` javascript if (res.headers['set-cookie'] instanceof Array) cookies = res.headers['set-cookie'].map(Cookie.parse); else cookies = [Cookie.parse(res.headers['set-cookie'])]; ``` _Note:_ in version 2.3.3, tough-cookie limited the number of spaces before the `=` to 256 characters. This limitation has since been removed. See [Issue 92](https://github.com/salesforce/tough-cookie/issues/92) ### Properties Cookie object properties: * _key_ - string - the name or key of the cookie (default "") * _value_ - string - the value of the cookie (default "") * _expires_ - `Date` - if set, the `Expires=` attribute of the cookie (defaults to the string `"Infinity"`). See `setExpires()` * _maxAge_ - seconds - if set, the `Max-Age=` attribute _in seconds_ of the cookie. May also be set to strings `"Infinity"` and `"-Infinity"` for non-expiry and immediate-expiry, respectively. See `setMaxAge()` * _domain_ - string - the `Domain=` attribute of the cookie * _path_ - string - the `Path=` of the cookie * _secure_ - boolean - the `Secure` cookie flag * _httpOnly_ - boolean - the `HttpOnly` cookie flag * _extensions_ - `Array` - any unrecognized cookie attributes as strings (even if equal-signs inside) * _creation_ - `Date` - when this cookie was constructed * _creationIndex_ - number - set at construction, used to provide greater sort precision (please see `cookieCompare(a,b)` for a full explanation) After a cookie has been passed through `CookieJar.setCookie()` it will have the following additional attributes: * _hostOnly_ - boolean - is this a host-only cookie (i.e. no Domain field was set, but was instead implied) * _pathIsDefault_ - boolean - if true, there was no Path field on the cookie and `defaultPath()` was used to derive one. * _creation_ - `Date` - **modified** from construction to when the cookie was added to the jar * _lastAccessed_ - `Date` - last time the cookie got accessed. 
Will affect cookie cleaning once implemented. Using `cookiejar.getCookies(...)` will update this attribute. ### `Cookie([{properties}])` Receives an options object that can contain any of the above Cookie properties, uses the default for unspecified properties. ### `.toString()` encode to a Set-Cookie header value. The Expires cookie field is set using `formatDate()`, but is omitted entirely if `.expires` is `Infinity`. ### `.cookieString()` encode to a Cookie header value (i.e. the `.key` and `.value` properties joined with '='). ### `.setExpires(String)` sets the expiry based on a date-string passed through `parseDate()`. If parseDate returns `null` (i.e. can't parse this date string), `.expires` is set to `"Infinity"` (a string) is set. ### `.setMaxAge(number)` sets the maxAge in seconds. Coerces `-Infinity` to `"-Infinity"` and `Infinity` to `"Infinity"` so it JSON serializes correctly. ### `.expiryTime([now=Date.now()])` ### `.expiryDate([now=Date.now()])` expiryTime() Computes the absolute unix-epoch milliseconds that this cookie expires. expiryDate() works similarly, except it returns a `Date` object. Note that in both cases the `now` parameter should be milliseconds. Max-Age takes precedence over Expires (as per the RFC). The `.creation` attribute -- or, by default, the `now` parameter -- is used to offset the `.maxAge` attribute. If Expires (`.expires`) is set, that's returned. Otherwise, `expiryTime()` returns `Infinity` and `expiryDate()` returns a `Date` object for "Tue, 19 Jan 2038 03:14:07 GMT" (latest date that can be expressed by a 32-bit `time_t`; the common limit for most user-agents). ### `.TTL([now=Date.now()])` compute the TTL relative to `now` (milliseconds). The same precedence rules as for `expiryTime`/`expiryDate` apply. The "number" `Infinity` is returned for cookies without an explicit expiry and `0` is returned if the cookie is expired. Otherwise a time-to-live in milliseconds is returned. 
### `.canonicalizedDomain()` ### `.cdomain()` return the canonicalized `.domain` field. This is lower-cased and punycode (RFC3490) encoded if the domain has any non-ASCII characters. ### `.toJSON()` For convenience in using `JSON.serialize(cookie)`. Returns a plain-old `Object` that can be JSON-serialized. Any `Date` properties (i.e., `.expires`, `.creation`, and `.lastAccessed`) are exported in ISO format (`.toISOString()`). **NOTE**: Custom `Cookie` properties will be discarded. In tough-cookie 1.x, since there was no `.toJSON` method explicitly defined, all enumerable properties were captured. If you want a property to be serialized, add the property name to the `Cookie.serializableProperties` Array. ### `Cookie.fromJSON(strOrObj)` Does the reverse of `cookie.toJSON()`. If passed a string, will `JSON.parse()` that first. Any `Date` properties (i.e., `.expires`, `.creation`, and `.lastAccessed`) are parsed via `Date.parse()`, not the tough-cookie `parseDate`, since it's JavaScript/JSON-y timestamps being handled at this layer. Returns `null` upon JSON parsing error. ### `.clone()` Does a deep clone of this cookie, exactly implemented as `Cookie.fromJSON(cookie.toJSON())`. ### `.validate()` Status: *IN PROGRESS*. Works for a few things, but is by no means comprehensive. validates cookie attributes for semantic correctness. Useful for "lint" checking any Set-Cookie headers you generate. For now, it returns a boolean, but eventually could return a reason string -- you can future-proof with this construct: ``` javascript if (cookie.validate() === true) { // it's tasty } else { // yuck! } ``` ## CookieJar Exported via `tough.CookieJar`. ### `CookieJar([store],[options])` Simply use `new CookieJar()`. If you'd like to use a custom store, pass that to the constructor otherwise a `MemoryCookieStore` will be created and used. 
The `options` object can be omitted and can have the following properties: * _rejectPublicSuffixes_ - boolean - default `true` - reject cookies with domains like "com" and "co.uk" * _looseMode_ - boolean - default `false` - accept malformed cookies like `bar` and `=bar`, which have an implied empty name. This is not in the standard, but is used sometimes on the web and is accepted by (most) browsers. Since eventually this module would like to support database/remote/etc. CookieJars, continuation passing style is used for CookieJar methods. ### `.setCookie(cookieOrString, currentUrl, [{options},] cb(err,cookie))` Attempt to set the cookie in the cookie jar. If the operation fails, an error will be given to the callback `cb`, otherwise the cookie is passed through. The cookie will have updated `.creation`, `.lastAccessed` and `.hostOnly` properties. The `options` object can be omitted and can have the following properties: * _http_ - boolean - default `true` - indicates if this is an HTTP or non-HTTP API. Affects HttpOnly cookies. * _secure_ - boolean - autodetect from url - indicates if this is a "Secure" API. If the currentUrl starts with `https:` or `wss:` then this is defaulted to `true`, otherwise `false`. * _now_ - Date - default `new Date()` - what to use for the creation/access time of cookies * _ignoreError_ - boolean - default `false` - silently ignore things like parse errors and invalid domains. `Store` errors aren't ignored by this option. As per the RFC, the `.hostOnly` property is set if there was no "Domain=" parameter in the cookie string (or `.domain` was null on the Cookie object). The `.domain` property is set to the fully-qualified hostname of `currentUrl` in this case. Matching this cookie requires an exact hostname match (not a `domainMatch` as per usual). ### `.setCookieSync(cookieOrString, currentUrl, [{options}])` Synchronous version of `setCookie`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). 
### `.getCookies(currentUrl, [{options},] cb(err,cookies))` Retrieve the list of cookies that can be sent in a Cookie header for the current url. If an error is encountered, that's passed as `err` to the callback, otherwise an `Array` of `Cookie` objects is passed. The array is sorted with `cookieCompare()` unless the `{sort:false}` option is given. The `options` object can be omitted and can have the following properties: * _http_ - boolean - default `true` - indicates if this is an HTTP or non-HTTP API. Affects HttpOnly cookies. * _secure_ - boolean - autodetect from url - indicates if this is a "Secure" API. If the currentUrl starts with `https:` or `wss:` then this is defaulted to `true`, otherwise `false`. * _now_ - Date - default `new Date()` - what to use for the creation/access time of cookies * _expire_ - boolean - default `true` - perform expiry-time checking of cookies and asynchronously remove expired cookies from the store. Using `false` will return expired cookies and **not** remove them from the store (which is useful for replaying Set-Cookie headers, potentially). * _allPaths_ - boolean - default `false` - if `true`, do not scope cookies by path. The default uses RFC-compliant path scoping. **Note**: may not be supported by the underlying store (the default `MemoryCookieStore` supports it). The `.lastAccessed` property of the returned cookies will have been updated. ### `.getCookiesSync(currentUrl, [{options}])` Synchronous version of `getCookies`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). ### `.getCookieString(...)` Accepts the same options as `.getCookies()` but passes a string suitable for a Cookie header rather than an array to the callback. Simply maps the `Cookie` array via `.cookieString()`. ### `.getCookieStringSync(...)` Synchronous version of `getCookieString`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). 
### `.getSetCookieStrings(...)` Returns an array of strings suitable for **Set-Cookie** headers. Accepts the same options as `.getCookies()`. Simply maps the cookie array via `.toString()`. ### `.getSetCookieStringsSync(...)` Synchronous version of `getSetCookieStrings`; only works with synchronous stores (e.g. the default `MemoryCookieStore`). ### `.serialize(cb(err,serializedObject))` Serialize the Jar if the underlying store supports `.getAllCookies`. **NOTE**: Custom `Cookie` properties will be discarded. If you want a property to be serialized, add the property name to the `Cookie.serializableProperties` Array. See [Serialization Format]. ### `.serializeSync()` Sync version of .serialize ### `.toJSON()` Alias of .serializeSync() for the convenience of `JSON.stringify(cookiejar)`. ### `CookieJar.deserialize(serialized, [store], cb(err,object))` A new Jar is created and the serialized Cookies are added to the underlying store. Each `Cookie` is added via `store.putCookie` in the order in which they appear in the serialization. The `store` argument is optional, but should be an instance of `Store`. By default, a new instance of `MemoryCookieStore` is created. As a convenience, if `serialized` is a string, it is passed through `JSON.parse` first. If that throws an error, this is passed to the callback. ### `CookieJar.deserializeSync(serialized, [store])` Sync version of `.deserialize`. _Note_ that the `store` must be synchronous for this to work. ### `CookieJar.fromJSON(string)` Alias of `.deserializeSync` to provide consistency with `Cookie.fromJSON()`. ### `.clone([store,]cb(err,newJar))` Produces a deep clone of this jar. Modifications to the original won't affect the clone, and vice versa. The `store` argument is optional, but should be an instance of `Store`. By default, a new instance of `MemoryCookieStore` is created. 
Transferring between store types is supported so long as the source implements `.getAllCookies()` and the destination implements `.putCookie()`.

### `.cloneSync([store])`

Synchronous version of `.clone`, returning a new `CookieJar` instance.

The `store` argument is optional, but must be a _synchronous_ `Store` instance if specified. If not passed, a new instance of `MemoryCookieStore` is used.

The _source_ and _destination_ must both be synchronous `Store`s. If one or both stores are asynchronous, use `.clone` instead. Recall that `MemoryCookieStore` supports both synchronous and asynchronous API calls.

### `.removeAllCookies(cb(err))`

Removes all cookies from the jar.

This is a new backwards-compatible feature of `tough-cookie` version 2.5, so not all Stores will implement it efficiently. For Stores that do not implement `removeAllCookies`, the fallback is to call `removeCookie` after `getAllCookies`. If `getAllCookies` fails or isn't implemented in the Store, that error is returned. If one or more of the `removeCookie` calls fail, only the first error is returned.

### `.removeAllCookiesSync()`

Sync version of `.removeAllCookies()`

## Store

Base class for CookieJar stores. Available as `tough.Store`.

## Store API

The storage model for each `CookieJar` instance can be replaced with a custom implementation. The default is `MemoryCookieStore` which can be found in the `lib/memstore.js` file.

The API uses continuation-passing-style to allow for asynchronous stores. Stores should inherit from the base `Store` class, which is available as `require('tough-cookie').Store`. Stores are asynchronous by default, but if `store.synchronous` is set to `true`, then the `*Sync` methods of the containing `CookieJar` can be used (the store itself is still invoked through the continuation-passing-style API, just synchronously).

All `domain` parameters will have been normalized before calling.

The Cookie store must have all of the following methods.
### `store.findCookie(domain, path, key, cb(err,cookie))` Retrieve a cookie with the given domain, path and key (a.k.a. name). The RFC maintains that exactly one of these cookies should exist in a store. If the store is using versioning, this means that the latest/newest such cookie should be returned. Callback takes an error and the resulting `Cookie` object. If no cookie is found then `null` MUST be passed instead (i.e. not an error). ### `store.findCookies(domain, path, cb(err,cookies))` Locates cookies matching the given domain and path. This is most often called in the context of `cookiejar.getCookies()` above. If no cookies are found, the callback MUST be passed an empty array. The resulting list will be checked for applicability to the current request according to the RFC (domain-match, path-match, http-only-flag, secure-flag, expiry, etc.), so it's OK to use an optimistic search algorithm when implementing this method. However, the search algorithm used SHOULD try to find cookies that `domainMatch()` the domain and `pathMatch()` the path in order to limit the amount of checking that needs to be done. As of version 0.9.12, the `allPaths` option to `cookiejar.getCookies()` above will cause the path here to be `null`. If the path is `null`, path-matching MUST NOT be performed (i.e. domain-matching only). ### `store.putCookie(cookie, cb(err))` Adds a new cookie to the store. The implementation SHOULD replace any existing cookie with the same `.domain`, `.path`, and `.key` properties -- depending on the nature of the implementation, it's possible that between the call to `fetchCookie` and `putCookie` that a duplicate `putCookie` can occur. The `cookie` object MUST NOT be modified; the caller will have already updated the `.creation` and `.lastAccessed` properties. Pass an error if the cookie cannot be stored. ### `store.updateCookie(oldCookie, newCookie, cb(err))` Update an existing cookie. 
The implementation MUST update the `.value` for a cookie with the same `domain`, `.path` and `.key`. The implementation SHOULD check that the old value in the store is equivalent to `oldCookie` - how the conflict is resolved is up to the store. The `.lastAccessed` property will always be different between the two objects (to the precision possible via JavaScript's clock). Both `.creation` and `.creationIndex` are guaranteed to be the same. Stores MAY ignore or defer the `.lastAccessed` change at the cost of affecting how cookies are selected for automatic deletion (e.g., least-recently-used, which is up to the store to implement). Stores may wish to optimize changing the `.value` of the cookie in the store versus storing a new cookie. If the implementation doesn't define this method a stub that calls `putCookie(newCookie,cb)` will be added to the store object. The `newCookie` and `oldCookie` objects MUST NOT be modified. Pass an error if the newCookie cannot be stored. ### `store.removeCookie(domain, path, key, cb(err))` Remove a cookie from the store (see notes on `findCookie` about the uniqueness constraint). The implementation MUST NOT pass an error if the cookie doesn't exist; only pass an error due to the failure to remove an existing cookie. ### `store.removeCookies(domain, path, cb(err))` Removes matching cookies from the store. The `path` parameter is optional, and if missing means all paths in a domain should be removed. Pass an error ONLY if removing any existing cookies failed. ### `store.removeAllCookies(cb(err))` _Optional_. Removes all cookies from the store. Pass an error if one or more cookies can't be removed. **Note**: New method as of `tough-cookie` version 2.5, so not all Stores will implement this, plus some stores may choose not to implement this. ### `store.getAllCookies(cb(err, cookies))` _Optional_. Produces an `Array` of all cookies during `jar.serialize()`. 
The items in the array can be true `Cookie` objects or generic `Object`s with the [Serialization Format] data structure.

Cookies SHOULD be returned in creation order to preserve sorting via `cookieCompare()`. For reference, `MemoryCookieStore` will sort by `.creationIndex` since it uses true `Cookie` objects internally. If you don't return the cookies in creation order, they'll still be sorted by creation time, but this only has a precision of 1ms. See `cookieCompare` for more detail.

Pass an error if retrieval fails.

**Note**: not all Stores can implement this due to technical limitations, so it is optional.

## MemoryCookieStore

Inherits from `Store`.

A just-in-memory CookieJar synchronous store implementation, used by default. Despite being a synchronous implementation, it's usable with both the synchronous and asynchronous forms of the `CookieJar` API. Supports serialization, `getAllCookies`, and `removeAllCookies`.

## Community Cookie Stores

These are some Store implementations authored and maintained by the community. They aren't official and we don't vouch for them but you may be interested to have a look:

- [`db-cookie-store`](https://github.com/JSBizon/db-cookie-store): SQL including SQLite-based databases
- [`file-cookie-store`](https://github.com/JSBizon/file-cookie-store): Netscape cookie file format on disk
- [`redis-cookie-store`](https://github.com/benkroeger/redis-cookie-store): Redis
- [`tough-cookie-filestore`](https://github.com/mitsuru/tough-cookie-filestore): JSON on disk
- [`tough-cookie-web-storage-store`](https://github.com/exponentjs/tough-cookie-web-storage-store): DOM localStorage and sessionStorage

# Serialization Format

**NOTE**: if you want to have custom `Cookie` properties serialized, add the property name to `Cookie.serializableProperties`.

```js
{
  // The version of tough-cookie that serialized this jar.
version: '[email protected]', // add the store type, to make humans happy: storeType: 'MemoryCookieStore', // CookieJar configuration: rejectPublicSuffixes: true, // ... future items go here // Gets filled from jar.store.getAllCookies(): cookies: [ { key: 'string', value: 'string', // ... /* other Cookie.serializableProperties go here */ } ] } ``` # Copyright and License BSD-3-Clause: ```text Copyright (c) 2015, Salesforce.com, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/tough-cookie/README.md
README.md
'use strict';
var Store = require('tough-cookie/lib/store').Store;
var permuteDomain = require('tough-cookie/lib/permuteDomain').permuteDomain;
var pathMatch = require('tough-cookie/lib/pathMatch').pathMatch;
var util = require('util');

/*
 * The default, just-in-memory store for CookieJar.  Cookies live in a
 * three-level index:
 *
 *    this.idx[domain][path][key] -> Cookie
 *
 * SECURITY FIX: every level of the index is created with
 * Object.create(null) instead of a plain `{}` literal.  With plain
 * objects, a cookie whose domain, path or key is "__proto__"
 * (reachable, e.g., when rejectPublicSuffixes is disabled) assigns
 * through the prototype chain and pollutes Object.prototype for the
 * whole process (prototype pollution, CVE-2023-26136).  On a
 * null-prototype object, "__proto__" is just an ordinary own property.
 */
function MemoryCookieStore() {
  Store.call(this);
  this.idx = Object.create(null);
}
util.inherits(MemoryCookieStore, Store);
exports.MemoryCookieStore = MemoryCookieStore;
MemoryCookieStore.prototype.idx = null;

// Since it's just a struct in RAM, this Store is synchronous
MemoryCookieStore.prototype.synchronous = true;

// force a default depth:
MemoryCookieStore.prototype.inspect = function() {
  return "{ idx: "+util.inspect(this.idx, false, 2)+' }';
};

// Use the new custom inspection symbol to add the custom inspect function if
// available.
if (util.inspect.custom) {
  MemoryCookieStore.prototype[util.inspect.custom] = MemoryCookieStore.prototype.inspect;
}

// Look up the single cookie stored under (domain, path, key).  A falsy value
// (undefined or null) is passed to the callback when no such cookie exists.
MemoryCookieStore.prototype.findCookie = function(domain, path, key, cb) {
  if (!this.idx[domain]) {
    return cb(null,undefined);
  }
  if (!this.idx[domain][path]) {
    return cb(null,undefined);
  }
  return cb(null,this.idx[domain][path][key]||null);
};

// Collect every cookie that domain-matches `domain` and (unless `path` is
// null, meaning "all paths") path-matches `path`.
MemoryCookieStore.prototype.findCookies = function(domain, path, cb) {
  var results = [];
  if (!domain) {
    return cb(null,[]);
  }

  var pathMatcher;
  if (!path) {
    // null means "all paths"
    pathMatcher = function matchAll(domainIndex) {
      for (var curPath in domainIndex) {
        var pathIndex = domainIndex[curPath];
        for (var key in pathIndex) {
          results.push(pathIndex[key]);
        }
      }
    };
  } else {
    pathMatcher = function matchRFC(domainIndex) {
      //NOTE: we should use path-match algorithm from S5.1.4 here
      //(see : https://github.com/ChromiumWebApps/chromium/blob/b3d3b4da8bb94c1b2e061600df106d590fda3620/net/cookies/canonical_cookie.cc#L299)
      Object.keys(domainIndex).forEach(function (cookiePath) {
        if (pathMatch(path, cookiePath)) {
          var pathIndex = domainIndex[cookiePath];
          for (var key in pathIndex) {
            results.push(pathIndex[key]);
          }
        }
      });
    };
  }

  // permuteDomain generates every domain that could domain-match; fall back
  // to an exact-domain lookup if it can't (e.g. IP addresses).
  var domains = permuteDomain(domain) || [domain];
  var idx = this.idx;
  domains.forEach(function(curDomain) {
    var domainIndex = idx[curDomain];
    if (!domainIndex) {
      return;
    }
    pathMatcher(domainIndex);
  });

  cb(null,results);
};

// Insert (or overwrite) a cookie under its (domain, path, key) triple.
MemoryCookieStore.prototype.putCookie = function(cookie, cb) {
  // Null-prototype objects so a "__proto__" domain/path stores a plain own
  // property instead of polluting Object.prototype (CVE-2023-26136).
  if (!this.idx[cookie.domain]) {
    this.idx[cookie.domain] = Object.create(null);
  }
  if (!this.idx[cookie.domain][cookie.path]) {
    this.idx[cookie.domain][cookie.path] = Object.create(null);
  }
  this.idx[cookie.domain][cookie.path][cookie.key] = cookie;
  cb(null);
};

MemoryCookieStore.prototype.updateCookie = function(oldCookie, newCookie, cb) {
  // updateCookie() may avoid updating cookies that are identical.  For example,
  // lastAccessed may not be important to some stores and an equality
  // comparison could exclude that field.
  this.putCookie(newCookie,cb);
};

// Remove a single cookie; silently succeeds if it doesn't exist.
MemoryCookieStore.prototype.removeCookie = function(domain, path, key, cb) {
  if (this.idx[domain] && this.idx[domain][path] && this.idx[domain][path][key]) {
    delete this.idx[domain][path][key];
  }
  cb(null);
};

// Remove all cookies for a domain, or just one path of it when `path` given.
MemoryCookieStore.prototype.removeCookies = function(domain, path, cb) {
  if (this.idx[domain]) {
    if (path) {
      delete this.idx[domain][path];
    } else {
      delete this.idx[domain];
    }
  }
  return cb(null);
};

MemoryCookieStore.prototype.removeAllCookies = function(cb) {
  // Reset to a fresh null-prototype index (see constructor).
  this.idx = Object.create(null);
  return cb(null);
};

// Flatten the index into an array, sorted by .creationIndex, for
// jar.serialize().
MemoryCookieStore.prototype.getAllCookies = function(cb) {
  var cookies = [];
  var idx = this.idx;

  var domains = Object.keys(idx);
  domains.forEach(function(domain) {
    var paths = Object.keys(idx[domain]);
    paths.forEach(function(path) {
      var keys = Object.keys(idx[domain][path]);
      keys.forEach(function(key) {
        if (key !== null) {
          cookies.push(idx[domain][path][key]);
        }
      });
    });
  });

  // Sort by creationIndex so deserializing retains the creation order.
  // When implementing your own store, this SHOULD retain the order too
  cookies.sort(function(a,b) {
    return (a.creationIndex||0) - (b.creationIndex||0);
  });

  cb(null, cookies);
};
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/tough-cookie/lib/memstore.js
memstore.js
'use strict';
// cookie.js (head of file) -- RFC6265 primitives for tough-cookie: cookie-date
// parsing, domain canonicalization & matching, default-path computation, and
// the Set-Cookie header parser.
var urlParse = require('url').parse;
var util = require('util');
var ipRegex = require('ip-regex')({ exact: true });
var pubsuffix = require('tough-cookie/lib/pubsuffix-psl');
var Store = require('tough-cookie/lib/store').Store;
var MemoryCookieStore = require('tough-cookie/lib/memstore').MemoryCookieStore;
var pathMatch = require('tough-cookie/lib/pathMatch').pathMatch;
var VERSION = require('tough-cookie/lib/version');

// punycode is optional: without it, non-ASCII domains are simply left
// un-normalized (see canonicalDomain below).
var punycode;
try {
  punycode = require('punycode');
} catch(e) {
  console.warn("tough-cookie: can't load punycode; won't use punycode for domain normalization");
}

// From RFC6265 S4.1.1
// note that it excludes \x3B ";"
var COOKIE_OCTETS = /^[\x21\x23-\x2B\x2D-\x3A\x3C-\x5B\x5D-\x7E]+$/;

var CONTROL_CHARS = /[\x00-\x1F]/;

// From Chromium
// '\r', '\n' and '\0' should be treated as a terminator in
// the "relaxed" mode, see:
// https://github.com/ChromiumWebApps/chromium/blob/b3d3b4da8bb94c1b2e061600df106d590fda3620/net/cookies/parsed_cookie.cc#L60
var TERMINATORS = ['\n', '\r', '\0'];

// RFC6265 S4.1.1 defines path value as 'any CHAR except CTLs or ";"'
// Note ';' is \x3B
var PATH_VALUE = /[\x20-\x3A\x3C-\x7E]+/;

// date-time parsing constants (RFC6265 S5.1.1)
var DATE_DELIM = /[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]/;

var MONTH_TO_NUM = {
  jan:0, feb:1, mar:2, apr:3, may:4, jun:5,
  jul:6, aug:7, sep:8, oct:9, nov:10, dec:11
};
var NUM_TO_MONTH = [
  'Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'
];
var NUM_TO_DAY = [
  'Sun','Mon','Tue','Wed','Thu','Fri','Sat'
];

var MAX_TIME = 2147483647000; // 31-bit max
var MIN_TIME = 0; // 31-bit min

/*
 * Parses a Natural number (i.e., non-negative integer) with either the
 *    <min>*<max>DIGIT ( non-digit *OCTET )
 * or
 *    <min>*<max>DIGIT
 * grammar (RFC6265 S5.1.1).
 *
 * The "trailingOK" boolean controls if the grammar accepts a
 * "( non-digit *OCTET )" trailer.  Returns null on failure.
 */
function parseDigits(token, minDigits, maxDigits, trailingOK) {
  var count = 0;
  while (count < token.length) {
    var c = token.charCodeAt(count);
    // "non-digit = %x00-2F / %x3A-FF"
    if (c <= 0x2F || c >= 0x3A) {
      break;
    }
    count++;
  }

  // constrain to a minimum and maximum number of digits.
  if (count < minDigits || count > maxDigits) {
    return null;
  }

  if (!trailingOK && count != token.length) {
    return null;
  }

  return parseInt(token.substr(0,count), 10);
}

// Parse an "hh:mm:ss" time token into [hour, minute, second], or null.
function parseTime(token) {
  var parts = token.split(':');
  var result = [0,0,0];

  /* RFC6265 S5.1.1:
   *      time            = hms-time ( non-digit *OCTET )
   *      hms-time        = time-field ":" time-field ":" time-field
   *      time-field      = 1*2DIGIT
   */

  if (parts.length !== 3) {
    return null;
  }

  for (var i = 0; i < 3; i++) {
    // "time-field" must be strictly "1*2DIGIT", HOWEVER, "hms-time" can be
    // followed by "( non-digit *OCTET )" so therefore the last time-field can
    // have a trailer
    var trailingOK = (i == 2);
    var num = parseDigits(parts[i], 1, 2, trailingOK);
    if (num === null) {
      return null;
    }
    result[i] = num;
  }

  return result;
}

// Parse a month-name token ("Jan", "feb", ...) into 0-11, or null.
function parseMonth(token) {
  token = String(token).substr(0,3).toLowerCase();
  var num = MONTH_TO_NUM[token];
  return num >= 0 ? num : null;
}

/*
 * RFC6265 S5.1.1 date parser (see RFC for full grammar).
 * Returns a Date (UTC), or undefined when the string can't be parsed.
 */
function parseDate(str) {
  if (!str) {
    return;
  }

  /* RFC6265 S5.1.1:
   * 2. Process each date-token sequentially in the order the date-tokens
   * appear in the cookie-date
   */
  var tokens = str.split(DATE_DELIM);
  if (!tokens) {
    return;
  }

  var hour = null;
  var minute = null;
  var second = null;
  var dayOfMonth = null;
  var month = null;
  var year = null;

  for (var i=0; i<tokens.length; i++) {
    var token = tokens[i].trim();
    if (!token.length) {
      continue;
    }

    var result;

    /* 2.1. If the found-time flag is not set and the token matches the time
     * production, set the found-time flag and set the hour- value,
     * minute-value, and second-value to the numbers denoted by the digits in
     * the date-token, respectively.  Skip the remaining sub-steps and continue
     * to the next date-token.
     */
    if (second === null) {
      result = parseTime(token);
      if (result) {
        hour = result[0];
        minute = result[1];
        second = result[2];
        continue;
      }
    }

    /* 2.2. If the found-day-of-month flag is not set and the date-token matches
     * the day-of-month production, set the found-day-of- month flag and set
     * the day-of-month-value to the number denoted by the date-token.  Skip
     * the remaining sub-steps and continue to the next date-token.
     */
    if (dayOfMonth === null) {
      // "day-of-month = 1*2DIGIT ( non-digit *OCTET )"
      result = parseDigits(token, 1, 2, true);
      if (result !== null) {
        dayOfMonth = result;
        continue;
      }
    }

    /* 2.3. If the found-month flag is not set and the date-token matches the
     * month production, set the found-month flag and set the month-value to
     * the month denoted by the date-token.  Skip the remaining sub-steps and
     * continue to the next date-token.
     */
    if (month === null) {
      result = parseMonth(token);
      if (result !== null) {
        month = result;
        continue;
      }
    }

    /* 2.4. If the found-year flag is not set and the date-token matches the
     * year production, set the found-year flag and set the year-value to the
     * number denoted by the date-token.  Skip the remaining sub-steps and
     * continue to the next date-token.
     */
    if (year === null) {
      // "year = 2*4DIGIT ( non-digit *OCTET )"
      result = parseDigits(token, 2, 4, true);
      if (result !== null) {
        year = result;
        /* From S5.1.1:
         * 3.  If the year-value is greater than or equal to 70 and less
         * than or equal to 99, increment the year-value by 1900.
         * 4.  If the year-value is greater than or equal to 0 and less
         * than or equal to 69, increment the year-value by 2000.
         */
        if (year >= 70 && year <= 99) {
          year += 1900;
        } else if (year >= 0 && year <= 69) {
          year += 2000;
        }
      }
    }
  }

  /* RFC 6265 S5.1.1
   * "5. Abort these steps and fail to parse the cookie-date if:
   *     *  at least one of the found-day-of-month, found-month, found-
   *        year, or found-time flags is not set,
   *     *  the day-of-month-value is less than 1 or greater than 31,
   *     *  the year-value is less than 1601,
   *     *  the hour-value is greater than 23,
   *     *  the minute-value is greater than 59, or
   *     *  the second-value is greater than 59.
   *     (Note that leap seconds cannot be represented in this syntax.)"
   *
   * So, in order as above:
   */
  if (
    dayOfMonth === null || month === null || year === null || second === null ||
    dayOfMonth < 1 || dayOfMonth > 31 ||
    year < 1601 ||
    hour > 23 ||
    minute > 59 ||
    second > 59
  ) {
    return;
  }

  return new Date(Date.UTC(year, month, dayOfMonth, hour, minute, second));
}

// Format a Date as an RFC-compliant "Wdy, DD Mon YYYY HH:MM:SS GMT" string,
// for use in Expires= attributes.
function formatDate(date) {
  var d = date.getUTCDate(); d = d >= 10 ? d : '0'+d;
  var h = date.getUTCHours(); h = h >= 10 ? h : '0'+h;
  var m = date.getUTCMinutes(); m = m >= 10 ? m : '0'+m;
  var s = date.getUTCSeconds(); s = s >= 10 ? s : '0'+s;
  return NUM_TO_DAY[date.getUTCDay()] + ', ' +
    d+' '+ NUM_TO_MONTH[date.getUTCMonth()] +' '+ date.getUTCFullYear() +' '+
    h+':'+m+':'+s+' GMT';
}

// S5.1.2 Canonicalized Host Names
function canonicalDomain(str) {
  if (str == null) {
    return null;
  }
  str = str.trim().replace(/^\./,''); // S4.1.2.3 & S5.2.3: ignore leading .

  // convert to IDN if any non-ASCII characters
  if (punycode && /[^\u0001-\u007f]/.test(str)) {
    str = punycode.toASCII(str);
  }

  return str.toLowerCase();
}

// S5.1.3 Domain Matching
// Returns true/false, or null when either argument is null.  Pass
// canonicalize === false only if both inputs are already canonicalized.
function domainMatch(str, domStr, canonicalize) {
  if (str == null || domStr == null) {
    return null;
  }
  if (canonicalize !== false) {
    str = canonicalDomain(str);
    domStr = canonicalDomain(domStr);
  }

  /*
   * "The domain string and the string are identical. (Note that both the
   * domain string and the string will have been canonicalized to lower case at
   * this point)"
   */
  if (str == domStr) {
    return true;
  }

  /* "All of the following [three] conditions hold:" (order adjusted from the RFC) */

  /* "* The string is a host name (i.e., not an IP address)." */
  if (ipRegex.test(str)) {
    return false;
  }

  /* "* The domain string is a suffix of the string" */
  var idx = str.indexOf(domStr);
  if (idx <= 0) {
    return false; // it's a non-match (-1) or prefix (0)
  }

  // e.g "a.b.c".indexOf("b.c") === 2
  // 5 === 3+2
  if (str.length !== domStr.length + idx) { // it's not a suffix
    return false;
  }

  /* "* The last character of the string that is not included in the domain
   * string is a %x2E (".") character." */
  if (str.substr(idx-1,1) !== '.') {
    return false;
  }

  return true;
}

// RFC6265 S5.1.4 Paths and Path-Match

/*
 * "The user agent MUST use an algorithm equivalent to the following algorithm
 * to compute the default-path of a cookie:"
 *
 * Assumption: the path (and not query part or absolute uri) is passed in.
 */
function defaultPath(path) {
  // "2. If the uri-path is empty or if the first character of the uri-path is
  // not a %x2F ("/") character, output %x2F ("/") and skip the remaining
  // steps."
  if (!path || path.substr(0,1) !== "/") {
    return "/";
  }

  // "3. If the uri-path contains no more than one %x2F ("/") character, output
  // %x2F ("/") and skip the remaining step."
  if (path === "/") {
    return path;
  }

  var rightSlash = path.lastIndexOf("/");
  if (rightSlash === 0) {
    return "/";
  }

  // "4. Output the characters of the uri-path from the first character up to,
  // but not including, the right-most %x2F ("/")."
  return path.slice(0, rightSlash);
}

// Truncate `str` at the first '\n', '\r' or '\0' (Chromium "relaxed"
// terminator behavior; see TERMINATORS above).
function trimTerminator(str) {
  for (var t = 0; t < TERMINATORS.length; t++) {
    var terminatorIdx = str.indexOf(TERMINATORS[t]);
    if (terminatorIdx !== -1) {
      str = str.substr(0,terminatorIdx);
    }
  }

  return str;
}

// Parse the "cookie-name=cookie-value" pair at the start of a Set-Cookie
// string into a fresh Cookie (key/value only), or undefined on failure.
// With looseMode, key-less pairs like "=abc" are accepted.
function parseCookiePair(cookiePair, looseMode) {
  cookiePair = trimTerminator(cookiePair);

  var firstEq = cookiePair.indexOf('=');
  if (looseMode) {
    if (firstEq === 0) { // '=' is immediately at start
      cookiePair = cookiePair.substr(1);
      firstEq = cookiePair.indexOf('='); // might still need to split on '='
    }
  } else { // non-loose mode
    if (firstEq <= 0) { // no '=' or is at start
      return; // needs to have non-empty "cookie-name"
    }
  }

  var cookieName, cookieValue;
  if (firstEq <= 0) {
    cookieName = "";
    cookieValue = cookiePair.trim();
  } else {
    cookieName = cookiePair.substr(0, firstEq).trim();
    cookieValue = cookiePair.substr(firstEq+1).trim();
  }

  if (CONTROL_CHARS.test(cookieName) || CONTROL_CHARS.test(cookieValue)) {
    return;
  }

  var c = new Cookie();
  c.key = cookieName;
  c.value = cookieValue;
  return c;
}

// RFC6265 S5.2 Set-Cookie parser: name=value pair first, then each ";"
// separated attribute (expires, max-age, domain, ...).  Returns a Cookie or
// undefined.  With options.loose, key-less cookies are accepted.
function parse(str, options) {
  if (!options || typeof options !== 'object') {
    options = {};
  }
  str = str.trim();

  // We use a regex to parse the "name-value-pair" part of S5.2
  var firstSemi = str.indexOf(';'); // S5.2 step 1
  var cookiePair = (firstSemi === -1) ? str : str.substr(0, firstSemi);
  var c = parseCookiePair(cookiePair, !!options.loose);
  if (!c) {
    return;
  }

  if (firstSemi === -1) {
    return c;
  }

  // S5.2.3 "unparsed-attributes consist of the remainder of the set-cookie-string
  // (including the %x3B (";") in question)." plus later on in the same section
  // "discard the first ";" and trim".
  var unparsed = str.slice(firstSemi + 1).trim();

  // "If the unparsed-attributes string is empty, skip the rest of these
  // steps."
  if (unparsed.length === 0) {
    return c;
  }

  /*
   * S5.2 says that when looping over the items "[p]rocess the attribute-name
   * and attribute-value according to the requirements in the following
   * subsections" for every item.  Plus, for many of the individual attributes
   * in S5.3 it says to use the "attribute-value of the last attribute in the
   * cookie-attribute-list".  Therefore, in this implementation, we overwrite
   * the previous value.
   */
  var cookie_avs = unparsed.split(';');
  while (cookie_avs.length) {
    var av = cookie_avs.shift().trim();
    if (av.length === 0) { // happens if ";;" appears
      continue;
    }
    var av_sep = av.indexOf('=');
    var av_key, av_value;

    if (av_sep === -1) {
      av_key = av;
      av_value = null;
    } else {
      av_key = av.substr(0,av_sep);
      av_value = av.substr(av_sep+1);
    }

    av_key = av_key.trim().toLowerCase();

    if (av_value) {
      av_value = av_value.trim();
    }

    switch(av_key) {
    case 'expires': // S5.2.1
      if (av_value) {
        var exp = parseDate(av_value);
        // "If the attribute-value failed to parse as a cookie date, ignore the
        // cookie-av."
        if (exp) {
          // over and underflow not realistically a concern: V8's getTime() seems to
          // store something larger than a 32-bit time_t (even with 32-bit node)
          c.expires = exp;
        }
      }
      break;

    case 'max-age': // S5.2.2
      if (av_value) {
        // "If the first character of the attribute-value is not a DIGIT or a "-"
        // character ...[or]... If the remainder of attribute-value contains a
        // non-DIGIT character, ignore the cookie-av."
        if (/^-?[0-9]+$/.test(av_value)) {
          var delta = parseInt(av_value, 10);
          // "If delta-seconds is less than or equal to zero (0), let expiry-time
          // be the earliest representable date and time."
          c.setMaxAge(delta);
        }
      }
      break;

    case 'domain': // S5.2.3
      // "If the attribute-value is empty, the behavior is undefined.  However,
      // the user agent SHOULD ignore the cookie-av entirely."
      if (av_value) {
        // S5.2.3 "Let cookie-domain be the attribute-value without the leading %x2E
        // (".") character."
        var domain = av_value.trim().replace(/^\./, '');
        if (domain) {
          // "Convert the cookie-domain to lower case."
c.domain = domain.toLowerCase(); } } break; case 'path': // S5.2.4 /* * "If the attribute-value is empty or if the first character of the * attribute-value is not %x2F ("/"): * Let cookie-path be the default-path. * Otherwise: * Let cookie-path be the attribute-value." * * We'll represent the default-path as null since it depends on the * context of the parsing. */ c.path = av_value && av_value[0] === "/" ? av_value : null; break; case 'secure': // S5.2.5 /* * "If the attribute-name case-insensitively matches the string "Secure", * the user agent MUST append an attribute to the cookie-attribute-list * with an attribute-name of Secure and an empty attribute-value." */ c.secure = true; break; case 'httponly': // S5.2.6 -- effectively the same as 'secure' c.httpOnly = true; break; default: c.extensions = c.extensions || []; c.extensions.push(av); break; } } return c; } // avoid the V8 deoptimization monster! function jsonParse(str) { var obj; try { obj = JSON.parse(str); } catch (e) { return e; } return obj; } function fromJSON(str) { if (!str) { return null; } var obj; if (typeof str === 'string') { obj = jsonParse(str); if (obj instanceof Error) { return null; } } else { // assume it's an Object obj = str; } var c = new Cookie(); for (var i=0; i<Cookie.serializableProperties.length; i++) { var prop = Cookie.serializableProperties[i]; if (obj[prop] === undefined || obj[prop] === Cookie.prototype[prop]) { continue; // leave as prototype default } if (prop === 'expires' || prop === 'creation' || prop === 'lastAccessed') { if (obj[prop] === null) { c[prop] = null; } else { c[prop] = obj[prop] == "Infinity" ? "Infinity" : new Date(obj[prop]); } } else { c[prop] = obj[prop]; } } return c; } /* Section 5.4 part 2: * "* Cookies with longer paths are listed before cookies with * shorter paths. * * * Among cookies that have equal-length path fields, cookies with * earlier creation-times are listed before cookies with later * creation-times." 
*/ function cookieCompare(a,b) { var cmp = 0; // descending for length: b CMP a var aPathLen = a.path ? a.path.length : 0; var bPathLen = b.path ? b.path.length : 0; cmp = bPathLen - aPathLen; if (cmp !== 0) { return cmp; } // ascending for time: a CMP b var aTime = a.creation ? a.creation.getTime() : MAX_TIME; var bTime = b.creation ? b.creation.getTime() : MAX_TIME; cmp = aTime - bTime; if (cmp !== 0) { return cmp; } // break ties for the same millisecond (precision of JavaScript's clock) cmp = a.creationIndex - b.creationIndex; return cmp; } // Gives the permutation of all possible pathMatch()es of a given path. The // array is in longest-to-shortest order. Handy for indexing. function permutePath(path) { if (path === '/') { return ['/']; } if (path.lastIndexOf('/') === path.length-1) { path = path.substr(0,path.length-1); } var permutations = [path]; while (path.length > 1) { var lindex = path.lastIndexOf('/'); if (lindex === 0) { break; } path = path.substr(0,lindex); permutations.push(path); } permutations.push('/'); return permutations; } function getCookieContext(url) { if (url instanceof Object) { return url; } // NOTE: decodeURI will throw on malformed URIs (see GH-32). // Therefore, we will just skip decoding for such URIs. 
try { url = decodeURI(url); } catch(err) { // Silently swallow error } return urlParse(url); } function Cookie(options) { options = options || {}; Object.keys(options).forEach(function(prop) { if (Cookie.prototype.hasOwnProperty(prop) && Cookie.prototype[prop] !== options[prop] && prop.substr(0,1) !== '_') { this[prop] = options[prop]; } }, this); this.creation = this.creation || new Date(); // used to break creation ties in cookieCompare(): Object.defineProperty(this, 'creationIndex', { configurable: false, enumerable: false, // important for assert.deepEqual checks writable: true, value: ++Cookie.cookiesCreated }); } Cookie.cookiesCreated = 0; // incremented each time a cookie is created Cookie.parse = parse; Cookie.fromJSON = fromJSON; Cookie.prototype.key = ""; Cookie.prototype.value = ""; // the order in which the RFC has them: Cookie.prototype.expires = "Infinity"; // coerces to literal Infinity Cookie.prototype.maxAge = null; // takes precedence over expires for TTL Cookie.prototype.domain = null; Cookie.prototype.path = null; Cookie.prototype.secure = false; Cookie.prototype.httpOnly = false; Cookie.prototype.extensions = null; // set by the CookieJar: Cookie.prototype.hostOnly = null; // boolean when set Cookie.prototype.pathIsDefault = null; // boolean when set Cookie.prototype.creation = null; // Date when set; defaulted by Cookie.parse Cookie.prototype.lastAccessed = null; // Date when set Object.defineProperty(Cookie.prototype, 'creationIndex', { configurable: true, enumerable: false, writable: true, value: 0 }); Cookie.serializableProperties = Object.keys(Cookie.prototype) .filter(function(prop) { return !( Cookie.prototype[prop] instanceof Function || prop === 'creationIndex' || prop.substr(0,1) === '_' ); }); Cookie.prototype.inspect = function inspect() { var now = Date.now(); return 'Cookie="'+this.toString() + '; hostOnly='+(this.hostOnly != null ? this.hostOnly : '?') + '; aAge='+(this.lastAccessed ? 
(now-this.lastAccessed.getTime())+'ms' : '?') + '; cAge='+(this.creation ? (now-this.creation.getTime())+'ms' : '?') + '"'; }; // Use the new custom inspection symbol to add the custom inspect function if // available. if (util.inspect.custom) { Cookie.prototype[util.inspect.custom] = Cookie.prototype.inspect; } Cookie.prototype.toJSON = function() { var obj = {}; var props = Cookie.serializableProperties; for (var i=0; i<props.length; i++) { var prop = props[i]; if (this[prop] === Cookie.prototype[prop]) { continue; // leave as prototype default } if (prop === 'expires' || prop === 'creation' || prop === 'lastAccessed') { if (this[prop] === null) { obj[prop] = null; } else { obj[prop] = this[prop] == "Infinity" ? // intentionally not === "Infinity" : this[prop].toISOString(); } } else if (prop === 'maxAge') { if (this[prop] !== null) { // again, intentionally not === obj[prop] = (this[prop] == Infinity || this[prop] == -Infinity) ? this[prop].toString() : this[prop]; } } else { if (this[prop] !== Cookie.prototype[prop]) { obj[prop] = this[prop]; } } } return obj; }; Cookie.prototype.clone = function() { return fromJSON(this.toJSON()); }; Cookie.prototype.validate = function validate() { if (!COOKIE_OCTETS.test(this.value)) { return false; } if (this.expires != Infinity && !(this.expires instanceof Date) && !parseDate(this.expires)) { return false; } if (this.maxAge != null && this.maxAge <= 0) { return false; // "Max-Age=" non-zero-digit *DIGIT } if (this.path != null && !PATH_VALUE.test(this.path)) { return false; } var cdomain = this.cdomain(); if (cdomain) { if (cdomain.match(/\.$/)) { return false; // S4.1.2.3 suggests that this is bad. 
domainMatch() tests confirm this } var suffix = pubsuffix.getPublicSuffix(cdomain); if (suffix == null) { // it's a public suffix return false; } } return true; }; Cookie.prototype.setExpires = function setExpires(exp) { if (exp instanceof Date) { this.expires = exp; } else { this.expires = parseDate(exp) || "Infinity"; } }; Cookie.prototype.setMaxAge = function setMaxAge(age) { if (age === Infinity || age === -Infinity) { this.maxAge = age.toString(); // so JSON.stringify() works } else { this.maxAge = age; } }; // gives Cookie header format Cookie.prototype.cookieString = function cookieString() { var val = this.value; if (val == null) { val = ''; } if (this.key === '') { return val; } return this.key+'='+val; }; // gives Set-Cookie header format Cookie.prototype.toString = function toString() { var str = this.cookieString(); if (this.expires != Infinity) { if (this.expires instanceof Date) { str += '; Expires='+formatDate(this.expires); } else { str += '; Expires='+this.expires; } } if (this.maxAge != null && this.maxAge != Infinity) { str += '; Max-Age='+this.maxAge; } if (this.domain && !this.hostOnly) { str += '; Domain='+this.domain; } if (this.path) { str += '; Path='+this.path; } if (this.secure) { str += '; Secure'; } if (this.httpOnly) { str += '; HttpOnly'; } if (this.extensions) { this.extensions.forEach(function(ext) { str += '; '+ext; }); } return str; }; // TTL() partially replaces the "expiry-time" parts of S5.3 step 3 (setCookie() // elsewhere) // S5.3 says to give the "latest representable date" for which we use Infinity // For "expired" we use 0 Cookie.prototype.TTL = function TTL(now) { /* RFC6265 S4.1.2.2 If a cookie has both the Max-Age and the Expires * attribute, the Max-Age attribute has precedence and controls the * expiration date of the cookie. * (Concurs with S5.3 step 3) */ if (this.maxAge != null) { return this.maxAge<=0 ? 
0 : this.maxAge*1000; } var expires = this.expires; if (expires != Infinity) { if (!(expires instanceof Date)) { expires = parseDate(expires) || Infinity; } if (expires == Infinity) { return Infinity; } return expires.getTime() - (now || Date.now()); } return Infinity; }; // expiryTime() replaces the "expiry-time" parts of S5.3 step 3 (setCookie() // elsewhere) Cookie.prototype.expiryTime = function expiryTime(now) { if (this.maxAge != null) { var relativeTo = now || this.creation || new Date(); var age = (this.maxAge <= 0) ? -Infinity : this.maxAge*1000; return relativeTo.getTime() + age; } if (this.expires == Infinity) { return Infinity; } return this.expires.getTime(); }; // expiryDate() replaces the "expiry-time" parts of S5.3 step 3 (setCookie() // elsewhere), except it returns a Date Cookie.prototype.expiryDate = function expiryDate(now) { var millisec = this.expiryTime(now); if (millisec == Infinity) { return new Date(MAX_TIME); } else if (millisec == -Infinity) { return new Date(MIN_TIME); } else { return new Date(millisec); } }; // This replaces the "persistent-flag" parts of S5.3 step 3 Cookie.prototype.isPersistent = function isPersistent() { return (this.maxAge != null || this.expires != Infinity); }; // Mostly S5.1.2 and S5.2.3: Cookie.prototype.cdomain = Cookie.prototype.canonicalizedDomain = function canonicalizedDomain() { if (this.domain == null) { return null; } return canonicalDomain(this.domain); }; function CookieJar(store, options) { if (typeof options === "boolean") { options = {rejectPublicSuffixes: options}; } else if (options == null) { options = {}; } if (options.rejectPublicSuffixes != null) { this.rejectPublicSuffixes = options.rejectPublicSuffixes; } if (options.looseMode != null) { this.enableLooseMode = options.looseMode; } if (!store) { store = new MemoryCookieStore(); } this.store = store; } CookieJar.prototype.store = null; CookieJar.prototype.rejectPublicSuffixes = true; CookieJar.prototype.enableLooseMode = false; var 
CAN_BE_SYNC = []; CAN_BE_SYNC.push('setCookie'); CookieJar.prototype.setCookie = function(cookie, url, options, cb) { var err; var context = getCookieContext(url); if (options instanceof Function) { cb = options; options = {}; } var host = canonicalDomain(context.hostname); var loose = this.enableLooseMode; if (options.loose != null) { loose = options.loose; } // S5.3 step 1 if (typeof(cookie) === 'string' || cookie instanceof String) { cookie = Cookie.parse(cookie, { loose: loose }); if (!cookie) { err = new Error("Cookie failed to parse"); return cb(options.ignoreError ? null : err); } } else if (!(cookie instanceof Cookie)) { // If you're seeing this error, and are passing in a Cookie object, // it *might* be a Cookie object from another loaded version of tough-cookie. err = new Error("First argument to setCookie must be a Cookie object or string"); return cb(options.ignoreError ? null : err); } // S5.3 step 2 var now = options.now || new Date(); // will assign later to save effort in the face of errors // S5.3 step 3: NOOP; persistent-flag and expiry-time is handled by getCookie() // S5.3 step 4: NOOP; domain is null by default // S5.3 step 5: public suffixes if (this.rejectPublicSuffixes && cookie.domain) { var suffix = pubsuffix.getPublicSuffix(cookie.cdomain()); if (suffix == null) { // e.g. "com" err = new Error("Cookie has domain set to a public suffix"); return cb(options.ignoreError ? null : err); } } // S5.3 step 6: if (cookie.domain) { if (!domainMatch(host, cookie.cdomain(), false)) { err = new Error("Cookie not in this host's domain. Cookie:"+cookie.cdomain()+" Request:"+host); return cb(options.ignoreError ? null : err); } if (cookie.hostOnly == null) { // don't reset if already set cookie.hostOnly = false; } } else { cookie.hostOnly = true; cookie.domain = host; } //S5.2.4 If the attribute-value is empty or if the first character of the //attribute-value is not %x2F ("/"): //Let cookie-path be the default-path. 
if (!cookie.path || cookie.path[0] !== '/') { cookie.path = defaultPath(context.pathname); cookie.pathIsDefault = true; } // S5.3 step 8: NOOP; secure attribute // S5.3 step 9: NOOP; httpOnly attribute // S5.3 step 10 if (options.http === false && cookie.httpOnly) { err = new Error("Cookie is HttpOnly and this isn't an HTTP API"); return cb(options.ignoreError ? null : err); } var store = this.store; if (!store.updateCookie) { store.updateCookie = function(oldCookie, newCookie, cb) { this.putCookie(newCookie, cb); }; } function withCookie(err, oldCookie) { if (err) { return cb(err); } var next = function(err) { if (err) { return cb(err); } else { cb(null, cookie); } }; if (oldCookie) { // S5.3 step 11 - "If the cookie store contains a cookie with the same name, // domain, and path as the newly created cookie:" if (options.http === false && oldCookie.httpOnly) { // step 11.2 err = new Error("old Cookie is HttpOnly and this isn't an HTTP API"); return cb(options.ignoreError ? null : err); } cookie.creation = oldCookie.creation; // step 11.3 cookie.creationIndex = oldCookie.creationIndex; // preserve tie-breaker cookie.lastAccessed = now; // Step 11.4 (delete cookie) is implied by just setting the new one: store.updateCookie(oldCookie, cookie, next); // step 12 } else { cookie.creation = cookie.lastAccessed = now; store.putCookie(cookie, next); // step 12 } } store.findCookie(cookie.domain, cookie.path, cookie.key, withCookie); }; // RFC6365 S5.4 CAN_BE_SYNC.push('getCookies'); CookieJar.prototype.getCookies = function(url, options, cb) { var context = getCookieContext(url); if (options instanceof Function) { cb = options; options = {}; } var host = canonicalDomain(context.hostname); var path = context.pathname || '/'; var secure = options.secure; if (secure == null && context.protocol && (context.protocol == 'https:' || context.protocol == 'wss:')) { secure = true; } var http = options.http; if (http == null) { http = true; } var now = options.now || Date.now(); var 
expireCheck = options.expire !== false; var allPaths = !!options.allPaths; var store = this.store; function matchingCookie(c) { // "Either: // The cookie's host-only-flag is true and the canonicalized // request-host is identical to the cookie's domain. // Or: // The cookie's host-only-flag is false and the canonicalized // request-host domain-matches the cookie's domain." if (c.hostOnly) { if (c.domain != host) { return false; } } else { if (!domainMatch(host, c.domain, false)) { return false; } } // "The request-uri's path path-matches the cookie's path." if (!allPaths && !pathMatch(path, c.path)) { return false; } // "If the cookie's secure-only-flag is true, then the request-uri's // scheme must denote a "secure" protocol" if (c.secure && !secure) { return false; } // "If the cookie's http-only-flag is true, then exclude the cookie if the // cookie-string is being generated for a "non-HTTP" API" if (c.httpOnly && !http) { return false; } // deferred from S5.3 // non-RFC: allow retention of expired cookies by choice if (expireCheck && c.expiryTime() <= now) { store.removeCookie(c.domain, c.path, c.key, function(){}); // result ignored return false; } return true; } store.findCookies(host, allPaths ? 
null : path, function(err,cookies) { if (err) { return cb(err); } cookies = cookies.filter(matchingCookie); // sorting of S5.4 part 2 if (options.sort !== false) { cookies = cookies.sort(cookieCompare); } // S5.4 part 3 var now = new Date(); cookies.forEach(function(c) { c.lastAccessed = now; }); // TODO persist lastAccessed cb(null,cookies); }); }; CAN_BE_SYNC.push('getCookieString'); CookieJar.prototype.getCookieString = function(/*..., cb*/) { var args = Array.prototype.slice.call(arguments,0); var cb = args.pop(); var next = function(err,cookies) { if (err) { cb(err); } else { cb(null, cookies .sort(cookieCompare) .map(function(c){ return c.cookieString(); }) .join('; ')); } }; args.push(next); this.getCookies.apply(this,args); }; CAN_BE_SYNC.push('getSetCookieStrings'); CookieJar.prototype.getSetCookieStrings = function(/*..., cb*/) { var args = Array.prototype.slice.call(arguments,0); var cb = args.pop(); var next = function(err,cookies) { if (err) { cb(err); } else { cb(null, cookies.map(function(c){ return c.toString(); })); } }; args.push(next); this.getCookies.apply(this,args); }; CAN_BE_SYNC.push('serialize'); CookieJar.prototype.serialize = function(cb) { var type = this.store.constructor.name; if (type === 'Object') { type = null; } // update README.md "Serialization Format" if you change this, please! var serialized = { // The version of tough-cookie that serialized this jar. Generally a good // practice since future versions can make data import decisions based on // known past behavior. When/if this matters, use `semver`. 
version: 'tough-cookie@'+VERSION, // add the store type, to make humans happy: storeType: type, // CookieJar configuration: rejectPublicSuffixes: !!this.rejectPublicSuffixes, // this gets filled from getAllCookies: cookies: [] }; if (!(this.store.getAllCookies && typeof this.store.getAllCookies === 'function')) { return cb(new Error('store does not support getAllCookies and cannot be serialized')); } this.store.getAllCookies(function(err,cookies) { if (err) { return cb(err); } serialized.cookies = cookies.map(function(cookie) { // convert to serialized 'raw' cookies cookie = (cookie instanceof Cookie) ? cookie.toJSON() : cookie; // Remove the index so new ones get assigned during deserialization delete cookie.creationIndex; return cookie; }); return cb(null, serialized); }); }; // well-known name that JSON.stringify calls CookieJar.prototype.toJSON = function() { return this.serializeSync(); }; // use the class method CookieJar.deserialize instead of calling this directly CAN_BE_SYNC.push('_importCookies'); CookieJar.prototype._importCookies = function(serialized, cb) { var jar = this; var cookies = serialized.cookies; if (!cookies || !Array.isArray(cookies)) { return cb(new Error('serialized jar has no cookies array')); } cookies = cookies.slice(); // do not modify the original function putNext(err) { if (err) { return cb(err); } if (!cookies.length) { return cb(err, jar); } var cookie; try { cookie = fromJSON(cookies.shift()); } catch (e) { return cb(e); } if (cookie === null) { return putNext(null); // skip this cookie } jar.store.putCookie(cookie, putNext); } putNext(); }; CookieJar.deserialize = function(strOrObj, store, cb) { if (arguments.length !== 3) { // store is optional cb = store; store = null; } var serialized; if (typeof strOrObj === 'string') { serialized = jsonParse(strOrObj); if (serialized instanceof Error) { return cb(serialized); } } else { serialized = strOrObj; } var jar = new CookieJar(store, serialized.rejectPublicSuffixes); 
jar._importCookies(serialized, function(err) { if (err) { return cb(err); } cb(null, jar); }); }; CookieJar.deserializeSync = function(strOrObj, store) { var serialized = typeof strOrObj === 'string' ? JSON.parse(strOrObj) : strOrObj; var jar = new CookieJar(store, serialized.rejectPublicSuffixes); // catch this mistake early: if (!jar.store.synchronous) { throw new Error('CookieJar store is not synchronous; use async API instead.'); } jar._importCookiesSync(serialized); return jar; }; CookieJar.fromJSON = CookieJar.deserializeSync; CookieJar.prototype.clone = function(newStore, cb) { if (arguments.length === 1) { cb = newStore; newStore = null; } this.serialize(function(err,serialized) { if (err) { return cb(err); } CookieJar.deserialize(serialized, newStore, cb); }); }; CAN_BE_SYNC.push('removeAllCookies'); CookieJar.prototype.removeAllCookies = function(cb) { var store = this.store; // Check that the store implements its own removeAllCookies(). The default // implementation in Store will immediately call the callback with a "not // implemented" Error. if (store.removeAllCookies instanceof Function && store.removeAllCookies !== Store.prototype.removeAllCookies) { return store.removeAllCookies(cb); } store.getAllCookies(function(err, cookies) { if (err) { return cb(err); } if (cookies.length === 0) { return cb(null); } var completedCount = 0; var removeErrors = []; function removeCookieCb(removeErr) { if (removeErr) { removeErrors.push(removeErr); } completedCount++; if (completedCount === cookies.length) { return cb(removeErrors.length ? 
removeErrors[0] : null); } } cookies.forEach(function(cookie) { store.removeCookie(cookie.domain, cookie.path, cookie.key, removeCookieCb); }); }); }; CookieJar.prototype._cloneSync = syncWrap('clone'); CookieJar.prototype.cloneSync = function(newStore) { if (!newStore.synchronous) { throw new Error('CookieJar clone destination store is not synchronous; use async API instead.'); } return this._cloneSync(newStore); }; // Use a closure to provide a true imperative API for synchronous stores. function syncWrap(method) { return function() { if (!this.store.synchronous) { throw new Error('CookieJar store is not synchronous; use async API instead.'); } var args = Array.prototype.slice.call(arguments); var syncErr, syncResult; args.push(function syncCb(err, result) { syncErr = err; syncResult = result; }); this[method].apply(this, args); if (syncErr) { throw syncErr; } return syncResult; }; } // wrap all declared CAN_BE_SYNC methods in the sync wrapper CAN_BE_SYNC.forEach(function(method) { CookieJar.prototype[method+'Sync'] = syncWrap(method); }); exports.version = VERSION; exports.CookieJar = CookieJar; exports.Cookie = Cookie; exports.Store = Store; exports.MemoryCookieStore = MemoryCookieStore; exports.parseDate = parseDate; exports.formatDate = formatDate; exports.parse = parse; exports.fromJSON = fromJSON; exports.domainMatch = domainMatch; exports.defaultPath = defaultPath; exports.pathMatch = pathMatch; exports.getPublicSuffix = pubsuffix.getPublicSuffix; exports.cookieCompare = cookieCompare; exports.permuteDomain = require('tough-cookie/lib/permuteDomain').permuteDomain; exports.permutePath = permutePath; exports.canonicalDomain = canonicalDomain;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/tough-cookie/lib/cookie.js
cookie.js
export interface StartOfSourceMap { file?: string; sourceRoot?: string; } export interface RawSourceMap extends StartOfSourceMap { version: string; sources: string[]; names: string[]; sourcesContent?: string[]; mappings: string; } export interface Position { line: number; column: number; } export interface LineRange extends Position { lastColumn: number; } export interface FindPosition extends Position { // SourceMapConsumer.GREATEST_LOWER_BOUND or SourceMapConsumer.LEAST_UPPER_BOUND bias?: number; } export interface SourceFindPosition extends FindPosition { source: string; } export interface MappedPosition extends Position { source: string; name?: string; } export interface MappingItem { source: string; generatedLine: number; generatedColumn: number; originalLine: number; originalColumn: number; name: string; } export class SourceMapConsumer { static GENERATED_ORDER: number; static ORIGINAL_ORDER: number; static GREATEST_LOWER_BOUND: number; static LEAST_UPPER_BOUND: number; constructor(rawSourceMap: RawSourceMap); computeColumnSpans(): void; originalPositionFor(generatedPosition: FindPosition): MappedPosition; generatedPositionFor(originalPosition: SourceFindPosition): LineRange; allGeneratedPositionsFor(originalPosition: MappedPosition): Position[]; hasContentsOfAllSources(): boolean; sourceContentFor(source: string, returnNullOnMissing?: boolean): string; eachMapping(callback: (mapping: MappingItem) => void, context?: any, order?: number): void; } export interface Mapping { generated: Position; original: Position; source: string; name?: string; } export class SourceMapGenerator { constructor(startOfSourceMap?: StartOfSourceMap); static fromSourceMap(sourceMapConsumer: SourceMapConsumer): SourceMapGenerator; addMapping(mapping: Mapping): void; setSourceContent(sourceFile: string, sourceContent: string): void; applySourceMap(sourceMapConsumer: SourceMapConsumer, sourceFile?: string, sourceMapPath?: string): void; toString(): string; } export interface 
CodeWithSourceMap { code: string; map: SourceMapGenerator; } export class SourceNode { constructor(); constructor(line: number, column: number, source: string); constructor(line: number, column: number, source: string, chunk?: string, name?: string); static fromStringWithSourceMap(code: string, sourceMapConsumer: SourceMapConsumer, relativePath?: string): SourceNode; add(chunk: string): void; prepend(chunk: string): void; setSourceContent(sourceFile: string, sourceContent: string): void; walk(fn: (chunk: string, mapping: MappedPosition) => void): void; walkSourceContents(fn: (file: string, content: string) => void): void; join(sep: string): SourceNode; replaceRight(pattern: string, replacement: string): SourceNode; toString(): string; toStringWithSourceMap(startOfSourceMap?: StartOfSourceMap): CodeWithSourceMap; }
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/source-map.d.ts
source-map.d.ts
# Source Map [![Build Status](https://travis-ci.org/mozilla/source-map.png?branch=master)](https://travis-ci.org/mozilla/source-map) [![NPM](https://nodei.co/npm/source-map.png?downloads=true&downloadRank=true)](https://www.npmjs.com/package/source-map) This is a library to generate and consume the source map format [described here][format]. [format]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit ## Use with Node $ npm install source-map ## Use on the Web <script src="https://raw.githubusercontent.com/mozilla/source-map/master/dist/source-map.min.js" defer></script> -------------------------------------------------------------------------------- <!-- `npm run toc` to regenerate the Table of Contents --> <!-- START doctoc generated TOC please keep comment here to allow auto update --> <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> ## Table of Contents - [Examples](#examples) - [Consuming a source map](#consuming-a-source-map) - [Generating a source map](#generating-a-source-map) - [With SourceNode (high level API)](#with-sourcenode-high-level-api) - [With SourceMapGenerator (low level API)](#with-sourcemapgenerator-low-level-api) - [API](#api) - [SourceMapConsumer](#sourcemapconsumer) - [new SourceMapConsumer(rawSourceMap)](#new-sourcemapconsumerrawsourcemap) - [SourceMapConsumer.prototype.computeColumnSpans()](#sourcemapconsumerprototypecomputecolumnspans) - [SourceMapConsumer.prototype.originalPositionFor(generatedPosition)](#sourcemapconsumerprototypeoriginalpositionforgeneratedposition) - [SourceMapConsumer.prototype.generatedPositionFor(originalPosition)](#sourcemapconsumerprototypegeneratedpositionfororiginalposition) - [SourceMapConsumer.prototype.allGeneratedPositionsFor(originalPosition)](#sourcemapconsumerprototypeallgeneratedpositionsfororiginalposition) - [SourceMapConsumer.prototype.hasContentsOfAllSources()](#sourcemapconsumerprototypehascontentsofallsources) - 
[SourceMapConsumer.prototype.sourceContentFor(source[, returnNullOnMissing])](#sourcemapconsumerprototypesourcecontentforsource-returnnullonmissing) - [SourceMapConsumer.prototype.eachMapping(callback, context, order)](#sourcemapconsumerprototypeeachmappingcallback-context-order) - [SourceMapGenerator](#sourcemapgenerator) - [new SourceMapGenerator([startOfSourceMap])](#new-sourcemapgeneratorstartofsourcemap) - [SourceMapGenerator.fromSourceMap(sourceMapConsumer)](#sourcemapgeneratorfromsourcemapsourcemapconsumer) - [SourceMapGenerator.prototype.addMapping(mapping)](#sourcemapgeneratorprototypeaddmappingmapping) - [SourceMapGenerator.prototype.setSourceContent(sourceFile, sourceContent)](#sourcemapgeneratorprototypesetsourcecontentsourcefile-sourcecontent) - [SourceMapGenerator.prototype.applySourceMap(sourceMapConsumer[, sourceFile[, sourceMapPath]])](#sourcemapgeneratorprototypeapplysourcemapsourcemapconsumer-sourcefile-sourcemappath) - [SourceMapGenerator.prototype.toString()](#sourcemapgeneratorprototypetostring) - [SourceNode](#sourcenode) - [new SourceNode([line, column, source[, chunk[, name]]])](#new-sourcenodeline-column-source-chunk-name) - [SourceNode.fromStringWithSourceMap(code, sourceMapConsumer[, relativePath])](#sourcenodefromstringwithsourcemapcode-sourcemapconsumer-relativepath) - [SourceNode.prototype.add(chunk)](#sourcenodeprototypeaddchunk) - [SourceNode.prototype.prepend(chunk)](#sourcenodeprototypeprependchunk) - [SourceNode.prototype.setSourceContent(sourceFile, sourceContent)](#sourcenodeprototypesetsourcecontentsourcefile-sourcecontent) - [SourceNode.prototype.walk(fn)](#sourcenodeprototypewalkfn) - [SourceNode.prototype.walkSourceContents(fn)](#sourcenodeprototypewalksourcecontentsfn) - [SourceNode.prototype.join(sep)](#sourcenodeprototypejoinsep) - [SourceNode.prototype.replaceRight(pattern, replacement)](#sourcenodeprototypereplacerightpattern-replacement) - [SourceNode.prototype.toString()](#sourcenodeprototypetostring) - 
[SourceNode.prototype.toStringWithSourceMap([startOfSourceMap])](#sourcenodeprototypetostringwithsourcemapstartofsourcemap) <!-- END doctoc generated TOC please keep comment here to allow auto update --> ## Examples ### Consuming a source map ```js var rawSourceMap = { version: 3, file: 'min.js', names: ['bar', 'baz', 'n'], sources: ['one.js', 'two.js'], sourceRoot: 'http://example.com/www/js/', mappings: 'CAAC,IAAI,IAAM,SAAUA,GAClB,OAAOC,IAAID;CCDb,IAAI,IAAM,SAAUE,GAClB,OAAOA' }; var smc = new SourceMapConsumer(rawSourceMap); console.log(smc.sources); // [ 'http://example.com/www/js/one.js', // 'http://example.com/www/js/two.js' ] console.log(smc.originalPositionFor({ line: 2, column: 28 })); // { source: 'http://example.com/www/js/two.js', // line: 2, // column: 10, // name: 'n' } console.log(smc.generatedPositionFor({ source: 'http://example.com/www/js/two.js', line: 2, column: 10 })); // { line: 2, column: 28 } smc.eachMapping(function (m) { // ... }); ``` ### Generating a source map In depth guide: [**Compiling to JavaScript, and Debugging with Source Maps**](https://hacks.mozilla.org/2013/05/compiling-to-javascript-and-debugging-with-source-maps/) #### With SourceNode (high level API) ```js function compile(ast) { switch (ast.type) { case 'BinaryExpression': return new SourceNode( ast.location.line, ast.location.column, ast.location.source, [compile(ast.left), " + ", compile(ast.right)] ); case 'Literal': return new SourceNode( ast.location.line, ast.location.column, ast.location.source, String(ast.value) ); // ... 
default: throw new Error("Bad AST"); } } var ast = parse("40 + 2", "add.js"); console.log(compile(ast).toStringWithSourceMap({ file: 'add.js' })); // { code: '40 + 2', // map: [object SourceMapGenerator] } ``` #### With SourceMapGenerator (low level API) ```js var map = new SourceMapGenerator({ file: "source-mapped.js" }); map.addMapping({ generated: { line: 10, column: 35 }, source: "foo.js", original: { line: 33, column: 2 }, name: "christopher" }); console.log(map.toString()); // '{"version":3,"file":"source-mapped.js","sources":["foo.js"],"names":["christopher"],"mappings":";;;;;;;;;mCAgCEA"}' ``` ## API Get a reference to the module: ```js // Node.js var sourceMap = require('source-map'); // Browser builds var sourceMap = window.sourceMap; // Inside Firefox const sourceMap = require("devtools/toolkit/sourcemap/source-map.js"); ``` ### SourceMapConsumer A SourceMapConsumer instance represents a parsed source map which we can query for information about the original file positions by giving it a file position in the generated source. #### new SourceMapConsumer(rawSourceMap) The only parameter is the raw source map (either as a string which can be `JSON.parse`'d, or an object). According to the spec, source maps have the following attributes: * `version`: Which version of the source map spec this map is following. * `sources`: An array of URLs to the original source files. * `names`: An array of identifiers which can be referenced by individual mappings. * `sourceRoot`: Optional. The URL root from which all sources are relative. * `sourcesContent`: Optional. An array of contents of the original source files. * `mappings`: A string of base64 VLQs which contain the actual mappings. * `file`: Optional. The generated filename this source map is associated with. ```js var consumer = new sourceMap.SourceMapConsumer(rawSourceMapJsonData); ``` #### SourceMapConsumer.prototype.computeColumnSpans() Compute the last column for each generated mapping. 
The last column is inclusive. ```js // Before: consumer.allGeneratedPositionsFor({ line: 2, source: "foo.coffee" }) // [ { line: 2, // column: 1 }, // { line: 2, // column: 10 }, // { line: 2, // column: 20 } ] consumer.computeColumnSpans(); // After: consumer.allGeneratedPositionsFor({ line: 2, source: "foo.coffee" }) // [ { line: 2, // column: 1, // lastColumn: 9 }, // { line: 2, // column: 10, // lastColumn: 19 }, // { line: 2, // column: 20, // lastColumn: Infinity } ] ``` #### SourceMapConsumer.prototype.originalPositionFor(generatedPosition) Returns the original source, line, and column information for the generated source's line and column positions provided. The only argument is an object with the following properties: * `line`: The line number in the generated source. Line numbers in this library are 1-based (note that the underlying source map specification uses 0-based line numbers -- this library handles the translation). * `column`: The column number in the generated source. Column numbers in this library are 0-based. * `bias`: Either `SourceMapConsumer.GREATEST_LOWER_BOUND` or `SourceMapConsumer.LEAST_UPPER_BOUND`. Specifies whether to return the closest element that is smaller than or greater than the one we are searching for, respectively, if the exact element cannot be found. Defaults to `SourceMapConsumer.GREATEST_LOWER_BOUND`. and an object is returned with the following properties: * `source`: The original source file, or null if this information is not available. * `line`: The line number in the original source, or null if this information is not available. The line number is 1-based. * `column`: The column number in the original source, or null if this information is not available. The column number is 0-based. * `name`: The original identifier, or null if this information is not available. 
```js consumer.originalPositionFor({ line: 2, column: 10 }) // { source: 'foo.coffee', // line: 2, // column: 2, // name: null } consumer.originalPositionFor({ line: 99999999999999999, column: 999999999999999 }) // { source: null, // line: null, // column: null, // name: null } ``` #### SourceMapConsumer.prototype.generatedPositionFor(originalPosition) Returns the generated line and column information for the original source, line, and column positions provided. The only argument is an object with the following properties: * `source`: The filename of the original source. * `line`: The line number in the original source. The line number is 1-based. * `column`: The column number in the original source. The column number is 0-based. and an object is returned with the following properties: * `line`: The line number in the generated source, or null. The line number is 1-based. * `column`: The column number in the generated source, or null. The column number is 0-based. ```js consumer.generatedPositionFor({ source: "example.js", line: 2, column: 10 }) // { line: 1, // column: 56 } ``` #### SourceMapConsumer.prototype.allGeneratedPositionsFor(originalPosition) Returns all generated line and column information for the original source, line, and column provided. If no column is provided, returns all mappings corresponding to a either the line we are searching for or the next closest line that has any mappings. Otherwise, returns all mappings corresponding to the given line and either the column we are searching for or the next closest column that has any offsets. The only argument is an object with the following properties: * `source`: The filename of the original source. * `line`: The line number in the original source. The line number is 1-based. * `column`: Optional. The column number in the original source. The column number is 0-based. and an array of objects is returned, each with the following properties: * `line`: The line number in the generated source, or null. 
The line number is 1-based. * `column`: The column number in the generated source, or null. The column number is 0-based. ```js consumer.allGeneratedpositionsfor({ line: 2, source: "foo.coffee" }) // [ { line: 2, // column: 1 }, // { line: 2, // column: 10 }, // { line: 2, // column: 20 } ] ``` #### SourceMapConsumer.prototype.hasContentsOfAllSources() Return true if we have the embedded source content for every source listed in the source map, false otherwise. In other words, if this method returns `true`, then `consumer.sourceContentFor(s)` will succeed for every source `s` in `consumer.sources`. ```js // ... if (consumer.hasContentsOfAllSources()) { consumerReadyCallback(consumer); } else { fetchSources(consumer, consumerReadyCallback); } // ... ``` #### SourceMapConsumer.prototype.sourceContentFor(source[, returnNullOnMissing]) Returns the original source content for the source provided. The only argument is the URL of the original source file. If the source content for the given source is not found, then an error is thrown. Optionally, pass `true` as the second param to have `null` returned instead. ```js consumer.sources // [ "my-cool-lib.clj" ] consumer.sourceContentFor("my-cool-lib.clj") // "..." consumer.sourceContentFor("this is not in the source map"); // Error: "this is not in the source map" is not in the source map consumer.sourceContentFor("this is not in the source map", true); // null ``` #### SourceMapConsumer.prototype.eachMapping(callback, context, order) Iterate over each mapping between an original source/line/column and a generated line/column in this source map. * `callback`: The function that is called with each mapping. Mappings have the form `{ source, generatedLine, generatedColumn, originalLine, originalColumn, name }` * `context`: Optional. If specified, this object will be the value of `this` every time that `callback` is called. * `order`: Either `SourceMapConsumer.GENERATED_ORDER` or `SourceMapConsumer.ORIGINAL_ORDER`. 
Specifies whether you want to iterate over the mappings sorted by the generated file's line/column order or the original's source/line/column order, respectively. Defaults to `SourceMapConsumer.GENERATED_ORDER`. ```js consumer.eachMapping(function (m) { console.log(m); }) // ... // { source: 'illmatic.js', // generatedLine: 1, // generatedColumn: 0, // originalLine: 1, // originalColumn: 0, // name: null } // { source: 'illmatic.js', // generatedLine: 2, // generatedColumn: 0, // originalLine: 2, // originalColumn: 0, // name: null } // ... ``` ### SourceMapGenerator An instance of the SourceMapGenerator represents a source map which is being built incrementally. #### new SourceMapGenerator([startOfSourceMap]) You may pass an object with the following properties: * `file`: The filename of the generated source that this source map is associated with. * `sourceRoot`: A root for all relative URLs in this source map. * `skipValidation`: Optional. When `true`, disables validation of mappings as they are added. This can improve performance but should be used with discretion, as a last resort. Even then, one should avoid using this flag when running tests, if possible. ```js var generator = new sourceMap.SourceMapGenerator({ file: "my-generated-javascript-file.js", sourceRoot: "http://example.com/app/js/" }); ``` #### SourceMapGenerator.fromSourceMap(sourceMapConsumer) Creates a new `SourceMapGenerator` from an existing `SourceMapConsumer` instance. * `sourceMapConsumer` The SourceMap. ```js var generator = sourceMap.SourceMapGenerator.fromSourceMap(consumer); ``` #### SourceMapGenerator.prototype.addMapping(mapping) Add a single mapping from original source line and column to the generated source's line and column for this source map being created. The mapping object should have the following properties: * `generated`: An object with the generated line and column positions. * `original`: An object with the original line and column positions. 
* `source`: The original source file (relative to the sourceRoot). * `name`: An optional original token name for this mapping. ```js generator.addMapping({ source: "module-one.scm", original: { line: 128, column: 0 }, generated: { line: 3, column: 456 } }) ``` #### SourceMapGenerator.prototype.setSourceContent(sourceFile, sourceContent) Set the source content for an original source file. * `sourceFile` the URL of the original source file. * `sourceContent` the content of the source file. ```js generator.setSourceContent("module-one.scm", fs.readFileSync("path/to/module-one.scm")) ``` #### SourceMapGenerator.prototype.applySourceMap(sourceMapConsumer[, sourceFile[, sourceMapPath]]) Applies a SourceMap for a source file to the SourceMap. Each mapping to the supplied source file is rewritten using the supplied SourceMap. Note: The resolution for the resulting mappings is the minimum of this map and the supplied map. * `sourceMapConsumer`: The SourceMap to be applied. * `sourceFile`: Optional. The filename of the source file. If omitted, sourceMapConsumer.file will be used, if it exists. Otherwise an error will be thrown. * `sourceMapPath`: Optional. The dirname of the path to the SourceMap to be applied. If relative, it is relative to the SourceMap. This parameter is needed when the two SourceMaps aren't in the same directory, and the SourceMap to be applied contains relative source paths. If so, those relative source paths need to be rewritten relative to the SourceMap. If omitted, it is assumed that both SourceMaps are in the same directory, thus not needing any rewriting. (Supplying `'.'` has the same effect.) #### SourceMapGenerator.prototype.toString() Renders the source map being generated to a string. 
```js generator.toString() // '{"version":3,"sources":["module-one.scm"],"names":[],"mappings":"...snip...","file":"my-generated-javascript-file.js","sourceRoot":"http://example.com/app/js/"}' ``` ### SourceNode SourceNodes provide a way to abstract over interpolating and/or concatenating snippets of generated JavaScript source code, while maintaining the line and column information associated between those snippets and the original source code. This is useful as the final intermediate representation a compiler might use before outputting the generated JS and source map. #### new SourceNode([line, column, source[, chunk[, name]]]) * `line`: The original line number associated with this source node, or null if it isn't associated with an original line. The line number is 1-based. * `column`: The original column number associated with this source node, or null if it isn't associated with an original column. The column number is 0-based. * `source`: The original source's filename; null if no filename is provided. * `chunk`: Optional. Is immediately passed to `SourceNode.prototype.add`, see below. * `name`: Optional. The original identifier. ```js var node = new SourceNode(1, 2, "a.cpp", [ new SourceNode(3, 4, "b.cpp", "extern int status;\n"), new SourceNode(5, 6, "c.cpp", "std::string* make_string(size_t n);\n"), new SourceNode(7, 8, "d.cpp", "int main(int argc, char** argv) {}\n"), ]); ``` #### SourceNode.fromStringWithSourceMap(code, sourceMapConsumer[, relativePath]) Creates a SourceNode from generated code and a SourceMapConsumer. * `code`: The generated code * `sourceMapConsumer` The SourceMap for the generated code * `relativePath` The optional path that relative sources in `sourceMapConsumer` should be relative to. 
```js var consumer = new SourceMapConsumer(fs.readFileSync("path/to/my-file.js.map", "utf8")); var node = SourceNode.fromStringWithSourceMap(fs.readFileSync("path/to/my-file.js"), consumer); ``` #### SourceNode.prototype.add(chunk) Add a chunk of generated JS to this source node. * `chunk`: A string snippet of generated JS code, another instance of `SourceNode`, or an array where each member is one of those things. ```js node.add(" + "); node.add(otherNode); node.add([leftHandOperandNode, " + ", rightHandOperandNode]); ``` #### SourceNode.prototype.prepend(chunk) Prepend a chunk of generated JS to this source node. * `chunk`: A string snippet of generated JS code, another instance of `SourceNode`, or an array where each member is one of those things. ```js node.prepend("/** Build Id: f783haef86324gf **/\n\n"); ``` #### SourceNode.prototype.setSourceContent(sourceFile, sourceContent) Set the source content for a source file. This will be added to the `SourceMap` in the `sourcesContent` field. * `sourceFile`: The filename of the source file * `sourceContent`: The content of the source file ```js node.setSourceContent("module-one.scm", fs.readFileSync("path/to/module-one.scm")) ``` #### SourceNode.prototype.walk(fn) Walk over the tree of JS snippets in this node and its children. The walking function is called once for each snippet of JS and is passed that snippet and the its original associated source's line/column location. * `fn`: The traversal function. 
```js var node = new SourceNode(1, 2, "a.js", [ new SourceNode(3, 4, "b.js", "uno"), "dos", [ "tres", new SourceNode(5, 6, "c.js", "quatro") ] ]); node.walk(function (code, loc) { console.log("WALK:", code, loc); }) // WALK: uno { source: 'b.js', line: 3, column: 4, name: null } // WALK: dos { source: 'a.js', line: 1, column: 2, name: null } // WALK: tres { source: 'a.js', line: 1, column: 2, name: null } // WALK: quatro { source: 'c.js', line: 5, column: 6, name: null } ``` #### SourceNode.prototype.walkSourceContents(fn) Walk over the tree of SourceNodes. The walking function is called for each source file content and is passed the filename and source content. * `fn`: The traversal function. ```js var a = new SourceNode(1, 2, "a.js", "generated from a"); a.setSourceContent("a.js", "original a"); var b = new SourceNode(1, 2, "b.js", "generated from b"); b.setSourceContent("b.js", "original b"); var c = new SourceNode(1, 2, "c.js", "generated from c"); c.setSourceContent("c.js", "original c"); var node = new SourceNode(null, null, null, [a, b, c]); node.walkSourceContents(function (source, contents) { console.log("WALK:", source, ":", contents); }) // WALK: a.js : original a // WALK: b.js : original b // WALK: c.js : original c ``` #### SourceNode.prototype.join(sep) Like `Array.prototype.join` except for SourceNodes. Inserts the separator between each of this source node's children. * `sep`: The separator. ```js var lhs = new SourceNode(1, 2, "a.rs", "my_copy"); var operand = new SourceNode(3, 4, "a.rs", "="); var rhs = new SourceNode(5, 6, "a.rs", "orig.clone()"); var node = new SourceNode(null, null, null, [ lhs, operand, rhs ]); var joinedNode = node.join(" "); ``` #### SourceNode.prototype.replaceRight(pattern, replacement) Call `String.prototype.replace` on the very right-most source snippet. Useful for trimming white space from the end of a source node, etc. * `pattern`: The pattern to replace. * `replacement`: The thing to replace the pattern with. 
```js // Trim trailing white space. node.replaceRight(/\s*$/, ""); ``` #### SourceNode.prototype.toString() Return the string representation of this source node. Walks over the tree and concatenates all the various snippets together to one string. ```js var node = new SourceNode(1, 2, "a.js", [ new SourceNode(3, 4, "b.js", "uno"), "dos", [ "tres", new SourceNode(5, 6, "c.js", "quatro") ] ]); node.toString() // 'unodostresquatro' ``` #### SourceNode.prototype.toStringWithSourceMap([startOfSourceMap]) Returns the string representation of this tree of source nodes, plus a SourceMapGenerator which contains all the mappings between the generated and original sources. The arguments are the same as those to `new SourceMapGenerator`. ```js var node = new SourceNode(1, 2, "a.js", [ new SourceNode(3, 4, "b.js", "uno"), "dos", [ "tres", new SourceNode(5, 6, "c.js", "quatro") ] ]); node.toStringWithSourceMap({ file: "my-output-file.js" }) // { code: 'unodostresquatro', // map: [object SourceMapGenerator] } ```
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/README.md
README.md
# Change Log ## 0.5.6 * Fix for regression when people were using numbers as names in source maps. See #236. ## 0.5.5 * Fix "regression" of unsupported, implementation behavior that half the world happens to have come to depend on. See #235. * Fix regression involving function hoisting in SpiderMonkey. See #233. ## 0.5.4 * Large performance improvements to source-map serialization. See #228 and #229. ## 0.5.3 * Do not include unnecessary distribution files. See commit ef7006f8d1647e0a83fdc60f04f5a7ca54886f86. ## 0.5.2 * Include browser distributions of the library in package.json's `files`. See issue #212. ## 0.5.1 * Fix latent bugs in IndexedSourceMapConsumer.prototype._parseMappings. See ff05274becc9e6e1295ed60f3ea090d31d843379. ## 0.5.0 * Node 0.8 is no longer supported. * Use webpack instead of dryice for bundling. * Big speedups serializing source maps. See pull request #203. * Fix a bug with `SourceMapConsumer.prototype.sourceContentFor` and sources that explicitly start with the source root. See issue #199. ## 0.4.4 * Fix an issue where using a `SourceMapGenerator` after having created a `SourceMapConsumer` from it via `SourceMapConsumer.fromSourceMap` failed. See issue #191. * Fix an issue with where `SourceMapGenerator` would mistakenly consider different mappings as duplicates of each other and avoid generating them. See issue #192. ## 0.4.3 * A very large number of performance improvements, particularly when parsing source maps. Collectively about 75% of time shaved off of the source map parsing benchmark! * Fix a bug in `SourceMapConsumer.prototype.allGeneratedPositionsFor` and fuzzy searching in the presence of a column option. See issue #177. * Fix a bug with joining a source and its source root when the source is above the root. See issue #182. * Add the `SourceMapConsumer.prototype.hasContentsOfAllSources` method to determine when all sources' contents are inlined into the source map. See issue #190. 
## 0.4.2 * Add an `.npmignore` file so that the benchmarks aren't pulled down by dependent projects. Issue #169. * Add an optional `column` argument to `SourceMapConsumer.prototype.allGeneratedPositionsFor` and better handle lines with no mappings. Issues #172 and #173. ## 0.4.1 * Fix accidentally defining a global variable. #170. ## 0.4.0 * The default direction for fuzzy searching was changed back to its original direction. See #164. * There is now a `bias` option you can supply to `SourceMapConsumer` to control the fuzzy searching direction. See #167. * About an 8% speed up in parsing source maps. See #159. * Added a benchmark for parsing and generating source maps. ## 0.3.0 * Change the default direction that searching for positions fuzzes when there is not an exact match. See #154. * Support for environments using json2.js for JSON serialization. See #156. ## 0.2.0 * Support for consuming "indexed" source maps which do not have any remote sections. See pull request #127. This introduces a minor backwards incompatibility if you are monkey patching `SourceMapConsumer.prototype` methods. ## 0.1.43 * Performance improvements for `SourceMapGenerator` and `SourceNode`. See issue #148 for some discussion and issues #150, #151, and #152 for implementations. ## 0.1.42 * Fix an issue where `SourceNode`s from different versions of the source-map library couldn't be used in conjunction with each other. See issue #142. ## 0.1.41 * Fix a bug with getting the source content of relative sources with a "./" prefix. See issue #145 and [Bug 1090768](bugzil.la/1090768). * Add the `SourceMapConsumer.prototype.computeColumnSpans` method to compute the column span of each mapping. * Add the `SourceMapConsumer.prototype.allGeneratedPositionsFor` method to find all generated positions associated with a given original source and line. ## 0.1.40 * Performance improvements for parsing source maps in SourceMapConsumer. 
## 0.1.39 * Fix a bug where setting a source's contents to null before any source content had been set before threw a TypeError. See issue #131. ## 0.1.38 * Fix a bug where finding relative paths from an empty path were creating absolute paths. See issue #129. ## 0.1.37 * Fix a bug where if the source root was an empty string, relative source paths would turn into absolute source paths. Issue #124. ## 0.1.36 * Allow the `names` mapping property to be an empty string. Issue #121. ## 0.1.35 * A third optional parameter was added to `SourceNode.fromStringWithSourceMap` to specify a path that relative sources in the second parameter should be relative to. Issue #105. * If no file property is given to a `SourceMapGenerator`, then the resulting source map will no longer have a `null` file property. The property will simply not exist. Issue #104. * Fixed a bug where consecutive newlines were ignored in `SourceNode`s. Issue #116. ## 0.1.34 * Make `SourceNode` work with windows style ("\r\n") newlines. Issue #103. * Fix bug involving source contents and the `SourceMapGenerator.prototype.applySourceMap`. Issue #100. ## 0.1.33 * Fix some edge cases surrounding path joining and URL resolution. * Add a third parameter for relative path to `SourceMapGenerator.prototype.applySourceMap`. * Fix issues with mappings and EOLs. ## 0.1.32 * Fixed a bug where SourceMapConsumer couldn't handle negative relative columns (issue 92). * Fixed test runner to actually report number of failed tests as its process exit code. * Fixed a typo when reporting bad mappings (issue 87). ## 0.1.31 * Delay parsing the mappings in SourceMapConsumer until queried for a source location. * Support Sass source maps (which at the time of writing deviate from the spec in small ways) in SourceMapConsumer. ## 0.1.30 * Do not join source root with a source, when the source is a data URI. * Extend the test runner to allow running single specific test files at a time. 
* Performance improvements in `SourceNode.prototype.walk` and `SourceMapConsumer.prototype.eachMapping`. * Source map browser builds will now work inside Workers. * Better error messages when attempting to add an invalid mapping to a `SourceMapGenerator`. ## 0.1.29 * Allow duplicate entries in the `names` and `sources` arrays of source maps (usually from TypeScript) we are parsing. Fixes github issue 72. ## 0.1.28 * Skip duplicate mappings when creating source maps from SourceNode; github issue 75. ## 0.1.27 * Don't throw an error when the `file` property is missing in SourceMapConsumer, we don't use it anyway. ## 0.1.26 * Fix SourceNode.fromStringWithSourceMap for empty maps. Fixes github issue 70. ## 0.1.25 * Make compatible with browserify ## 0.1.24 * Fix issue with absolute paths and `file://` URIs. See https://bugzilla.mozilla.org/show_bug.cgi?id=885597 ## 0.1.23 * Fix issue with absolute paths and sourcesContent, github issue 64. ## 0.1.22 * Ignore duplicate mappings in SourceMapGenerator. Fixes github issue 21. ## 0.1.21 * Fixed handling of sources that start with a slash so that they are relative to the source root's host. ## 0.1.20 * Fixed github issue #43: absolute URLs aren't joined with the source root anymore. ## 0.1.19 * Using Travis CI to run tests. ## 0.1.18 * Fixed a bug in the handling of sourceRoot. ## 0.1.17 * Added SourceNode.fromStringWithSourceMap. ## 0.1.16 * Added missing documentation. * Fixed the generating of empty mappings in SourceNode. ## 0.1.15 * Added SourceMapGenerator.applySourceMap. ## 0.1.14 * The sourceRoot is now handled consistently. ## 0.1.13 * Added SourceMapGenerator.fromSourceMap. ## 0.1.12 * SourceNode now generates empty mappings too. ## 0.1.11 * Added name support to SourceNode. ## 0.1.10 * Added sourcesContent support to the customer and generator.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/CHANGELOG.md
CHANGELOG.md
(function webpackUniversalModuleDefinition(root, factory) { if(typeof exports === 'object' && typeof module === 'object') module.exports = factory(); else if(typeof define === 'function' && define.amd) define([], factory); else if(typeof exports === 'object') exports["sourceMap"] = factory(); else root["sourceMap"] = factory(); })(this, function() { return /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) /******/ return installedModules[moduleId].exports; /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ exports: {}, /******/ id: moduleId, /******/ loaded: false /******/ }; /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ // Flag the module as loaded /******/ module.loaded = true; /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; /******/ // Load entry module and return exports /******/ return __webpack_require__(0); /******/ }) /************************************************************************/ /******/ ([ /* 0 */ /***/ (function(module, exports, __webpack_require__) { /* * Copyright 2009-2011 Mozilla Foundation and contributors * Licensed under the New BSD license. 
See LICENSE.txt or: * http://opensource.org/licenses/BSD-3-Clause */ exports.SourceMapGenerator = __webpack_require__(1).SourceMapGenerator; exports.SourceMapConsumer = __webpack_require__(7).SourceMapConsumer; exports.SourceNode = __webpack_require__(10).SourceNode; /***/ }), /* 1 */ /***/ (function(module, exports, __webpack_require__) { /* -*- Mode: js; js-indent-level: 2; -*- */ /* * Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause */ var base64VLQ = __webpack_require__(2); var util = __webpack_require__(4); var ArraySet = __webpack_require__(5).ArraySet; var MappingList = __webpack_require__(6).MappingList; /** * An instance of the SourceMapGenerator represents a source map which is * being built incrementally. You may pass an object with the following * properties: * * - file: The filename of the generated source. * - sourceRoot: A root for all relative URLs in this source map. */ function SourceMapGenerator(aArgs) { if (!aArgs) { aArgs = {}; } this._file = util.getArg(aArgs, 'file', null); this._sourceRoot = util.getArg(aArgs, 'sourceRoot', null); this._skipValidation = util.getArg(aArgs, 'skipValidation', false); this._sources = new ArraySet(); this._names = new ArraySet(); this._mappings = new MappingList(); this._sourcesContents = null; } SourceMapGenerator.prototype._version = 3; /** * Creates a new SourceMapGenerator based on a SourceMapConsumer * * @param aSourceMapConsumer The SourceMap. 
*/ SourceMapGenerator.fromSourceMap = function SourceMapGenerator_fromSourceMap(aSourceMapConsumer) { var sourceRoot = aSourceMapConsumer.sourceRoot; var generator = new SourceMapGenerator({ file: aSourceMapConsumer.file, sourceRoot: sourceRoot }); aSourceMapConsumer.eachMapping(function (mapping) { var newMapping = { generated: { line: mapping.generatedLine, column: mapping.generatedColumn } }; if (mapping.source != null) { newMapping.source = mapping.source; if (sourceRoot != null) { newMapping.source = util.relative(sourceRoot, newMapping.source); } newMapping.original = { line: mapping.originalLine, column: mapping.originalColumn }; if (mapping.name != null) { newMapping.name = mapping.name; } } generator.addMapping(newMapping); }); aSourceMapConsumer.sources.forEach(function (sourceFile) { var sourceRelative = sourceFile; if (sourceRoot !== null) { sourceRelative = util.relative(sourceRoot, sourceFile); } if (!generator._sources.has(sourceRelative)) { generator._sources.add(sourceRelative); } var content = aSourceMapConsumer.sourceContentFor(sourceFile); if (content != null) { generator.setSourceContent(sourceFile, content); } }); return generator; }; /** * Add a single mapping from original source line and column to the generated * source's line and column for this source map being created. The mapping * object should have the following properties: * * - generated: An object with the generated line and column positions. * - original: An object with the original line and column positions. * - source: The original source file (relative to the sourceRoot). * - name: An optional original token name for this mapping. 
 */
SourceMapGenerator.prototype.addMapping =
  function SourceMapGenerator_addMapping(aArgs) {
    var generated = util.getArg(aArgs, 'generated');
    var original = util.getArg(aArgs, 'original', null);
    var source = util.getArg(aArgs, 'source', null);
    var name = util.getArg(aArgs, 'name', null);

    if (!this._skipValidation) {
      this._validateMapping(generated, original, source, name);
    }

    if (source != null) {
      source = String(source);
      if (!this._sources.has(source)) {
        this._sources.add(source);
      }
    }

    if (name != null) {
      name = String(name);
      if (!this._names.has(name)) {
        this._names.add(name);
      }
    }

    this._mappings.add({
      generatedLine: generated.line,
      generatedColumn: generated.column,
      // NOTE: `original != null && original.line` yields `false` (not null)
      // when original is absent — downstream code treats both as "no
      // original position".
      originalLine: original != null && original.line,
      originalColumn: original != null && original.column,
      source: source,
      name: name
    });
  };

/**
 * Set the source content for a source file.
 */
SourceMapGenerator.prototype.setSourceContent =
  function SourceMapGenerator_setSourceContent(aSourceFile, aSourceContent) {
    var source = aSourceFile;
    if (this._sourceRoot != null) {
      source = util.relative(this._sourceRoot, source);
    }

    if (aSourceContent != null) {
      // Add the source content to the _sourcesContents map.
      // Create a new _sourcesContents map if the property is null.
      if (!this._sourcesContents) {
        this._sourcesContents = Object.create(null);
      }
      this._sourcesContents[util.toSetString(source)] = aSourceContent;
    } else if (this._sourcesContents) {
      // Remove the source file from the _sourcesContents map.
      // If the _sourcesContents map is empty, set the property to null.
      delete this._sourcesContents[util.toSetString(source)];
      if (Object.keys(this._sourcesContents).length === 0) {
        this._sourcesContents = null;
      }
    }
  };

/**
 * Applies the mappings of a sub-source-map for a specific source file to the
 * source map being generated. Each mapping to the supplied source file is
 * rewritten using the supplied source map. Note: The resolution for the
 * resulting mappings is the minimum of this map and the supplied map.
 *
 * @param aSourceMapConsumer The source map to be applied.
 * @param aSourceFile Optional. The filename of the source file.
 *        If omitted, SourceMapConsumer's file property will be used.
 * @param aSourceMapPath Optional. The dirname of the path to the source map
 *        to be applied. If relative, it is relative to the SourceMapConsumer.
 *        This parameter is needed when the two source maps aren't in the same
 *        directory, and the source map to be applied contains relative source
 *        paths. If so, those relative source paths need to be rewritten
 *        relative to the SourceMapGenerator.
 */
SourceMapGenerator.prototype.applySourceMap =
  function SourceMapGenerator_applySourceMap(aSourceMapConsumer, aSourceFile,
                                             aSourceMapPath) {
    var sourceFile = aSourceFile;
    // If aSourceFile is omitted, we will use the file property of the SourceMap
    if (aSourceFile == null) {
      if (aSourceMapConsumer.file == null) {
        throw new Error(
          'SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, ' +
          'or the source map\'s "file" property. Both were omitted.'
        );
      }
      sourceFile = aSourceMapConsumer.file;
    }
    var sourceRoot = this._sourceRoot;
    // Make "sourceFile" relative if an absolute Url is passed.
    if (sourceRoot != null) {
      sourceFile = util.relative(sourceRoot, sourceFile);
    }
    // Applying the SourceMap can add and remove items from the sources and
    // the names array.
    var newSources = new ArraySet();
    var newNames = new ArraySet();

    // Find mappings for the "sourceFile"
    this._mappings.unsortedForEach(function (mapping) {
      if (mapping.source === sourceFile && mapping.originalLine != null) {
        // Check if it can be mapped by the source map, then update the mapping.
        var original = aSourceMapConsumer.originalPositionFor({
          line: mapping.originalLine,
          column: mapping.originalColumn
        });
        if (original.source != null) {
          // Copy mapping
          mapping.source = original.source;
          if (aSourceMapPath != null) {
            mapping.source = util.join(aSourceMapPath, mapping.source)
          }
          if (sourceRoot != null) {
            mapping.source = util.relative(sourceRoot, mapping.source);
          }
          mapping.originalLine = original.line;
          mapping.originalColumn = original.column;
          if (original.name != null) {
            mapping.name = original.name;
          }
        }
      }

      var source = mapping.source;
      if (source != null && !newSources.has(source)) {
        newSources.add(source);
      }

      var name = mapping.name;
      if (name != null && !newNames.has(name)) {
        newNames.add(name);
      }

    }, this);
    this._sources = newSources;
    this._names = newNames;

    // Copy sourcesContents of applied map.
    aSourceMapConsumer.sources.forEach(function (sourceFile) {
      var content = aSourceMapConsumer.sourceContentFor(sourceFile);
      if (content != null) {
        if (aSourceMapPath != null) {
          sourceFile = util.join(aSourceMapPath, sourceFile);
        }
        if (sourceRoot != null) {
          sourceFile = util.relative(sourceRoot, sourceFile);
        }
        this.setSourceContent(sourceFile, content);
      }
    }, this);
  };

/**
 * A mapping can have one of the three levels of data:
 *
 *   1. Just the generated position.
 *   2. The generated position, original position, and original source.
 *   3. Generated and original position, original source, as well as a name
 *      token.
 *
 * To maintain consistency, we validate that any new mapping being added falls
 * in to one of these categories.
 */
SourceMapGenerator.prototype._validateMapping =
  function SourceMapGenerator_validateMapping(aGenerated, aOriginal, aSource,
                                              aName) {
    // When aOriginal is truthy but has empty values for .line and .column,
    // it is most likely a programmer error. In this case we throw a very
    // specific error message to try to guide them the right way.
    // For example: https://github.com/Polymer/polymer-bundler/pull/519
    if (aOriginal && typeof aOriginal.line !== 'number' && typeof aOriginal.column !== 'number') {
        throw new Error(
            'original.line and original.column are not numbers -- you probably meant to omit ' +
            'the original mapping entirely and only map the generated position. If so, pass ' +
            'null for the original mapping instead of an object with empty or null values.'
        );
    }

    if (aGenerated && 'line' in aGenerated && 'column' in aGenerated
        && aGenerated.line > 0 && aGenerated.column >= 0
        && !aOriginal && !aSource && !aName) {
      // Case 1.
      return;
    }
    else if (aGenerated && 'line' in aGenerated && 'column' in aGenerated
             && aOriginal && 'line' in aOriginal && 'column' in aOriginal
             && aGenerated.line > 0 && aGenerated.column >= 0
             && aOriginal.line > 0 && aOriginal.column >= 0
             && aSource) {
      // Cases 2 and 3.
      return;
    }
    else {
      throw new Error('Invalid mapping: ' + JSON.stringify({
        generated: aGenerated,
        source: aSource,
        original: aOriginal,
        name: aName
      }));
    }
  };

/**
 * Serialize the accumulated mappings into the stream of base 64 VLQs
 * specified by the source map format.
 */
SourceMapGenerator.prototype._serializeMappings =
  function SourceMapGenerator_serializeMappings() {
    var previousGeneratedColumn = 0;
    var previousGeneratedLine = 1;
    var previousOriginalColumn = 0;
    var previousOriginalLine = 0;
    var previousName = 0;
    var previousSource = 0;
    var result = '';
    var next;
    var mapping;
    var nameIdx;
    var sourceIdx;

    var mappings = this._mappings.toArray();
    for (var i = 0, len = mappings.length; i < len; i++) {
      mapping = mappings[i];
      next = ''

      if (mapping.generatedLine !== previousGeneratedLine) {
        previousGeneratedColumn = 0;
        while (mapping.generatedLine !== previousGeneratedLine) {
          next += ';';
          previousGeneratedLine++;
        }
      }
      else {
        if (i > 0) {
          // Skip exact-duplicate consecutive mappings on the same line.
          if (!util.compareByGeneratedPositionsInflated(mapping, mappings[i - 1])) {
            continue;
          }
          next += ',';
        }
      }

      next += base64VLQ.encode(mapping.generatedColumn
                               - previousGeneratedColumn);
      previousGeneratedColumn = mapping.generatedColumn;

      if (mapping.source != null) {
        sourceIdx = this._sources.indexOf(mapping.source);
        next += base64VLQ.encode(sourceIdx - previousSource);
        previousSource = sourceIdx;

        // lines are stored 0-based in SourceMap spec version 3
        next += base64VLQ.encode(mapping.originalLine - 1
                                 - previousOriginalLine);
        previousOriginalLine = mapping.originalLine - 1;

        next += base64VLQ.encode(mapping.originalColumn
                                 - previousOriginalColumn);
        previousOriginalColumn = mapping.originalColumn;

        if (mapping.name != null) {
          nameIdx = this._names.indexOf(mapping.name);
          next += base64VLQ.encode(nameIdx - previousName);
          previousName = nameIdx;
        }
      }

      result += next;
    }

    return result;
  };

SourceMapGenerator.prototype._generateSourcesContent =
  function SourceMapGenerator_generateSourcesContent(aSources, aSourceRoot) {
    return aSources.map(function (source) {
      if (!this._sourcesContents) {
        return null;
      }
      if (aSourceRoot != null) {
        source = util.relative(aSourceRoot, source);
      }
      var key = util.toSetString(source);
      return Object.prototype.hasOwnProperty.call(this._sourcesContents, key)
        ? this._sourcesContents[key]
        : null;
    }, this);
  };

/**
 * Externalize the source map.
 */
SourceMapGenerator.prototype.toJSON =
  function SourceMapGenerator_toJSON() {
    var map = {
      version: this._version,
      sources: this._sources.toArray(),
      names: this._names.toArray(),
      mappings: this._serializeMappings()
    };
    if (this._file != null) {
      map.file = this._file;
    }
    if (this._sourceRoot != null) {
      map.sourceRoot = this._sourceRoot;
    }
    if (this._sourcesContents) {
      map.sourcesContent = this._generateSourcesContent(map.sources, map.sourceRoot);
    }

    return map;
  };

/**
 * Render the source map being generated to a string.
 */
SourceMapGenerator.prototype.toString =
  function SourceMapGenerator_toString() {
    return JSON.stringify(this.toJSON());
  };

exports.SourceMapGenerator = SourceMapGenerator;


/***/ }),
/* 2 */
/***/ (function(module, exports, __webpack_require__) {

/* -*- Mode: js; js-indent-level: 2; -*- */
/*
 * Copyright 2011 Mozilla Foundation and contributors
 * Licensed under the New BSD license. See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 *
 * Based on the Base 64 VLQ implementation in Closure Compiler:
 * https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java
 *
 * Copyright 2011 The Closure Compiler Authors. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *  * Neither the name of Google Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

var base64 = __webpack_require__(3);

// A single base 64 digit can contain 6 bits of data. For the base 64 variable
// length quantities we use in the source map spec, the first bit is the sign,
// the next four bits are the actual value, and the 6th bit is the
// continuation bit. The continuation bit tells us whether there are more
// digits in this value following this digit.
//
//   Continuation
//   |    Sign
//   |    |
//   V    V
//   101011

var VLQ_BASE_SHIFT = 5;

// binary: 100000
var VLQ_BASE = 1 << VLQ_BASE_SHIFT;

// binary: 011111
var VLQ_BASE_MASK = VLQ_BASE - 1;

// binary: 100000
var VLQ_CONTINUATION_BIT = VLQ_BASE;

/**
 * Converts from a two-complement value to a value where the sign bit is
 * placed in the least significant bit. For example, as decimals:
 *   1 becomes 2 (10 binary), -1 becomes 3 (11 binary)
 *   2 becomes 4 (100 binary), -2 becomes 5 (101 binary)
 */
function toVLQSigned(aValue) {
  return aValue < 0
    ? ((-aValue) << 1) + 1
    : (aValue << 1) + 0;
}

/**
 * Converts to a two-complement value from a value where the sign bit is
 * placed in the least significant bit. For example, as decimals:
 *   2 (10 binary) becomes 1, 3 (11 binary) becomes -1
 *   4 (100 binary) becomes 2, 5 (101 binary) becomes -2
 */
function fromVLQSigned(aValue) {
  var isNegative = (aValue & 1) === 1;
  var shifted = aValue >> 1;
  return isNegative
    ? -shifted
    : shifted;
}

/**
 * Returns the base 64 VLQ encoded value.
 */
exports.encode = function base64VLQ_encode(aValue) {
  var encoded = "";
  var digit;

  var vlq = toVLQSigned(aValue);

  do {
    digit = vlq & VLQ_BASE_MASK;
    vlq >>>= VLQ_BASE_SHIFT;
    if (vlq > 0) {
      // There are still more digits in this value, so we must make sure the
      // continuation bit is marked.
      digit |= VLQ_CONTINUATION_BIT;
    }
    encoded += base64.encode(digit);
  } while (vlq > 0);

  return encoded;
};

/**
 * Decodes the next base 64 VLQ value from the given string and returns the
 * value and the rest of the string via the out parameter.
 */
exports.decode = function base64VLQ_decode(aStr, aIndex, aOutParam) {
  var strLen = aStr.length;
  var result = 0;
  var shift = 0;
  var continuation, digit;

  do {
    if (aIndex >= strLen) {
      throw new Error("Expected more digits in base 64 VLQ value.");
    }

    digit = base64.decode(aStr.charCodeAt(aIndex++));
    if (digit === -1) {
      throw new Error("Invalid base64 digit: " + aStr.charAt(aIndex - 1));
    }

    continuation = !!(digit & VLQ_CONTINUATION_BIT);
    digit &= VLQ_BASE_MASK;
    result = result + (digit << shift);
    shift += VLQ_BASE_SHIFT;
  } while (continuation);

  aOutParam.value = fromVLQSigned(result);
  aOutParam.rest = aIndex;
};


/***/ }),
/* 3 */
/***/ (function(module, exports) {

/* -*- Mode: js; js-indent-level: 2; -*- */
/*
 * Copyright 2011 Mozilla Foundation and contributors
 * Licensed under the New BSD license. See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 */

var intToCharMap = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'.split('');

/**
 * Encode an integer in the range of 0 to 63 to a single base 64 digit.
*/ exports.encode = function (number) { if (0 <= number && number < intToCharMap.length) { return intToCharMap[number]; } throw new TypeError("Must be between 0 and 63: " + number); }; /** * Decode a single base 64 character code digit to an integer. Returns -1 on * failure. */ exports.decode = function (charCode) { var bigA = 65; // 'A' var bigZ = 90; // 'Z' var littleA = 97; // 'a' var littleZ = 122; // 'z' var zero = 48; // '0' var nine = 57; // '9' var plus = 43; // '+' var slash = 47; // '/' var littleOffset = 26; var numberOffset = 52; // 0 - 25: ABCDEFGHIJKLMNOPQRSTUVWXYZ if (bigA <= charCode && charCode <= bigZ) { return (charCode - bigA); } // 26 - 51: abcdefghijklmnopqrstuvwxyz if (littleA <= charCode && charCode <= littleZ) { return (charCode - littleA + littleOffset); } // 52 - 61: 0123456789 if (zero <= charCode && charCode <= nine) { return (charCode - zero + numberOffset); } // 62: + if (charCode == plus) { return 62; } // 63: / if (charCode == slash) { return 63; } // Invalid base64 digit. return -1; }; /***/ }), /* 4 */ /***/ (function(module, exports) { /* -*- Mode: js; js-indent-level: 2; -*- */ /* * Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause */ /** * This is a helper function for getting values from parameter/options * objects. * * @param args The object we are extracting values from * @param name The name of the property we are getting. * @param defaultValue An optional value to return if the property is missing * from the object. If this is not specified and the property is missing, an * error will be thrown. 
 */
function getArg(aArgs, aName, aDefaultValue) {
  if (aName in aArgs) {
    return aArgs[aName];
  } else if (arguments.length === 3) {
    return aDefaultValue;
  } else {
    throw new Error('"' + aName + '" is a required argument.');
  }
}
exports.getArg = getArg;

var urlRegexp = /^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/;
var dataUrlRegexp = /^data:.+\,.+$/;

function urlParse(aUrl) {
  var match = aUrl.match(urlRegexp);
  if (!match) {
    return null;
  }
  return {
    scheme: match[1],
    auth: match[2],
    host: match[3],
    port: match[4],
    path: match[5]
  };
}
exports.urlParse = urlParse;

function urlGenerate(aParsedUrl) {
  var url = '';
  if (aParsedUrl.scheme) {
    url += aParsedUrl.scheme + ':';
  }
  url += '//';
  if (aParsedUrl.auth) {
    url += aParsedUrl.auth + '@';
  }
  if (aParsedUrl.host) {
    url += aParsedUrl.host;
  }
  if (aParsedUrl.port) {
    url += ":" + aParsedUrl.port
  }
  if (aParsedUrl.path) {
    url += aParsedUrl.path;
  }
  return url;
}
exports.urlGenerate = urlGenerate;

/**
 * Normalizes a path, or the path portion of a URL:
 *
 * - Replaces consecutive slashes with one slash.
 * - Removes unnecessary '.' parts.
 * - Removes unnecessary '<dir>/..' parts.
 *
 * Based on code in the Node.js 'path' core module.
 *
 * @param aPath The path or url to normalize.
 */
function normalize(aPath) {
  var path = aPath;
  var url = urlParse(aPath);
  if (url) {
    if (!url.path) {
      return aPath;
    }
    path = url.path;
  }
  var isAbsolute = exports.isAbsolute(path);

  var parts = path.split(/\/+/);
  for (var part, up = 0, i = parts.length - 1; i >= 0; i--) {
    part = parts[i];
    if (part === '.') {
      parts.splice(i, 1);
    } else if (part === '..') {
      up++;
    } else if (up > 0) {
      if (part === '') {
        // The first part is blank if the path is absolute. Trying to go
        // above the root is a no-op. Therefore we can remove all '..' parts
        // directly after the root.
        parts.splice(i + 1, up);
        up = 0;
      } else {
        parts.splice(i, 2);
        up--;
      }
    }
  }
  path = parts.join('/');

  if (path === '') {
    path = isAbsolute ? '/' : '.';
  }

  if (url) {
    url.path = path;
    return urlGenerate(url);
  }
  return path;
}
exports.normalize = normalize;

/**
 * Joins two paths/URLs.
 *
 * @param aRoot The root path or URL.
 * @param aPath The path or URL to be joined with the root.
 *
 * - If aPath is a URL or a data URI, aPath is returned, unless aPath is a
 *   scheme-relative URL: Then the scheme of aRoot, if any, is prepended
 *   first.
 * - Otherwise aPath is a path. If aRoot is a URL, then its path portion
 *   is updated with the result and aRoot is returned. Otherwise the result
 *   is returned.
 *   - If aPath is absolute, the result is aPath.
 *   - Otherwise the two paths are joined with a slash.
 * - Joining for example 'http://' and 'www.example.com' is also supported.
 */
function join(aRoot, aPath) {
  if (aRoot === "") {
    aRoot = ".";
  }
  if (aPath === "") {
    aPath = ".";
  }
  var aPathUrl = urlParse(aPath);
  var aRootUrl = urlParse(aRoot);
  if (aRootUrl) {
    aRoot = aRootUrl.path || '/';
  }

  // `join(foo, '//www.example.org')`
  if (aPathUrl && !aPathUrl.scheme) {
    if (aRootUrl) {
      aPathUrl.scheme = aRootUrl.scheme;
    }
    return urlGenerate(aPathUrl);
  }

  if (aPathUrl || aPath.match(dataUrlRegexp)) {
    return aPath;
  }

  // `join('http://', 'www.example.com')`
  if (aRootUrl && !aRootUrl.host && !aRootUrl.path) {
    aRootUrl.host = aPath;
    return urlGenerate(aRootUrl);
  }

  var joined = aPath.charAt(0) === '/'
    ? aPath
    : normalize(aRoot.replace(/\/+$/, '') + '/' + aPath);

  if (aRootUrl) {
    aRootUrl.path = joined;
    return urlGenerate(aRootUrl);
  }
  return joined;
}
exports.join = join;

exports.isAbsolute = function (aPath) {
  return aPath.charAt(0) === '/' || urlRegexp.test(aPath);
};

/**
 * Make a path relative to a URL or another path.
 *
 * @param aRoot The root path or URL.
 * @param aPath The path or URL to be made relative to aRoot.
 */
function relative(aRoot, aPath) {
  if (aRoot === "") {
    aRoot = ".";
  }

  aRoot = aRoot.replace(/\/$/, '');

  // It is possible for the path to be above the root.
  // In this case, simply
  // checking whether the root is a prefix of the path won't work. Instead, we
  // need to remove components from the root one by one, until either we find
  // a prefix that fits, or we run out of components to remove.
  var level = 0;
  while (aPath.indexOf(aRoot + '/') !== 0) {
    var index = aRoot.lastIndexOf("/");
    if (index < 0) {
      return aPath;
    }

    // If the only part of the root that is left is the scheme (i.e. http://,
    // file:///, etc.), one or more slashes (/), or simply nothing at all, we
    // have exhausted all components, so the path is not relative to the root.
    aRoot = aRoot.slice(0, index);
    if (aRoot.match(/^([^\/]+:\/)?\/*$/)) {
      return aPath;
    }

    ++level;
  }

  // Make sure we add a "../" for each component we removed from the root.
  return Array(level + 1).join("../") + aPath.substr(aRoot.length + 1);
}
exports.relative = relative;

var supportsNullProto = (function () {
  var obj = Object.create(null);
  return !('__proto__' in obj);
}());

function identity (s) {
  return s;
}

/**
 * Because behavior goes wacky when you set `__proto__` on objects, we
 * have to prefix all the strings in our set with an arbitrary character.
 *
 * See https://github.com/mozilla/source-map/pull/31 and
 * https://github.com/mozilla/source-map/issues/30
 *
 * @param String aStr
 */
function toSetString(aStr) {
  if (isProtoString(aStr)) {
    return '$' + aStr;
  }

  return aStr;
}
exports.toSetString = supportsNullProto ? identity : toSetString;

function fromSetString(aStr) {
  if (isProtoString(aStr)) {
    return aStr.slice(1);
  }

  return aStr;
}
exports.fromSetString = supportsNullProto ? identity : fromSetString;

function isProtoString(s) {
  if (!s) {
    return false;
  }

  var length = s.length;

  if (length < 9 /* "__proto__".length */) {
    return false;
  }

  if (s.charCodeAt(length - 1) !== 95  /* '_' */ ||
      s.charCodeAt(length - 2) !== 95  /* '_' */ ||
      s.charCodeAt(length - 3) !== 111 /* 'o' */ ||
      s.charCodeAt(length - 4) !== 116 /* 't' */ ||
      s.charCodeAt(length - 5) !== 111 /* 'o' */ ||
      s.charCodeAt(length - 6) !== 114 /* 'r' */ ||
      s.charCodeAt(length - 7) !== 112 /* 'p' */ ||
      s.charCodeAt(length - 8) !== 95  /* '_' */ ||
      s.charCodeAt(length - 9) !== 95  /* '_' */) {
    return false;
  }

  for (var i = length - 10; i >= 0; i--) {
    if (s.charCodeAt(i) !== 36 /* '$' */) {
      return false;
    }
  }

  return true;
}

/**
 * Comparator between two mappings where the original positions are compared.
 *
 * Optionally pass in `true` as `onlyCompareGenerated` to consider two
 * mappings with the same original source/line/column, but different generated
 * line and column the same. Useful when searching for a mapping with a
 * stubbed out mapping.
 */
function compareByOriginalPositions(mappingA, mappingB, onlyCompareOriginal) {
  var cmp = strcmp(mappingA.source, mappingB.source);
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.originalLine - mappingB.originalLine;
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.originalColumn - mappingB.originalColumn;
  if (cmp !== 0 || onlyCompareOriginal) {
    return cmp;
  }

  cmp = mappingA.generatedColumn - mappingB.generatedColumn;
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.generatedLine - mappingB.generatedLine;
  if (cmp !== 0) {
    return cmp;
  }

  return strcmp(mappingA.name, mappingB.name);
}
exports.compareByOriginalPositions = compareByOriginalPositions;

/**
 * Comparator between two mappings with deflated source and name indices where
 * the generated positions are compared.
 *
 * Optionally pass in `true` as `onlyCompareGenerated` to consider two
 * mappings with the same generated line and column, but different
 * source/name/original line and column the same.
 * Useful when searching for a
 * mapping with a stubbed out mapping.
 */
function compareByGeneratedPositionsDeflated(mappingA, mappingB, onlyCompareGenerated) {
  var cmp = mappingA.generatedLine - mappingB.generatedLine;
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.generatedColumn - mappingB.generatedColumn;
  if (cmp !== 0 || onlyCompareGenerated) {
    return cmp;
  }

  cmp = strcmp(mappingA.source, mappingB.source);
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.originalLine - mappingB.originalLine;
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.originalColumn - mappingB.originalColumn;
  if (cmp !== 0) {
    return cmp;
  }

  return strcmp(mappingA.name, mappingB.name);
}
exports.compareByGeneratedPositionsDeflated = compareByGeneratedPositionsDeflated;

// Null-tolerant string comparison: nulls sort after non-null strings.
function strcmp(aStr1, aStr2) {
  if (aStr1 === aStr2) {
    return 0;
  }

  if (aStr1 === null) {
    return 1; // aStr2 !== null
  }

  if (aStr2 === null) {
    return -1; // aStr1 !== null
  }

  if (aStr1 > aStr2) {
    return 1;
  }

  return -1;
}

/**
 * Comparator between two mappings with inflated source and name strings where
 * the generated positions are compared.
 */
function compareByGeneratedPositionsInflated(mappingA, mappingB) {
  var cmp = mappingA.generatedLine - mappingB.generatedLine;
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.generatedColumn - mappingB.generatedColumn;
  if (cmp !== 0) {
    return cmp;
  }

  cmp = strcmp(mappingA.source, mappingB.source);
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.originalLine - mappingB.originalLine;
  if (cmp !== 0) {
    return cmp;
  }

  cmp = mappingA.originalColumn - mappingB.originalColumn;
  if (cmp !== 0) {
    return cmp;
  }

  return strcmp(mappingA.name, mappingB.name);
}
exports.compareByGeneratedPositionsInflated = compareByGeneratedPositionsInflated;

/**
 * Strip any JSON XSSI avoidance prefix from the string (as documented
 * in the source maps specification), and then parse the string as
 * JSON.
 */
function parseSourceMapInput(str) {
  return JSON.parse(str.replace(/^\)]}'[^\n]*\n/, ''));
}
exports.parseSourceMapInput = parseSourceMapInput;

/**
 * Compute the URL of a source given the source root, the source's
 * URL, and the source map's URL.
 */
function computeSourceURL(sourceRoot, sourceURL, sourceMapURL) {
  sourceURL = sourceURL || '';

  if (sourceRoot) {
    // This follows what Chrome does.
    if (sourceRoot[sourceRoot.length - 1] !== '/' && sourceURL[0] !== '/') {
      sourceRoot += '/';
    }
    // The spec says:
    //   Line 4: An optional source root, useful for relocating source
    //   files on a server or removing repeated values in the
    //   “sources” entry.  This value is prepended to the individual
    //   entries in the “source” field.
    sourceURL = sourceRoot + sourceURL;
  }

  // Historically, SourceMapConsumer did not take the sourceMapURL as
  // a parameter.  This mode is still somewhat supported, which is why
  // this code block is conditional.  However, it's preferable to pass
  // the source map URL to SourceMapConsumer, so that this function
  // can implement the source URL resolution algorithm as outlined in
  // the spec.  This block is basically the equivalent of:
  //    new URL(sourceURL, sourceMapURL).toString()
  // ... except it avoids using URL, which wasn't available in the
  // older releases of node still supported by this library.
  //
  // The spec says:
  //   If the sources are not absolute URLs after prepending of the
  //   “sourceRoot”, the sources are resolved relative to the
  //   SourceMap (like resolving script src in a html document).
  if (sourceMapURL) {
    var parsed = urlParse(sourceMapURL);
    if (!parsed) {
      throw new Error("sourceMapURL could not be parsed");
    }
    if (parsed.path) {
      // Strip the last path component, but keep the "/".
      var index = parsed.path.lastIndexOf('/');
      if (index >= 0) {
        parsed.path = parsed.path.substring(0, index + 1);
      }
    }
    sourceURL = join(urlGenerate(parsed), sourceURL);
  }

  return normalize(sourceURL);
}
exports.computeSourceURL = computeSourceURL;


/***/ }),
/* 5 */
/***/ (function(module, exports, __webpack_require__) {

/* -*- Mode: js; js-indent-level: 2; -*- */
/*
 * Copyright 2011 Mozilla Foundation and contributors
 * Licensed under the New BSD license. See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 */

var util = __webpack_require__(4);
var has = Object.prototype.hasOwnProperty;
var hasNativeMap = typeof Map !== "undefined";

/**
 * A data structure which is a combination of an array and a set. Adding a new
 * member is O(1), testing for membership is O(1), and finding the index of an
 * element is O(1). Removing elements from the set is not supported. Only
 * strings are supported for membership.
 */
function ArraySet() {
  this._array = [];
  this._set = hasNativeMap ? new Map() : Object.create(null);
}

/**
 * Static method for creating ArraySet instances from an existing array.
 */
ArraySet.fromArray = function ArraySet_fromArray(aArray, aAllowDuplicates) {
  var set = new ArraySet();
  for (var i = 0, len = aArray.length; i < len; i++) {
    set.add(aArray[i], aAllowDuplicates);
  }
  return set;
};

/**
 * Return how many unique items are in this ArraySet. If duplicates have been
 * added, then those do not count towards the size.
 *
 * @returns Number
 */
ArraySet.prototype.size = function ArraySet_size() {
  return hasNativeMap ? this._set.size : Object.getOwnPropertyNames(this._set).length;
};

/**
 * Add the given string to this set.
 *
 * @param String aStr
 */
ArraySet.prototype.add = function ArraySet_add(aStr, aAllowDuplicates) {
  var sStr = hasNativeMap ? aStr : util.toSetString(aStr);
  var isDuplicate = hasNativeMap ? this.has(aStr) : has.call(this._set, sStr);
  var idx = this._array.length;
  if (!isDuplicate || aAllowDuplicates) {
    this._array.push(aStr);
  }
  if (!isDuplicate) {
    if (hasNativeMap) {
      this._set.set(aStr, idx);
    } else {
      this._set[sStr] = idx;
    }
  }
};

/**
 * Is the given string a member of this set?
 *
 * @param String aStr
 */
ArraySet.prototype.has = function ArraySet_has(aStr) {
  if (hasNativeMap) {
    return this._set.has(aStr);
  } else {
    var sStr = util.toSetString(aStr);
    return has.call(this._set, sStr);
  }
};

/**
 * What is the index of the given string in the array?
 *
 * @param String aStr
 */
ArraySet.prototype.indexOf = function ArraySet_indexOf(aStr) {
  if (hasNativeMap) {
    var idx = this._set.get(aStr);
    if (idx >= 0) {
        return idx;
    }
  } else {
    var sStr = util.toSetString(aStr);
    if (has.call(this._set, sStr)) {
      return this._set[sStr];
    }
  }

  throw new Error('"' + aStr + '" is not in the set.');
};

/**
 * What is the element at the given index?
 *
 * @param Number aIdx
 */
ArraySet.prototype.at = function ArraySet_at(aIdx) {
  if (aIdx >= 0 && aIdx < this._array.length) {
    return this._array[aIdx];
  }
  throw new Error('No element indexed by ' + aIdx);
};

/**
 * Returns the array representation of this set (which has the proper indices
 * indicated by indexOf). Note that this is a copy of the internal array used
 * for storing the members so that no one can mess with internal state.
 */
ArraySet.prototype.toArray = function ArraySet_toArray() {
  return this._array.slice();
};

exports.ArraySet = ArraySet;


/***/ }),
/* 6 */
/***/ (function(module, exports, __webpack_require__) {

/* -*- Mode: js; js-indent-level: 2; -*- */
/*
 * Copyright 2014 Mozilla Foundation and contributors
 * Licensed under the New BSD license. See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 */

var util = __webpack_require__(4);

/**
 * Determine whether mappingB is after mappingA with respect to generated
 * position.
 */
function generatedPositionAfter(mappingA, mappingB) {
  // Optimized for most common case
  var lineA = mappingA.generatedLine;
  var lineB = mappingB.generatedLine;
  var columnA = mappingA.generatedColumn;
  var columnB = mappingB.generatedColumn;
  return lineB > lineA || lineB == lineA && columnB >= columnA ||
         util.compareByGeneratedPositionsInflated(mappingA, mappingB) <= 0;
}

/**
 * A data structure to provide a sorted view of accumulated mappings in a
 * performance conscious manner. It trades a negligible overhead in general
 * case for a large speedup in case of mappings being added in order.
 */
function MappingList() {
  this._array = [];
  this._sorted = true;
  // Serves as infimum
  this._last = {generatedLine: -1, generatedColumn: 0};
}

/**
 * Iterate through internal items. This method takes the same arguments that
 * `Array.prototype.forEach` takes.
 *
 * NOTE: The order of the mappings is NOT guaranteed.
 */
MappingList.prototype.unsortedForEach =
  function MappingList_forEach(aCallback, aThisArg) {
    this._array.forEach(aCallback, aThisArg);
  };

/**
 * Add the given source mapping.
 *
 * @param Object aMapping
 */
MappingList.prototype.add = function MappingList_add(aMapping) {
  if (generatedPositionAfter(this._last, aMapping)) {
    this._last = aMapping;
    this._array.push(aMapping);
  } else {
    this._sorted = false;
    this._array.push(aMapping);
  }
};

/**
 * Returns the flat, sorted array of mappings. The mappings are sorted by
 * generated position.
 *
 * WARNING: This method returns internal data without copying, for
 * performance. The return value must NOT be mutated, and should be treated as
 * an immutable borrow. If you want to take ownership, you must make your own
 * copy.
 */
MappingList.prototype.toArray = function MappingList_toArray() {
  if (!this._sorted) {
    this._array.sort(util.compareByGeneratedPositionsInflated);
    this._sorted = true;
  }
  return this._array;
};

exports.MappingList = MappingList;


/***/ }),
/* 7 */
/***/ (function(module, exports, __webpack_require__) {

/* -*- Mode: js; js-indent-level: 2; -*- */
/*
 * Copyright 2011 Mozilla Foundation and contributors
 * Licensed under the New BSD license. See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 */

var util = __webpack_require__(4);
var binarySearch = __webpack_require__(8);
var ArraySet = __webpack_require__(5).ArraySet;
var base64VLQ = __webpack_require__(2);
var quickSort = __webpack_require__(9).quickSort;

function SourceMapConsumer(aSourceMap, aSourceMapURL) {
  var sourceMap = aSourceMap;
  if (typeof aSourceMap === 'string') {
    sourceMap = util.parseSourceMapInput(aSourceMap);
  }

  return sourceMap.sections != null
    ? new IndexedSourceMapConsumer(sourceMap, aSourceMapURL)
    : new BasicSourceMapConsumer(sourceMap, aSourceMapURL);
}

SourceMapConsumer.fromSourceMap = function(aSourceMap, aSourceMapURL) {
  return BasicSourceMapConsumer.fromSourceMap(aSourceMap, aSourceMapURL);
}

/**
 * The version of the source mapping spec that we are consuming.
 */
SourceMapConsumer.prototype._version = 3;

// `__generatedMappings` and `__originalMappings` are arrays that hold the
// parsed mapping coordinates from the source map's "mappings" attribute. They
// are lazily instantiated, accessed via the `_generatedMappings` and
// `_originalMappings` getters respectively, and we only parse the mappings
// and create these arrays once queried for a source location. We jump through
// these hoops because there can be many thousands of mappings, and parsing
// them is expensive, so we only want to do it if we must.
// // Each object in the arrays is of the form: // // { // generatedLine: The line number in the generated code, // generatedColumn: The column number in the generated code, // source: The path to the original source file that generated this // chunk of code, // originalLine: The line number in the original source that // corresponds to this chunk of generated code, // originalColumn: The column number in the original source that // corresponds to this chunk of generated code, // name: The name of the original symbol which generated this chunk of // code. // } // // All properties except for `generatedLine` and `generatedColumn` can be // `null`. // // `_generatedMappings` is ordered by the generated positions. // // `_originalMappings` is ordered by the original positions. SourceMapConsumer.prototype.__generatedMappings = null; Object.defineProperty(SourceMapConsumer.prototype, '_generatedMappings', { configurable: true, enumerable: true, get: function () { if (!this.__generatedMappings) { this._parseMappings(this._mappings, this.sourceRoot); } return this.__generatedMappings; } }); SourceMapConsumer.prototype.__originalMappings = null; Object.defineProperty(SourceMapConsumer.prototype, '_originalMappings', { configurable: true, enumerable: true, get: function () { if (!this.__originalMappings) { this._parseMappings(this._mappings, this.sourceRoot); } return this.__originalMappings; } }); SourceMapConsumer.prototype._charIsMappingSeparator = function SourceMapConsumer_charIsMappingSeparator(aStr, index) { var c = aStr.charAt(index); return c === ";" || c === ","; }; /** * Parse the mappings in a string in to a data structure which we can easily * query (the ordered arrays in the `this.__generatedMappings` and * `this.__originalMappings` properties). 
 */
// Abstract: concrete subclasses (Basic/Indexed) supply the real parser.
SourceMapConsumer.prototype._parseMappings =
  function SourceMapConsumer_parseMappings(aStr, aSourceRoot) {
    throw new Error("Subclasses must implement _parseMappings");
  };

// Iteration-order constants for eachMapping.
SourceMapConsumer.GENERATED_ORDER = 1;
SourceMapConsumer.ORIGINAL_ORDER = 2;

// Bias constants controlling which neighbor is returned on an inexact search.
SourceMapConsumer.GREATEST_LOWER_BOUND = 1;
SourceMapConsumer.LEAST_UPPER_BOUND = 2;

/**
 * Iterate over each mapping between an original source/line/column and a
 * generated line/column in this source map.
 *
 * @param Function aCallback
 *        The function that is called with each mapping.
 * @param Object aContext
 *        Optional. If specified, this object will be the value of `this` every
 *        time that `aCallback` is called.
 * @param aOrder
 *        Either `SourceMapConsumer.GENERATED_ORDER` or
 *        `SourceMapConsumer.ORIGINAL_ORDER`. Specifies whether you want to
 *        iterate over the mappings sorted by the generated file's line/column
 *        order or the original's source/line/column order, respectively. Defaults to
 *        `SourceMapConsumer.GENERATED_ORDER`.
 */
SourceMapConsumer.prototype.eachMapping =
  function SourceMapConsumer_eachMapping(aCallback, aContext, aOrder) {
    var context = aContext || null;
    var order = aOrder || SourceMapConsumer.GENERATED_ORDER;

    var mappings;
    switch (order) {
    case SourceMapConsumer.GENERATED_ORDER:
      mappings = this._generatedMappings;
      break;
    case SourceMapConsumer.ORIGINAL_ORDER:
      mappings = this._originalMappings;
      break;
    default:
      throw new Error("Unknown order of iteration.");
    }

    var sourceRoot = this.sourceRoot;
    // Build caller-facing mapping objects: internal source/name indices are
    // resolved back to their string values via the ArraySets.
    mappings.map(function (mapping) {
      var source = mapping.source === null ? null : this._sources.at(mapping.source);
      source = util.computeSourceURL(sourceRoot, source, this._sourceMapURL);
      return {
        source: source,
        generatedLine: mapping.generatedLine,
        generatedColumn: mapping.generatedColumn,
        originalLine: mapping.originalLine,
        originalColumn: mapping.originalColumn,
        name: mapping.name === null ? null : this._names.at(mapping.name)
      };
    }, this).forEach(aCallback, context);
  };

/**
 * Returns all generated line and column information for the original source,
 * line, and column provided. If no column is provided, returns all mappings
 * corresponding to a either the line we are searching for or the next
 * closest line that has any mappings. Otherwise, returns all mappings
 * corresponding to the given line and either the column we are searching for
 * or the next closest column that has any offsets.
 *
 * The only argument is an object with the following properties:
 *
 *   - source: The filename of the original source.
 *   - line: The line number in the original source.  The line number is 1-based.
 *   - column: Optional. the column number in the original source.
 *     The column number is 0-based.
 *
 * and an array of objects is returned, each with the following properties:
 *
 *   - line: The line number in the generated source, or null.  The
 *     line number is 1-based.
 *   - column: The column number in the generated source, or null.
 *     The column number is 0-based.
 */
SourceMapConsumer.prototype.allGeneratedPositionsFor =
  function SourceMapConsumer_allGeneratedPositionsFor(aArgs) {
    var line = util.getArg(aArgs, 'line');

    // When there is no exact match, BasicSourceMapConsumer.prototype._findMapping
    // returns the index of the closest mapping less than the needle. By
    // setting needle.originalColumn to 0, we thus find the last mapping for
    // the given line, provided such a mapping exists.
    var needle = {
      source: util.getArg(aArgs, 'source'),
      originalLine: line,
      originalColumn: util.getArg(aArgs, 'column', 0)
    };

    // Translate the source path to its internal index; unknown sources have
    // no mappings at all.
    needle.source = this._findSourceIndex(needle.source);
    if (needle.source < 0) {
      return [];
    }

    var mappings = [];

    var index = this._findMapping(needle,
                                  this._originalMappings,
                                  "originalLine",
                                  "originalColumn",
                                  util.compareByOriginalPositions,
                                  binarySearch.LEAST_UPPER_BOUND);
    if (index >= 0) {
      var mapping = this._originalMappings[index];

      if (aArgs.column === undefined) {
        var originalLine = mapping.originalLine;

        // Iterate until either we run out of mappings, or we run into
        // a mapping for a different line than the one we found. Since
        // mappings are sorted, this is guaranteed to find all mappings for
        // the line we found.
        while (mapping && mapping.originalLine === originalLine) {
          mappings.push({
            line: util.getArg(mapping, 'generatedLine', null),
            column: util.getArg(mapping, 'generatedColumn', null),
            lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)
          });

          mapping = this._originalMappings[++index];
        }
      } else {
        var originalColumn = mapping.originalColumn;

        // Iterate until either we run out of mappings, or we run into
        // a mapping for a different line than the one we were searching for.
        // Since mappings are sorted, this is guaranteed to find all mappings for
        // the line we are searching for.
        while (mapping &&
               mapping.originalLine === line &&
               mapping.originalColumn == originalColumn) {
          mappings.push({
            line: util.getArg(mapping, 'generatedLine', null),
            column: util.getArg(mapping, 'generatedColumn', null),
            lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)
          });

          mapping = this._originalMappings[++index];
        }
      }
    }

    return mappings;
  };

exports.SourceMapConsumer = SourceMapConsumer;

/**
 * A BasicSourceMapConsumer instance represents a parsed source map which we can
 * query for information about the original file positions by giving it a file
 * position in the generated source.
 *
 * The first parameter is the raw source map (either as a JSON string, or
 * already parsed to an object). According to the spec, source maps have the
 * following attributes:
 *
 *   - version: Which version of the source map spec this map is following.
 *   - sources: An array of URLs to the original source files.
 *   - names: An array of identifiers which can be referenced by individual mappings.
 *   - sourceRoot: Optional. The URL root from which all sources are relative.
 *   - sourcesContent: Optional. An array of contents of the original source files.
 *   - mappings: A string of base64 VLQs which contain the actual mappings.
 *   - file: Optional. The generated file this source map is associated with.
 *
 * Here is an example source map, taken from the source map spec[0]:
 *
 *     {
 *       version : 3,
 *       file: "out.js",
 *       sourceRoot : "",
 *       sources: ["foo.js", "bar.js"],
 *       names: ["src", "maps", "are", "fun"],
 *       mappings: "AA,AB;;ABCDE;"
 *     }
 *
 * The second parameter, if given, is a string whose value is the URL
 * at which the source map was found.  This URL is used to compute the
 * sources array.
 *
 * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1#
 */
function BasicSourceMapConsumer(aSourceMap, aSourceMapURL) {
  var sourceMap = aSourceMap;
  if (typeof aSourceMap === 'string') {
    sourceMap = util.parseSourceMapInput(aSourceMap);
  }

  var version = util.getArg(sourceMap, 'version');
  var sources = util.getArg(sourceMap, 'sources');
  // Sass 3.3 leaves out the 'names' array, so we deviate from the spec (which
  // requires the array) to play nice here.
  var names = util.getArg(sourceMap, 'names', []);
  var sourceRoot = util.getArg(sourceMap, 'sourceRoot', null);
  var sourcesContent = util.getArg(sourceMap, 'sourcesContent', null);
  var mappings = util.getArg(sourceMap, 'mappings');
  var file = util.getArg(sourceMap, 'file', null);

  // Once again, Sass deviates from the spec and supplies the version as a
  // string rather than a number, so we use loose equality checking here.
  if (version != this._version) {
    throw new Error('Unsupported version: ' + version);
  }

  if (sourceRoot) {
    sourceRoot = util.normalize(sourceRoot);
  }

  sources = sources
    .map(String)
    // Some source maps produce relative source paths like "./foo.js" instead of
    // "foo.js".  Normalize these first so that future comparisons will succeed.
    // See bugzil.la/1090768.
    .map(util.normalize)
    // Always ensure that absolute sources are internally stored relative to
    // the source root, if the source root is absolute. Not doing this would
    // be particularly problematic when the source root is a prefix of the
    // source (valid, but why??). See github issue #199 and bugzil.la/1188982.
    .map(function (source) {
      return sourceRoot && util.isAbsolute(sourceRoot) && util.isAbsolute(source)
        ? util.relative(sourceRoot, source)
        : source;
    });

  // Pass `true` below to allow duplicate names and sources. While source maps
  // are intended to be compressed and deduplicated, the TypeScript compiler
  // sometimes generates source maps with duplicates in them. See Github issue
  // #72 and bugzil.la/889492.
  this._names = ArraySet.fromArray(names.map(String), true);
  this._sources = ArraySet.fromArray(sources, true);

  // Precompute the caller-facing absolute source URLs (exposed via the
  // `sources` accessor).
  this._absoluteSources = this._sources.toArray().map(function (s) {
    return util.computeSourceURL(sourceRoot, s, aSourceMapURL);
  });

  this.sourceRoot = sourceRoot;
  this.sourcesContent = sourcesContent;
  this._mappings = mappings;
  this._sourceMapURL = aSourceMapURL;
  this.file = file;
}

BasicSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype);
// NOTE(review): this assigns `consumer`, not `constructor` — looks like an
// upstream quirk; kept as-is because callers may rely on it.
BasicSourceMapConsumer.prototype.consumer = SourceMapConsumer;

/**
 * Utility function to find the index of a source.  Returns -1 if not
 * found.
 */
BasicSourceMapConsumer.prototype._findSourceIndex = function(aSource) {
  var relativeSource = aSource;
  if (this.sourceRoot != null) {
    relativeSource = util.relative(this.sourceRoot, relativeSource);
  }

  if (this._sources.has(relativeSource)) {
    return this._sources.indexOf(relativeSource);
  }

  // Maybe aSource is an absolute URL as returned by |sources|.  In
  // this case we can't simply undo the transform.
  var i;
  for (i = 0; i < this._absoluteSources.length; ++i) {
    if (this._absoluteSources[i] == aSource) {
      return i;
    }
  }

  return -1;
};

/**
 * Create a BasicSourceMapConsumer from a SourceMapGenerator.
 *
 * @param SourceMapGenerator aSourceMap
 *        The source map that will be consumed.
 * @param String aSourceMapURL
 *        The URL at which the source map can be found (optional)
 * @returns BasicSourceMapConsumer
 */
BasicSourceMapConsumer.fromSourceMap =
  function SourceMapConsumer_fromSourceMap(aSourceMap, aSourceMapURL) {
    // Build a consumer directly from a generator's internal state, bypassing
    // serialization + reparsing of the "mappings" string.
    var smc = Object.create(BasicSourceMapConsumer.prototype);

    var names = smc._names = ArraySet.fromArray(aSourceMap._names.toArray(), true);
    var sources = smc._sources = ArraySet.fromArray(aSourceMap._sources.toArray(), true);
    smc.sourceRoot = aSourceMap._sourceRoot;
    smc.sourcesContent = aSourceMap._generateSourcesContent(smc._sources.toArray(),
                                                            smc.sourceRoot);
    smc.file = aSourceMap._file;
    smc._sourceMapURL = aSourceMapURL;
    smc._absoluteSources = smc._sources.toArray().map(function (s) {
      return util.computeSourceURL(smc.sourceRoot, s, aSourceMapURL);
    });

    // Because we are modifying the entries (by converting string sources and
    // names to indices into the sources and names ArraySets), we have to make
    // a copy of the entry or else bad things happen. Shared mutable state
    // strikes again! See github issue #191.
    var generatedMappings = aSourceMap._mappings.toArray().slice();
    var destGeneratedMappings = smc.__generatedMappings = [];
    var destOriginalMappings = smc.__originalMappings = [];

    for (var i = 0, length = generatedMappings.length; i < length; i++) {
      var srcMapping = generatedMappings[i];
      var destMapping = new Mapping;
      destMapping.generatedLine = srcMapping.generatedLine;
      destMapping.generatedColumn = srcMapping.generatedColumn;

      if (srcMapping.source) {
        destMapping.source = sources.indexOf(srcMapping.source);
        destMapping.originalLine = srcMapping.originalLine;
        destMapping.originalColumn = srcMapping.originalColumn;

        if (srcMapping.name) {
          destMapping.name = names.indexOf(srcMapping.name);
        }

        // Only mappings with an original source participate in
        // original-position lookups.
        destOriginalMappings.push(destMapping);
      }

      destGeneratedMappings.push(destMapping);
    }

    // Generator mappings are already in generated order, so only the
    // original-position view needs sorting here.
    quickSort(smc.__originalMappings, util.compareByOriginalPositions);

    return smc;
  };

/**
 * The version of the source mapping spec that we are consuming.
 */
BasicSourceMapConsumer.prototype._version = 3;

/**
 * The list of original sources.
 */
Object.defineProperty(BasicSourceMapConsumer.prototype, 'sources', {
  get: function () {
    // Return a defensive copy so callers cannot mutate internal state.
    return this._absoluteSources.slice();
  }
});

/**
 * Provide the JIT with a nice shape / hidden class.
 */
function Mapping() {
  this.generatedLine = 0;
  this.generatedColumn = 0;
  this.source = null;
  this.originalLine = null;
  this.originalColumn = null;
  this.name = null;
}

/**
 * Parse the mappings in a string in to a data structure which we can easily
 * query (the ordered arrays in the `this.__generatedMappings` and
 * `this.__originalMappings` properties).
 */
BasicSourceMapConsumer.prototype._parseMappings =
  function SourceMapConsumer_parseMappings(aStr, aSourceRoot) {
    // All fields in the "mappings" string are delta-encoded against the
    // previous occurrence, so these running values carry decoder state.
    var generatedLine = 1;
    var previousGeneratedColumn = 0;
    var previousOriginalLine = 0;
    var previousOriginalColumn = 0;
    var previousSource = 0;
    var previousName = 0;
    var length = aStr.length;
    var index = 0;
    var cachedSegments = {};
    var temp = {};
    var originalMappings = [];
    var generatedMappings = [];
    var mapping, str, segment, end, value;

    while (index < length) {
      if (aStr.charAt(index) === ';') {
        // ';' starts a new generated line; column deltas reset per line.
        generatedLine++;
        index++;
        previousGeneratedColumn = 0;
      }
      else if (aStr.charAt(index) === ',') {
        index++;
      }
      else {
        mapping = new Mapping();
        mapping.generatedLine = generatedLine;

        // Because each offset is encoded relative to the previous one,
        // many segments often have the same encoding. We can exploit this
        // fact by caching the parsed variable length fields of each segment,
        // allowing us to avoid a second parse if we encounter the same
        // segment again.
        for (end = index; end < length; end++) {
          if (this._charIsMappingSeparator(aStr, end)) {
            break;
          }
        }
        str = aStr.slice(index, end);

        segment = cachedSegments[str];
        if (segment) {
          index += str.length;
        } else {
          segment = [];
          while (index < end) {
            base64VLQ.decode(aStr, index, temp);
            value = temp.value;
            index = temp.rest;
            segment.push(value);
          }

          // Valid segment lengths are 1, 4, or 5 fields.
          if (segment.length === 2) {
            throw new Error('Found a source, but no line and column');
          }

          if (segment.length === 3) {
            throw new Error('Found a source and line, but no column');
          }

          cachedSegments[str] = segment;
        }

        // Generated column.
        mapping.generatedColumn = previousGeneratedColumn + segment[0];
        previousGeneratedColumn = mapping.generatedColumn;

        if (segment.length > 1) {
          // Original source.
          mapping.source = previousSource + segment[1];
          previousSource += segment[1];

          // Original line.
          mapping.originalLine = previousOriginalLine + segment[2];
          previousOriginalLine = mapping.originalLine;
          // Lines are stored 0-based
          mapping.originalLine += 1;

          // Original column.
          mapping.originalColumn = previousOriginalColumn + segment[3];
          previousOriginalColumn = mapping.originalColumn;

          if (segment.length > 4) {
            // Original name.
            mapping.name = previousName + segment[4];
            previousName += segment[4];
          }
        }

        generatedMappings.push(mapping);
        if (typeof mapping.originalLine === 'number') {
          originalMappings.push(mapping);
        }
      }
    }

    quickSort(generatedMappings, util.compareByGeneratedPositionsDeflated);
    this.__generatedMappings = generatedMappings;

    quickSort(originalMappings, util.compareByOriginalPositions);
    this.__originalMappings = originalMappings;
  };

/**
 * Find the mapping that best matches the hypothetical "needle" mapping that
 * we are searching for in the given "haystack" of mappings.
*/ BasicSourceMapConsumer.prototype._findMapping = function SourceMapConsumer_findMapping(aNeedle, aMappings, aLineName, aColumnName, aComparator, aBias) { // To return the position we are searching for, we must first find the // mapping for the given position and then return the opposite position it // points to. Because the mappings are sorted, we can use binary search to // find the best mapping. if (aNeedle[aLineName] <= 0) { throw new TypeError('Line must be greater than or equal to 1, got ' + aNeedle[aLineName]); } if (aNeedle[aColumnName] < 0) { throw new TypeError('Column must be greater than or equal to 0, got ' + aNeedle[aColumnName]); } return binarySearch.search(aNeedle, aMappings, aComparator, aBias); }; /** * Compute the last column for each generated mapping. The last column is * inclusive. */ BasicSourceMapConsumer.prototype.computeColumnSpans = function SourceMapConsumer_computeColumnSpans() { for (var index = 0; index < this._generatedMappings.length; ++index) { var mapping = this._generatedMappings[index]; // Mappings do not contain a field for the last generated columnt. We // can come up with an optimistic estimate, however, by assuming that // mappings are contiguous (i.e. given two consecutive mappings, the // first mapping ends where the second one starts). if (index + 1 < this._generatedMappings.length) { var nextMapping = this._generatedMappings[index + 1]; if (mapping.generatedLine === nextMapping.generatedLine) { mapping.lastGeneratedColumn = nextMapping.generatedColumn - 1; continue; } } // The last mapping for each line spans the entire line. mapping.lastGeneratedColumn = Infinity; } }; /** * Returns the original source, line, and column information for the generated * source's line and column positions provided. The only argument is an object * with the following properties: * * - line: The line number in the generated source. The line number * is 1-based. * - column: The column number in the generated source. 
The column * number is 0-based. * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the * closest element that is smaller than or greater than the one we are * searching for, respectively, if the exact element cannot be found. * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. * * and an object is returned with the following properties: * * - source: The original source file, or null. * - line: The line number in the original source, or null. The * line number is 1-based. * - column: The column number in the original source, or null. The * column number is 0-based. * - name: The original identifier, or null. */ BasicSourceMapConsumer.prototype.originalPositionFor = function SourceMapConsumer_originalPositionFor(aArgs) { var needle = { generatedLine: util.getArg(aArgs, 'line'), generatedColumn: util.getArg(aArgs, 'column') }; var index = this._findMapping( needle, this._generatedMappings, "generatedLine", "generatedColumn", util.compareByGeneratedPositionsDeflated, util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND) ); if (index >= 0) { var mapping = this._generatedMappings[index]; if (mapping.generatedLine === needle.generatedLine) { var source = util.getArg(mapping, 'source', null); if (source !== null) { source = this._sources.at(source); source = util.computeSourceURL(this.sourceRoot, source, this._sourceMapURL); } var name = util.getArg(mapping, 'name', null); if (name !== null) { name = this._names.at(name); } return { source: source, line: util.getArg(mapping, 'originalLine', null), column: util.getArg(mapping, 'originalColumn', null), name: name }; } } return { source: null, line: null, column: null, name: null }; }; /** * Return true if we have the source content for every source in the source * map, false otherwise. 
 */
BasicSourceMapConsumer.prototype.hasContentsOfAllSources =
  function BasicSourceMapConsumer_hasContentsOfAllSources() {
    if (!this.sourcesContent) {
      return false;
    }
    // Every source must have a non-null entry in sourcesContent.
    return this.sourcesContent.length >= this._sources.size() &&
      !this.sourcesContent.some(function (sc) { return sc == null; });
  };

/**
 * Returns the original source content. The only argument is the url of the
 * original source file. Returns null if no original source content is
 * available.
 */
BasicSourceMapConsumer.prototype.sourceContentFor =
  function SourceMapConsumer_sourceContentFor(aSource, nullOnMissing) {
    if (!this.sourcesContent) {
      return null;
    }

    var index = this._findSourceIndex(aSource);
    if (index >= 0) {
      return this.sourcesContent[index];
    }

    var relativeSource = aSource;
    if (this.sourceRoot != null) {
      relativeSource = util.relative(this.sourceRoot, relativeSource);
    }

    var url;
    if (this.sourceRoot != null
        && (url = util.urlParse(this.sourceRoot))) {
      // XXX: file:// URIs and absolute paths lead to unexpected behavior for
      // many users. We can help them out when they expect file:// URIs to
      // behave like it would if they were running a local HTTP server. See
      // https://bugzilla.mozilla.org/show_bug.cgi?id=885597.
      var fileUriAbsPath = relativeSource.replace(/^file:\/\//, "");
      if (url.scheme == "file"
          && this._sources.has(fileUriAbsPath)) {
        return this.sourcesContent[this._sources.indexOf(fileUriAbsPath)]
      }

      if ((!url.path || url.path == "/")
          && this._sources.has("/" + relativeSource)) {
        return this.sourcesContent[this._sources.indexOf("/" + relativeSource)];
      }
    }

    // This function is used recursively from
    // IndexedSourceMapConsumer.prototype.sourceContentFor. In that case, we
    // don't want to throw if we can't find the source - we just want to
    // return null, so we provide a flag to exit gracefully.
    if (nullOnMissing) {
      return null;
    }
    else {
      throw new Error('"' + relativeSource + '" is not in the SourceMap.');
    }
  };

/**
 * Returns the generated line and column information for the original source,
 * line, and column positions provided. The only argument is an object with
 * the following properties:
 *
 *   - source: The filename of the original source.
 *   - line: The line number in the original source.  The line number
 *     is 1-based.
 *   - column: The column number in the original source.  The column
 *     number is 0-based.
 *   - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or
 *     'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the
 *     closest element that is smaller than or greater than the one we are
 *     searching for, respectively, if the exact element cannot be found.
 *     Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'.
 *
 * and an object is returned with the following properties:
 *
 *   - line: The line number in the generated source, or null.  The
 *     line number is 1-based.
 *   - column: The column number in the generated source, or null.
 *     The column number is 0-based.
*/ BasicSourceMapConsumer.prototype.generatedPositionFor = function SourceMapConsumer_generatedPositionFor(aArgs) { var source = util.getArg(aArgs, 'source'); source = this._findSourceIndex(source); if (source < 0) { return { line: null, column: null, lastColumn: null }; } var needle = { source: source, originalLine: util.getArg(aArgs, 'line'), originalColumn: util.getArg(aArgs, 'column') }; var index = this._findMapping( needle, this._originalMappings, "originalLine", "originalColumn", util.compareByOriginalPositions, util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND) ); if (index >= 0) { var mapping = this._originalMappings[index]; if (mapping.source === needle.source) { return { line: util.getArg(mapping, 'generatedLine', null), column: util.getArg(mapping, 'generatedColumn', null), lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) }; } } return { line: null, column: null, lastColumn: null }; }; exports.BasicSourceMapConsumer = BasicSourceMapConsumer; /** * An IndexedSourceMapConsumer instance represents a parsed source map which * we can query for information. It differs from BasicSourceMapConsumer in * that it takes "indexed" source maps (i.e. ones with a "sections" field) as * input. * * The first parameter is a raw source map (either as a JSON string, or already * parsed to an object). According to the spec for indexed source maps, they * have the following attributes: * * - version: Which version of the source map spec this map is following. * - file: Optional. The generated file this source map is associated with. * - sections: A list of section definitions. * * Each value under the "sections" field has two fields: * - offset: The offset into the original specified at which this section * begins to apply, defined as an object with a "line" and "column" * field. * - map: A source map definition. This source map could also be indexed, * but doesn't have to be. 
 *
 * Instead of the "map" field, it's also possible to have a "url" field
 * specifying a URL to retrieve a source map from, but that's currently
 * unsupported.
 *
 * Here's an example source map, taken from the source map spec[0], but
 * modified to omit a section which uses the "url" field.
 *
 *  {
 *    version : 3,
 *    file: "app.js",
 *    sections: [{
 *      offset: {line:100, column:10},
 *      map: {
 *        version : 3,
 *        file: "section.js",
 *        sources: ["foo.js", "bar.js"],
 *        names: ["src", "maps", "are", "fun"],
 *        mappings: "AAAA,E;;ABCDE;"
 *      }
 *    }],
 *  }
 *
 * The second parameter, if given, is a string whose value is the URL
 * at which the source map was found.  This URL is used to compute the
 * sources array.
 *
 * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.535es3xeprgt
 */
function IndexedSourceMapConsumer(aSourceMap, aSourceMapURL) {
  var sourceMap = aSourceMap;
  if (typeof aSourceMap === 'string') {
    sourceMap = util.parseSourceMapInput(aSourceMap);
  }

  var version = util.getArg(sourceMap, 'version');
  var sections = util.getArg(sourceMap, 'sections');

  if (version != this._version) {
    throw new Error('Unsupported version: ' + version);
  }

  this._sources = new ArraySet();
  this._names = new ArraySet();

  // Tracks the previous section's offset so we can enforce ordering below.
  var lastOffset = {
    line: -1,
    column: 0
  };
  this._sections = sections.map(function (s) {
    if (s.url) {
      // The url field will require support for asynchronicity.
      // See https://github.com/mozilla/source-map/issues/16
      throw new Error('Support for url field in sections not implemented.');
    }
    var offset = util.getArg(s, 'offset');
    var offsetLine = util.getArg(offset, 'line');
    var offsetColumn = util.getArg(offset, 'column');

    if (offsetLine < lastOffset.line ||
        (offsetLine === lastOffset.line && offsetColumn < lastOffset.column)) {
      throw new Error('Section offsets must be ordered and non-overlapping.');
    }
    lastOffset = offset;

    return {
      generatedOffset: {
        // The offset fields are 0-based, but we use 1-based indices when
        // encoding/decoding from VLQ.
        generatedLine: offsetLine + 1,
        generatedColumn: offsetColumn + 1
      },
      // Each section gets its own (possibly nested) consumer.
      consumer: new SourceMapConsumer(util.getArg(s, 'map'), aSourceMapURL)
    }
  });
}

IndexedSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype);
// NOTE(review): sets constructor to SourceMapConsumer rather than
// IndexedSourceMapConsumer — upstream quirk, kept as-is.
IndexedSourceMapConsumer.prototype.constructor = SourceMapConsumer;

/**
 * The version of the source map spec that we are consuming.
 */
IndexedSourceMapConsumer.prototype._version = 3;

/**
 * The list of original sources.
 */
Object.defineProperty(IndexedSourceMapConsumer.prototype, 'sources', {
  get: function () {
    // Concatenation of every section consumer's sources, in section order.
    var sources = [];
    for (var i = 0; i < this._sections.length; i++) {
      for (var j = 0; j < this._sections[i].consumer.sources.length; j++) {
        sources.push(this._sections[i].consumer.sources[j]);
      }
    }
    return sources;
  }
});

/**
 * Returns the original source, line, and column information for the generated
 * source's line and column positions provided. The only argument is an object
 * with the following properties:
 *
 *   - line: The line number in the generated source.  The line number
 *     is 1-based.
 *   - column: The column number in the generated source.  The column
 *     number is 0-based.
 *
 * and an object is returned with the following properties:
 *
 *   - source: The original source file, or null.
 *   - line: The line number in the original source, or null.  The
 *     line number is 1-based.
 *   - column: The column number in the original source, or null.  The
 *     column number is 0-based.
 *   - name: The original identifier, or null.
 */
IndexedSourceMapConsumer.prototype.originalPositionFor =
  function IndexedSourceMapConsumer_originalPositionFor(aArgs) {
    var needle = {
      generatedLine: util.getArg(aArgs, 'line'),
      generatedColumn: util.getArg(aArgs, 'column')
    };

    // Find the section containing the generated position we're trying to map
    // to an original position.
    var sectionIndex = binarySearch.search(needle, this._sections,
      function(needle, section) {
        var cmp = needle.generatedLine - section.generatedOffset.generatedLine;
        if (cmp) {
          return cmp;
        }

        return (needle.generatedColumn -
                section.generatedOffset.generatedColumn);
      });
    var section = this._sections[sectionIndex];

    if (!section) {
      return {
        source: null,
        line: null,
        column: null,
        name: null
      };
    }

    // Delegate to the section's consumer, translating the position into the
    // section-local coordinate space (offsets are stored 1-based here; the
    // column shift only applies on the section's first generated line).
    return section.consumer.originalPositionFor({
      line: needle.generatedLine -
        (section.generatedOffset.generatedLine - 1),
      column: needle.generatedColumn -
        (section.generatedOffset.generatedLine === needle.generatedLine
         ? section.generatedOffset.generatedColumn - 1
         : 0),
      bias: aArgs.bias
    });
  };

/**
 * Return true if we have the source content for every source in the source
 * map, false otherwise.
 */
IndexedSourceMapConsumer.prototype.hasContentsOfAllSources =
  function IndexedSourceMapConsumer_hasContentsOfAllSources() {
    return this._sections.every(function (s) {
      return s.consumer.hasContentsOfAllSources();
    });
  };

/**
 * Returns the original source content. The only argument is the url of the
 * original source file. Returns null if no original source content is
 * available.
 */
IndexedSourceMapConsumer.prototype.sourceContentFor =
  function IndexedSourceMapConsumer_sourceContentFor(aSource, nullOnMissing) {
    // Ask each section in turn; pass nullOnMissing=true so misses fall
    // through to the next section instead of throwing.
    for (var i = 0; i < this._sections.length; i++) {
      var section = this._sections[i];

      var content = section.consumer.sourceContentFor(aSource, true);
      if (content) {
        return content;
      }
    }
    if (nullOnMissing) {
      return null;
    }
    else {
      throw new Error('"' + aSource + '" is not in the SourceMap.');
    }
  };

/**
 * Returns the generated line and column information for the original source,
 * line, and column positions provided. The only argument is an object with
 * the following properties:
 *
 *   - source: The filename of the original source.
 *   - line: The line number in the original source.  The line number
 *     is 1-based.
 *   - column: The column number in the original source.  The column
 *     number is 0-based.
* * and an object is returned with the following properties: * * - line: The line number in the generated source, or null. The * line number is 1-based. * - column: The column number in the generated source, or null. * The column number is 0-based. */ IndexedSourceMapConsumer.prototype.generatedPositionFor = function IndexedSourceMapConsumer_generatedPositionFor(aArgs) { for (var i = 0; i < this._sections.length; i++) { var section = this._sections[i]; // Only consider this section if the requested source is in the list of // sources of the consumer. if (section.consumer._findSourceIndex(util.getArg(aArgs, 'source')) === -1) { continue; } var generatedPosition = section.consumer.generatedPositionFor(aArgs); if (generatedPosition) { var ret = { line: generatedPosition.line + (section.generatedOffset.generatedLine - 1), column: generatedPosition.column + (section.generatedOffset.generatedLine === generatedPosition.line ? section.generatedOffset.generatedColumn - 1 : 0) }; return ret; } } return { line: null, column: null }; }; /** * Parse the mappings in a string in to a data structure which we can easily * query (the ordered arrays in the `this.__generatedMappings` and * `this.__originalMappings` properties). 
*/ IndexedSourceMapConsumer.prototype._parseMappings = function IndexedSourceMapConsumer_parseMappings(aStr, aSourceRoot) { this.__generatedMappings = []; this.__originalMappings = []; for (var i = 0; i < this._sections.length; i++) { var section = this._sections[i]; var sectionMappings = section.consumer._generatedMappings; for (var j = 0; j < sectionMappings.length; j++) { var mapping = sectionMappings[j]; var source = section.consumer._sources.at(mapping.source); source = util.computeSourceURL(section.consumer.sourceRoot, source, this._sourceMapURL); this._sources.add(source); source = this._sources.indexOf(source); var name = null; if (mapping.name) { name = section.consumer._names.at(mapping.name); this._names.add(name); name = this._names.indexOf(name); } // The mappings coming from the consumer for the section have // generated positions relative to the start of the section, so we // need to offset them to be relative to the start of the concatenated // generated file. var adjustedMapping = { source: source, generatedLine: mapping.generatedLine + (section.generatedOffset.generatedLine - 1), generatedColumn: mapping.generatedColumn + (section.generatedOffset.generatedLine === mapping.generatedLine ? section.generatedOffset.generatedColumn - 1 : 0), originalLine: mapping.originalLine, originalColumn: mapping.originalColumn, name: name }; this.__generatedMappings.push(adjustedMapping); if (typeof adjustedMapping.originalLine === 'number') { this.__originalMappings.push(adjustedMapping); } } } quickSort(this.__generatedMappings, util.compareByGeneratedPositionsDeflated); quickSort(this.__originalMappings, util.compareByOriginalPositions); }; exports.IndexedSourceMapConsumer = IndexedSourceMapConsumer; /***/ }), /* 8 */ /***/ (function(module, exports) { /* -*- Mode: js; js-indent-level: 2; -*- */ /* * Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. 
See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause */ exports.GREATEST_LOWER_BOUND = 1; exports.LEAST_UPPER_BOUND = 2; /** * Recursive implementation of binary search. * * @param aLow Indices here and lower do not contain the needle. * @param aHigh Indices here and higher do not contain the needle. * @param aNeedle The element being searched for. * @param aHaystack The non-empty array being searched. * @param aCompare Function which takes two elements and returns -1, 0, or 1. * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the * closest element that is smaller than or greater than the one we are * searching for, respectively, if the exact element cannot be found. */ function recursiveSearch(aLow, aHigh, aNeedle, aHaystack, aCompare, aBias) { // This function terminates when one of the following is true: // // 1. We find the exact element we are looking for. // // 2. We did not find the exact element, but we can return the index of // the next-closest element. // // 3. We did not find the exact element, and there is no next-closest // element than the one we are searching for, so we return -1. var mid = Math.floor((aHigh - aLow) / 2) + aLow; var cmp = aCompare(aNeedle, aHaystack[mid], true); if (cmp === 0) { // Found the element we are looking for. return mid; } else if (cmp > 0) { // Our needle is greater than aHaystack[mid]. if (aHigh - mid > 1) { // The element is in the upper half. return recursiveSearch(mid, aHigh, aNeedle, aHaystack, aCompare, aBias); } // The exact needle element was not found in this haystack. Determine if // we are in termination case (3) or (2) and return the appropriate thing. if (aBias == exports.LEAST_UPPER_BOUND) { return aHigh < aHaystack.length ? aHigh : -1; } else { return mid; } } else { // Our needle is less than aHaystack[mid]. if (mid - aLow > 1) { // The element is in the lower half. 
return recursiveSearch(aLow, mid, aNeedle, aHaystack, aCompare, aBias); } // we are in termination case (3) or (2) and return the appropriate thing. if (aBias == exports.LEAST_UPPER_BOUND) { return mid; } else { return aLow < 0 ? -1 : aLow; } } } /** * This is an implementation of binary search which will always try and return * the index of the closest element if there is no exact hit. This is because * mappings between original and generated line/col pairs are single points, * and there is an implicit region between each of them, so a miss just means * that you aren't on the very start of a region. * * @param aNeedle The element you are looking for. * @param aHaystack The array that is being searched. * @param aCompare A function which takes the needle and an element in the * array and returns -1, 0, or 1 depending on whether the needle is less * than, equal to, or greater than the element, respectively. * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the * closest element that is smaller than or greater than the one we are * searching for, respectively, if the exact element cannot be found. * Defaults to 'binarySearch.GREATEST_LOWER_BOUND'. */ exports.search = function search(aNeedle, aHaystack, aCompare, aBias) { if (aHaystack.length === 0) { return -1; } var index = recursiveSearch(-1, aHaystack.length, aNeedle, aHaystack, aCompare, aBias || exports.GREATEST_LOWER_BOUND); if (index < 0) { return -1; } // We have found either the exact element, or the next-closest element than // the one we are searching for. However, there may be more than one such // element. Make sure we always return the smallest of these. 
while (index - 1 >= 0) { if (aCompare(aHaystack[index], aHaystack[index - 1], true) !== 0) { break; } --index; } return index; }; /***/ }), /* 9 */ /***/ (function(module, exports) { /* -*- Mode: js; js-indent-level: 2; -*- */ /* * Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause */ // It turns out that some (most?) JavaScript engines don't self-host // `Array.prototype.sort`. This makes sense because C++ will likely remain // faster than JS when doing raw CPU-intensive sorting. However, when using a // custom comparator function, calling back and forth between the VM's C++ and // JIT'd JS is rather slow *and* loses JIT type information, resulting in // worse generated code for the comparator function than would be optimal. In // fact, when sorting with a comparator, these costs outweigh the benefits of // sorting in C++. By using our own JS-implemented Quick Sort (below), we get // a ~3500ms mean speed-up in `bench/bench.html`. /** * Swap the elements indexed by `x` and `y` in the array `ary`. * * @param {Array} ary * The array. * @param {Number} x * The index of the first item. * @param {Number} y * The index of the second item. */ function swap(ary, x, y) { var temp = ary[x]; ary[x] = ary[y]; ary[y] = temp; } /** * Returns a random integer within the range `low .. high` inclusive. * * @param {Number} low * The lower bound on the range. * @param {Number} high * The upper bound on the range. */ function randomIntInRange(low, high) { return Math.round(low + (Math.random() * (high - low))); } /** * The Quick Sort algorithm. * * @param {Array} ary * An array to sort. * @param {function} comparator * Function to use to compare two items. 
* @param {Number} p * Start index of the array * @param {Number} r * End index of the array */ function doQuickSort(ary, comparator, p, r) { // If our lower bound is less than our upper bound, we (1) partition the // array into two pieces and (2) recurse on each half. If it is not, this is // the empty array and our base case. if (p < r) { // (1) Partitioning. // // The partitioning chooses a pivot between `p` and `r` and moves all // elements that are less than or equal to the pivot to the before it, and // all the elements that are greater than it after it. The effect is that // once partition is done, the pivot is in the exact place it will be when // the array is put in sorted order, and it will not need to be moved // again. This runs in O(n) time. // Always choose a random pivot so that an input array which is reverse // sorted does not cause O(n^2) running time. var pivotIndex = randomIntInRange(p, r); var i = p - 1; swap(ary, pivotIndex, r); var pivot = ary[r]; // Immediately after `j` is incremented in this loop, the following hold // true: // // * Every element in `ary[p .. i]` is less than or equal to the pivot. // // * Every element in `ary[i+1 .. j-1]` is greater than the pivot. for (var j = p; j < r; j++) { if (comparator(ary[j], pivot) <= 0) { i += 1; swap(ary, i, j); } } swap(ary, i + 1, j); var q = i + 1; // (2) Recurse on each half. doQuickSort(ary, comparator, p, q - 1); doQuickSort(ary, comparator, q + 1, r); } } /** * Sort the given array in-place with the given comparator function. * * @param {Array} ary * An array to sort. * @param {function} comparator * Function to use to compare two items. */ exports.quickSort = function (ary, comparator) { doQuickSort(ary, comparator, 0, ary.length - 1); }; /***/ }), /* 10 */ /***/ (function(module, exports, __webpack_require__) { /* -*- Mode: js; js-indent-level: 2; -*- */ /* * Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. 
See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 */

var SourceMapGenerator = __webpack_require__(1).SourceMapGenerator;
var util = __webpack_require__(4);

// Matches a Windows-style `\r\n` newline or a `\n` newline used by all other
// operating systems these days (capturing the result).
var REGEX_NEWLINE = /(\r?\n)/;

// Newline character code for charCodeAt() comparisons
var NEWLINE_CODE = 10;

// Private symbol for identifying `SourceNode`s when multiple versions of
// the source-map library are loaded. This MUST NOT CHANGE across
// versions!
var isSourceNode = "$$$isSourceNode$$$";

/**
 * SourceNodes provide a way to abstract over interpolating/concatenating
 * snippets of generated JavaScript source code while maintaining the line and
 * column information associated with the original source code.
 *
 * @param aLine The original line number.
 * @param aColumn The original column number.
 * @param aSource The original source's filename.
 * @param aChunks Optional. An array of strings which are snippets of
 *        generated JS, or other SourceNodes.
 * @param aName The original identifier.
 */
function SourceNode(aLine, aColumn, aSource, aChunks, aName) {
  // Ordered list of string snippets and nested SourceNodes.
  this.children = [];
  // Map from (escaped) source filename to its original source content.
  this.sourceContents = {};
  this.line = aLine == null ? null : aLine;
  this.column = aColumn == null ? null : aColumn;
  this.source = aSource == null ? null : aSource;
  this.name = aName == null ? null : aName;
  // Duck-type marker so nodes from other copies of this library are
  // still recognized (see `isSourceNode` above).
  this[isSourceNode] = true;
  if (aChunks != null) this.add(aChunks);
}

/**
 * Creates a SourceNode from generated code and a SourceMapConsumer.
 *
 * @param aGeneratedCode The generated code
 * @param aSourceMapConsumer The SourceMap for the generated code
 * @param aRelativePath Optional. The path that relative sources in the
 *        SourceMapConsumer should be relative to.
 */
SourceNode.fromStringWithSourceMap =
  function SourceNode_fromStringWithSourceMap(aGeneratedCode, aSourceMapConsumer, aRelativePath) {
    // The SourceNode we want to fill with the generated code
    // and the SourceMap
    var node = new SourceNode();

    // All even indices of this array are one line of the generated code,
    // while all odd indices are the newlines between two adjacent lines
    // (since `REGEX_NEWLINE` captures its match).
    // Processed fragments are accessed by calling `shiftNextLine`.
    var remainingLines = aGeneratedCode.split(REGEX_NEWLINE);
    var remainingLinesIndex = 0;
    // Consumes TWO entries per call: the line content and its captured
    // newline separator, re-joined so each "line" keeps its terminator.
    var shiftNextLine = function() {
      var lineContents = getNextLine();
      // The last line of a file might not have a newline.
      var newLine = getNextLine() || "";
      return lineContents + newLine;

      function getNextLine() {
        return remainingLinesIndex < remainingLines.length ?
            remainingLines[remainingLinesIndex++] : undefined;
      }
    };

    // We need to remember the position of "remainingLines"
    var lastGeneratedLine = 1, lastGeneratedColumn = 0;

    // The generate SourceNodes we need a code range.
    // To extract it current and last mapping is used.
    // Here we store the last mapping.
    var lastMapping = null;

    aSourceMapConsumer.eachMapping(function (mapping) {
      if (lastMapping !== null) {
        // We add the code from "lastMapping" to "mapping":
        // First check if there is a new line in between.
        if (lastGeneratedLine < mapping.generatedLine) {
          // Associate first line with "lastMapping"
          addMappingWithCode(lastMapping, shiftNextLine());
          lastGeneratedLine++;
          lastGeneratedColumn = 0;
          // The remaining code is added without mapping
        } else {
          // There is no new line in between.
          // Associate the code between "lastGeneratedColumn" and
          // "mapping.generatedColumn" with "lastMapping"
          var nextLine = remainingLines[remainingLinesIndex] || '';
          var code = nextLine.substr(0, mapping.generatedColumn -
                                        lastGeneratedColumn);
          // Trim the consumed prefix off the current line in place.
          remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn -
                                              lastGeneratedColumn);
          lastGeneratedColumn = mapping.generatedColumn;
          addMappingWithCode(lastMapping, code);
          // No more remaining code, continue
          lastMapping = mapping;
          return;
        }
      }
      // We add the generated code until the first mapping
      // to the SourceNode without any mapping.
      // Each line is added as separate string.
      while (lastGeneratedLine < mapping.generatedLine) {
        node.add(shiftNextLine());
        lastGeneratedLine++;
      }
      if (lastGeneratedColumn < mapping.generatedColumn) {
        var nextLine = remainingLines[remainingLinesIndex] || '';
        node.add(nextLine.substr(0, mapping.generatedColumn));
        remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn);
        lastGeneratedColumn = mapping.generatedColumn;
      }
      lastMapping = mapping;
    }, this);
    // We have processed all mappings.
    if (remainingLinesIndex < remainingLines.length) {
      if (lastMapping) {
        // Associate the remaining code in the current line with "lastMapping"
        addMappingWithCode(lastMapping, shiftNextLine());
      }
      // and add the remaining lines without any mapping
      node.add(remainingLines.splice(remainingLinesIndex).join(""));
    }

    // Copy sourcesContent into SourceNode
    aSourceMapConsumer.sources.forEach(function (sourceFile) {
      var content = aSourceMapConsumer.sourceContentFor(sourceFile);
      if (content != null) {
        if (aRelativePath != null) {
          sourceFile = util.join(aRelativePath, sourceFile);
        }
        node.setSourceContent(sourceFile, content);
      }
    });

    return node;

    // Append `code` to `node`, wrapped in a child SourceNode carrying the
    // mapping's original position when one is available.
    function addMappingWithCode(mapping, code) {
      if (mapping === null || mapping.source === undefined) {
        node.add(code);
      } else {
        var source = aRelativePath ?
          util.join(aRelativePath, mapping.source) :
          mapping.source;
        node.add(new SourceNode(mapping.originalLine,
                                mapping.originalColumn,
                                source,
                                code,
                                mapping.name));
      }
    }
  };

/**
 * Add a chunk of generated JS to this source node.
 *
 * @param aChunk A string snippet of generated JS code, another instance of
 *        SourceNode, or an array where each member is one of those things.
 */
SourceNode.prototype.add = function SourceNode_add(aChunk) {
  if (Array.isArray(aChunk)) {
    aChunk.forEach(function (chunk) {
      this.add(chunk);
    }, this);
  }
  else if (aChunk[isSourceNode] || typeof aChunk === "string") {
    // Empty strings are silently dropped.
    if (aChunk) {
      this.children.push(aChunk);
    }
  }
  else {
    throw new TypeError(
      "Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk
    );
  }
  return this;
};

/**
 * Add a chunk of generated JS to the beginning of this source node.
 *
 * @param aChunk A string snippet of generated JS code, another instance of
 *        SourceNode, or an array where each member is one of those things.
 */
SourceNode.prototype.prepend = function SourceNode_prepend(aChunk) {
  if (Array.isArray(aChunk)) {
    // Iterate backwards so the array's order is preserved at the front.
    for (var i = aChunk.length-1; i >= 0; i--) {
      this.prepend(aChunk[i]);
    }
  }
  else if (aChunk[isSourceNode] || typeof aChunk === "string") {
    this.children.unshift(aChunk);
  }
  else {
    throw new TypeError(
      "Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk
    );
  }
  return this;
};

/**
 * Walk over the tree of JS snippets in this node and its children. The
 * walking function is called once for each snippet of JS and is passed that
 * snippet and the its original associated source's line/column location.
 *
 * @param aFn The traversal function.
 */
SourceNode.prototype.walk = function SourceNode_walk(aFn) {
  var chunk;
  for (var i = 0, len = this.children.length; i < len; i++) {
    chunk = this.children[i];
    if (chunk[isSourceNode]) {
      chunk.walk(aFn);
    }
    else {
      if (chunk !== '') {
        aFn(chunk, { source: this.source,
                     line: this.line,
                     column: this.column,
                     name: this.name });
      }
    }
  }
};

/**
 * Like `String.prototype.join` except for SourceNodes. Inserts `aStr` between
 * each of `this.children`.
 *
 * @param aSep The separator.
 */
SourceNode.prototype.join = function SourceNode_join(aSep) {
  var newChildren;
  var i;
  var len = this.children.length;
  if (len > 0) {
    newChildren = [];
    for (i = 0; i < len-1; i++) {
      newChildren.push(this.children[i]);
      newChildren.push(aSep);
    }
    newChildren.push(this.children[i]);
    this.children = newChildren;
  }
  return this;
};

/**
 * Call String.prototype.replace on the very right-most source snippet. Useful
 * for trimming whitespace from the end of a source node, etc.
 *
 * @param aPattern The pattern to replace.
 * @param aReplacement The thing to replace the pattern with.
 */
SourceNode.prototype.replaceRight = function SourceNode_replaceRight(aPattern, aReplacement) {
  var lastChild = this.children[this.children.length - 1];
  if (lastChild[isSourceNode]) {
    lastChild.replaceRight(aPattern, aReplacement);
  }
  else if (typeof lastChild === 'string') {
    this.children[this.children.length - 1] = lastChild.replace(aPattern, aReplacement);
  }
  else {
    // No children: run the replacement on an empty string so a replacement
    // that matches "" can still contribute output.
    this.children.push(''.replace(aPattern, aReplacement));
  }
  return this;
};

/**
 * Set the source content for a source file. This will be added to the SourceMapGenerator
 * in the sourcesContent field.
 *
 * @param aSourceFile The filename of the source file
 * @param aSourceContent The content of the source file
 */
SourceNode.prototype.setSourceContent =
  function SourceNode_setSourceContent(aSourceFile, aSourceContent) {
    this.sourceContents[util.toSetString(aSourceFile)] = aSourceContent;
  };

/**
 * Walk over the tree of SourceNodes. The walking function is called for each
 * source file content and is passed the filename and source content.
 *
 * @param aFn The traversal function.
 */
SourceNode.prototype.walkSourceContents =
  function SourceNode_walkSourceContents(aFn) {
    for (var i = 0, len = this.children.length; i < len; i++) {
      if (this.children[i][isSourceNode]) {
        this.children[i].walkSourceContents(aFn);
      }
    }

    var sources = Object.keys(this.sourceContents);
    for (var i = 0, len = sources.length; i < len; i++) {
      aFn(util.fromSetString(sources[i]), this.sourceContents[sources[i]]);
    }
  };

/**
 * Return the string representation of this source node. Walks over the tree
 * and concatenates all the various snippets together to one string.
 */
SourceNode.prototype.toString = function SourceNode_toString() {
  var str = "";
  this.walk(function (chunk) {
    str += chunk;
  });
  return str;
};

/**
 * Returns the string representation of this source node along with a source
 * map.
 */
SourceNode.prototype.toStringWithSourceMap = function SourceNode_toStringWithSourceMap(aArgs) {
  // Running position in the concatenated output.
  var generated = {
    code: "",
    line: 1,
    column: 0
  };
  var map = new SourceMapGenerator(aArgs);
  var sourceMappingActive = false;
  // Previous chunk's original position — used to avoid emitting a duplicate
  // mapping when consecutive chunks share the same origin.
  var lastOriginalSource = null;
  var lastOriginalLine = null;
  var lastOriginalColumn = null;
  var lastOriginalName = null;
  this.walk(function (chunk, original) {
    generated.code += chunk;
    if (original.source !== null
        && original.line !== null
        && original.column !== null) {
      if(lastOriginalSource !== original.source
         || lastOriginalLine !== original.line
         || lastOriginalColumn !== original.column
         || lastOriginalName !== original.name) {
        map.addMapping({
          source: original.source,
          original: {
            line: original.line,
            column: original.column
          },
          generated: {
            line: generated.line,
            column: generated.column
          },
          name: original.name
        });
      }
      lastOriginalSource = original.source;
      lastOriginalLine = original.line;
      lastOriginalColumn = original.column;
      lastOriginalName = original.name;
      sourceMappingActive = true;
    } else if (sourceMappingActive) {
      // Unmapped chunk after a mapped one: emit a generated-only mapping to
      // terminate the previous range.
      map.addMapping({
        generated: {
          line: generated.line,
          column: generated.column
        }
      });
      lastOriginalSource = null;
      sourceMappingActive = false;
    }
    // Advance the generated line/column counters through the chunk, re-issuing
    // the active mapping at the start of each new line.
    for (var idx = 0, length = chunk.length; idx < length; idx++) {
      if (chunk.charCodeAt(idx) === NEWLINE_CODE) {
        generated.line++;
        generated.column = 0;
        // Mappings end at eol
        if (idx + 1 === length) {
          lastOriginalSource = null;
          sourceMappingActive = false;
        } else if (sourceMappingActive) {
          map.addMapping({
            source: original.source,
            original: {
              line: original.line,
              column: original.column
            },
            generated: {
              line: generated.line,
              column: generated.column
            },
            name: original.name
          });
        }
      } else {
        generated.column++;
      }
    }
  });
  this.walkSourceContents(function (sourceFile, sourceContent) {
    map.setSourceContent(sourceFile, sourceContent);
  });

  return { code: generated.code, map: map };
};

exports.SourceNode = SourceNode;


/***/ })
/******/ ])
});
;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/dist/source-map.js
source-map.js
!function(e,n){"object"==typeof exports&&"object"==typeof module?module.exports=n():"function"==typeof define&&define.amd?define([],n):"object"==typeof exports?exports.sourceMap=n():e.sourceMap=n()}(this,function(){return function(e){function n(t){if(r[t])return r[t].exports;var o=r[t]={exports:{},id:t,loaded:!1};return e[t].call(o.exports,o,o.exports,n),o.loaded=!0,o.exports}var r={};return n.m=e,n.c=r,n.p="",n(0)}([function(e,n,r){n.SourceMapGenerator=r(1).SourceMapGenerator,n.SourceMapConsumer=r(7).SourceMapConsumer,n.SourceNode=r(10).SourceNode},function(e,n,r){function t(e){e||(e={}),this._file=i.getArg(e,"file",null),this._sourceRoot=i.getArg(e,"sourceRoot",null),this._skipValidation=i.getArg(e,"skipValidation",!1),this._sources=new s,this._names=new s,this._mappings=new a,this._sourcesContents=null}var o=r(2),i=r(4),s=r(5).ArraySet,a=r(6).MappingList;t.prototype._version=3,t.fromSourceMap=function(e){var n=e.sourceRoot,r=new t({file:e.file,sourceRoot:n});return e.eachMapping(function(e){var t={generated:{line:e.generatedLine,column:e.generatedColumn}};null!=e.source&&(t.source=e.source,null!=n&&(t.source=i.relative(n,t.source)),t.original={line:e.originalLine,column:e.originalColumn},null!=e.name&&(t.name=e.name)),r.addMapping(t)}),e.sources.forEach(function(t){var o=t;null!==n&&(o=i.relative(n,t)),r._sources.has(o)||r._sources.add(o);var s=e.sourceContentFor(t);null!=s&&r.setSourceContent(t,s)}),r},t.prototype.addMapping=function(e){var n=i.getArg(e,"generated"),r=i.getArg(e,"original",null),t=i.getArg(e,"source",null),o=i.getArg(e,"name",null);this._skipValidation||this._validateMapping(n,r,t,o),null!=t&&(t=String(t),this._sources.has(t)||this._sources.add(t)),null!=o&&(o=String(o),this._names.has(o)||this._names.add(o)),this._mappings.add({generatedLine:n.line,generatedColumn:n.column,originalLine:null!=r&&r.line,originalColumn:null!=r&&r.column,source:t,name:o})},t.prototype.setSourceContent=function(e,n){var 
r=e;null!=this._sourceRoot&&(r=i.relative(this._sourceRoot,r)),null!=n?(this._sourcesContents||(this._sourcesContents=Object.create(null)),this._sourcesContents[i.toSetString(r)]=n):this._sourcesContents&&(delete this._sourcesContents[i.toSetString(r)],0===Object.keys(this._sourcesContents).length&&(this._sourcesContents=null))},t.prototype.applySourceMap=function(e,n,r){var t=n;if(null==n){if(null==e.file)throw new Error('SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, or the source map\'s "file" property. Both were omitted.');t=e.file}var o=this._sourceRoot;null!=o&&(t=i.relative(o,t));var a=new s,u=new s;this._mappings.unsortedForEach(function(n){if(n.source===t&&null!=n.originalLine){var s=e.originalPositionFor({line:n.originalLine,column:n.originalColumn});null!=s.source&&(n.source=s.source,null!=r&&(n.source=i.join(r,n.source)),null!=o&&(n.source=i.relative(o,n.source)),n.originalLine=s.line,n.originalColumn=s.column,null!=s.name&&(n.name=s.name))}var l=n.source;null==l||a.has(l)||a.add(l);var c=n.name;null==c||u.has(c)||u.add(c)},this),this._sources=a,this._names=u,e.sources.forEach(function(n){var t=e.sourceContentFor(n);null!=t&&(null!=r&&(n=i.join(r,n)),null!=o&&(n=i.relative(o,n)),this.setSourceContent(n,t))},this)},t.prototype._validateMapping=function(e,n,r,t){if(n&&"number"!=typeof n.line&&"number"!=typeof n.column)throw new Error("original.line and original.column are not numbers -- you probably meant to omit the original mapping entirely and only map the generated position. 
If so, pass null for the original mapping instead of an object with empty or null values.");if((!(e&&"line"in e&&"column"in e&&e.line>0&&e.column>=0)||n||r||t)&&!(e&&"line"in e&&"column"in e&&n&&"line"in n&&"column"in n&&e.line>0&&e.column>=0&&n.line>0&&n.column>=0&&r))throw new Error("Invalid mapping: "+JSON.stringify({generated:e,source:r,original:n,name:t}))},t.prototype._serializeMappings=function(){for(var e,n,r,t,s=0,a=1,u=0,l=0,c=0,g=0,p="",h=this._mappings.toArray(),f=0,d=h.length;f<d;f++){if(n=h[f],e="",n.generatedLine!==a)for(s=0;n.generatedLine!==a;)e+=";",a++;else if(f>0){if(!i.compareByGeneratedPositionsInflated(n,h[f-1]))continue;e+=","}e+=o.encode(n.generatedColumn-s),s=n.generatedColumn,null!=n.source&&(t=this._sources.indexOf(n.source),e+=o.encode(t-g),g=t,e+=o.encode(n.originalLine-1-l),l=n.originalLine-1,e+=o.encode(n.originalColumn-u),u=n.originalColumn,null!=n.name&&(r=this._names.indexOf(n.name),e+=o.encode(r-c),c=r)),p+=e}return p},t.prototype._generateSourcesContent=function(e,n){return e.map(function(e){if(!this._sourcesContents)return null;null!=n&&(e=i.relative(n,e));var r=i.toSetString(e);return Object.prototype.hasOwnProperty.call(this._sourcesContents,r)?this._sourcesContents[r]:null},this)},t.prototype.toJSON=function(){var e={version:this._version,sources:this._sources.toArray(),names:this._names.toArray(),mappings:this._serializeMappings()};return null!=this._file&&(e.file=this._file),null!=this._sourceRoot&&(e.sourceRoot=this._sourceRoot),this._sourcesContents&&(e.sourcesContent=this._generateSourcesContent(e.sources,e.sourceRoot)),e},t.prototype.toString=function(){return JSON.stringify(this.toJSON())},n.SourceMapGenerator=t},function(e,n,r){function t(e){return e<0?(-e<<1)+1:(e<<1)+0}function o(e){var n=1===(1&e),r=e>>1;return n?-r:r}var i=r(3),s=5,a=1<<s,u=a-1,l=a;n.encode=function(e){var n,r="",o=t(e);do n=o&u,o>>>=s,o>0&&(n|=l),r+=i.encode(n);while(o>0);return r},n.decode=function(e,n,r){var 
t,a,c=e.length,g=0,p=0;do{if(n>=c)throw new Error("Expected more digits in base 64 VLQ value.");if(a=i.decode(e.charCodeAt(n++)),a===-1)throw new Error("Invalid base64 digit: "+e.charAt(n-1));t=!!(a&l),a&=u,g+=a<<p,p+=s}while(t);r.value=o(g),r.rest=n}},function(e,n){var r="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".split("");n.encode=function(e){if(0<=e&&e<r.length)return r[e];throw new TypeError("Must be between 0 and 63: "+e)},n.decode=function(e){var n=65,r=90,t=97,o=122,i=48,s=57,a=43,u=47,l=26,c=52;return n<=e&&e<=r?e-n:t<=e&&e<=o?e-t+l:i<=e&&e<=s?e-i+c:e==a?62:e==u?63:-1}},function(e,n){function r(e,n,r){if(n in e)return e[n];if(3===arguments.length)return r;throw new Error('"'+n+'" is a required argument.')}function t(e){var n=e.match(v);return n?{scheme:n[1],auth:n[2],host:n[3],port:n[4],path:n[5]}:null}function o(e){var n="";return e.scheme&&(n+=e.scheme+":"),n+="//",e.auth&&(n+=e.auth+"@"),e.host&&(n+=e.host),e.port&&(n+=":"+e.port),e.path&&(n+=e.path),n}function i(e){var r=e,i=t(e);if(i){if(!i.path)return e;r=i.path}for(var s,a=n.isAbsolute(r),u=r.split(/\/+/),l=0,c=u.length-1;c>=0;c--)s=u[c],"."===s?u.splice(c,1):".."===s?l++:l>0&&(""===s?(u.splice(c+1,l),l=0):(u.splice(c,2),l--));return r=u.join("/"),""===r&&(r=a?"/":"."),i?(i.path=r,o(i)):r}function s(e,n){""===e&&(e="."),""===n&&(n=".");var r=t(n),s=t(e);if(s&&(e=s.path||"/"),r&&!r.scheme)return s&&(r.scheme=s.scheme),o(r);if(r||n.match(y))return n;if(s&&!s.host&&!s.path)return s.host=n,o(s);var a="/"===n.charAt(0)?n:i(e.replace(/\/+$/,"")+"/"+n);return s?(s.path=a,o(s)):a}function a(e,n){""===e&&(e="."),e=e.replace(/\/$/,"");for(var r=0;0!==n.indexOf(e+"/");){var t=e.lastIndexOf("/");if(t<0)return n;if(e=e.slice(0,t),e.match(/^([^\/]+:\/)?\/*$/))return n;++r}return Array(r+1).join("../")+n.substr(e.length+1)}function u(e){return e}function l(e){return g(e)?"$"+e:e}function c(e){return g(e)?e.slice(1):e}function g(e){if(!e)return!1;var 
n=e.length;if(n<9)return!1;if(95!==e.charCodeAt(n-1)||95!==e.charCodeAt(n-2)||111!==e.charCodeAt(n-3)||116!==e.charCodeAt(n-4)||111!==e.charCodeAt(n-5)||114!==e.charCodeAt(n-6)||112!==e.charCodeAt(n-7)||95!==e.charCodeAt(n-8)||95!==e.charCodeAt(n-9))return!1;for(var r=n-10;r>=0;r--)if(36!==e.charCodeAt(r))return!1;return!0}function p(e,n,r){var t=f(e.source,n.source);return 0!==t?t:(t=e.originalLine-n.originalLine,0!==t?t:(t=e.originalColumn-n.originalColumn,0!==t||r?t:(t=e.generatedColumn-n.generatedColumn,0!==t?t:(t=e.generatedLine-n.generatedLine,0!==t?t:f(e.name,n.name)))))}function h(e,n,r){var t=e.generatedLine-n.generatedLine;return 0!==t?t:(t=e.generatedColumn-n.generatedColumn,0!==t||r?t:(t=f(e.source,n.source),0!==t?t:(t=e.originalLine-n.originalLine,0!==t?t:(t=e.originalColumn-n.originalColumn,0!==t?t:f(e.name,n.name)))))}function f(e,n){return e===n?0:null===e?1:null===n?-1:e>n?1:-1}function d(e,n){var r=e.generatedLine-n.generatedLine;return 0!==r?r:(r=e.generatedColumn-n.generatedColumn,0!==r?r:(r=f(e.source,n.source),0!==r?r:(r=e.originalLine-n.originalLine,0!==r?r:(r=e.originalColumn-n.originalColumn,0!==r?r:f(e.name,n.name)))))}function m(e){return JSON.parse(e.replace(/^\)]}'[^\n]*\n/,""))}function _(e,n,r){if(n=n||"",e&&("/"!==e[e.length-1]&&"/"!==n[0]&&(e+="/"),n=e+n),r){var a=t(r);if(!a)throw new Error("sourceMapURL could not be parsed");if(a.path){var u=a.path.lastIndexOf("/");u>=0&&(a.path=a.path.substring(0,u+1))}n=s(o(a),n)}return i(n)}n.getArg=r;var v=/^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/,y=/^data:.+\,.+$/;n.urlParse=t,n.urlGenerate=o,n.normalize=i,n.join=s,n.isAbsolute=function(e){return"/"===e.charAt(0)||v.test(e)},n.relative=a;var C=function(){var e=Object.create(null);return!("__proto__"in 
e)}();n.toSetString=C?u:l,n.fromSetString=C?u:c,n.compareByOriginalPositions=p,n.compareByGeneratedPositionsDeflated=h,n.compareByGeneratedPositionsInflated=d,n.parseSourceMapInput=m,n.computeSourceURL=_},function(e,n,r){function t(){this._array=[],this._set=s?new Map:Object.create(null)}var o=r(4),i=Object.prototype.hasOwnProperty,s="undefined"!=typeof Map;t.fromArray=function(e,n){for(var r=new t,o=0,i=e.length;o<i;o++)r.add(e[o],n);return r},t.prototype.size=function(){return s?this._set.size:Object.getOwnPropertyNames(this._set).length},t.prototype.add=function(e,n){var r=s?e:o.toSetString(e),t=s?this.has(e):i.call(this._set,r),a=this._array.length;t&&!n||this._array.push(e),t||(s?this._set.set(e,a):this._set[r]=a)},t.prototype.has=function(e){if(s)return this._set.has(e);var n=o.toSetString(e);return i.call(this._set,n)},t.prototype.indexOf=function(e){if(s){var n=this._set.get(e);if(n>=0)return n}else{var r=o.toSetString(e);if(i.call(this._set,r))return this._set[r]}throw new Error('"'+e+'" is not in the set.')},t.prototype.at=function(e){if(e>=0&&e<this._array.length)return this._array[e];throw new Error("No element indexed by "+e)},t.prototype.toArray=function(){return this._array.slice()},n.ArraySet=t},function(e,n,r){function t(e,n){var r=e.generatedLine,t=n.generatedLine,o=e.generatedColumn,s=n.generatedColumn;return t>r||t==r&&s>=o||i.compareByGeneratedPositionsInflated(e,n)<=0}function o(){this._array=[],this._sorted=!0,this._last={generatedLine:-1,generatedColumn:0}}var i=r(4);o.prototype.unsortedForEach=function(e,n){this._array.forEach(e,n)},o.prototype.add=function(e){t(this._last,e)?(this._last=e,this._array.push(e)):(this._sorted=!1,this._array.push(e))},o.prototype.toArray=function(){return this._sorted||(this._array.sort(i.compareByGeneratedPositionsInflated),this._sorted=!0),this._array},n.MappingList=o},function(e,n,r){function t(e,n){var r=e;return"string"==typeof e&&(r=a.parseSourceMapInput(e)),null!=r.sections?new s(r,n):new 
o(r,n)}function o(e,n){var r=e;"string"==typeof e&&(r=a.parseSourceMapInput(e));var t=a.getArg(r,"version"),o=a.getArg(r,"sources"),i=a.getArg(r,"names",[]),s=a.getArg(r,"sourceRoot",null),u=a.getArg(r,"sourcesContent",null),c=a.getArg(r,"mappings"),g=a.getArg(r,"file",null);if(t!=this._version)throw new Error("Unsupported version: "+t);s&&(s=a.normalize(s)),o=o.map(String).map(a.normalize).map(function(e){return s&&a.isAbsolute(s)&&a.isAbsolute(e)?a.relative(s,e):e}),this._names=l.fromArray(i.map(String),!0),this._sources=l.fromArray(o,!0),this._absoluteSources=this._sources.toArray().map(function(e){return a.computeSourceURL(s,e,n)}),this.sourceRoot=s,this.sourcesContent=u,this._mappings=c,this._sourceMapURL=n,this.file=g}function i(){this.generatedLine=0,this.generatedColumn=0,this.source=null,this.originalLine=null,this.originalColumn=null,this.name=null}function s(e,n){var r=e;"string"==typeof e&&(r=a.parseSourceMapInput(e));var o=a.getArg(r,"version"),i=a.getArg(r,"sections");if(o!=this._version)throw new Error("Unsupported version: "+o);this._sources=new l,this._names=new l;var s={line:-1,column:0};this._sections=i.map(function(e){if(e.url)throw new Error("Support for url field in sections not implemented.");var r=a.getArg(e,"offset"),o=a.getArg(r,"line"),i=a.getArg(r,"column");if(o<s.line||o===s.line&&i<s.column)throw new Error("Section offsets must be ordered and non-overlapping.");return s=r,{generatedOffset:{generatedLine:o+1,generatedColumn:i+1},consumer:new t(a.getArg(e,"map"),n)}})}var a=r(4),u=r(8),l=r(5).ArraySet,c=r(2),g=r(9).quickSort;t.fromSourceMap=function(e,n){return o.fromSourceMap(e,n)},t.prototype._version=3,t.prototype.__generatedMappings=null,Object.defineProperty(t.prototype,"_generatedMappings",{configurable:!0,enumerable:!0,get:function(){return 
this.__generatedMappings||this._parseMappings(this._mappings,this.sourceRoot),this.__generatedMappings}}),t.prototype.__originalMappings=null,Object.defineProperty(t.prototype,"_originalMappings",{configurable:!0,enumerable:!0,get:function(){return this.__originalMappings||this._parseMappings(this._mappings,this.sourceRoot),this.__originalMappings}}),t.prototype._charIsMappingSeparator=function(e,n){var r=e.charAt(n);return";"===r||","===r},t.prototype._parseMappings=function(e,n){throw new Error("Subclasses must implement _parseMappings")},t.GENERATED_ORDER=1,t.ORIGINAL_ORDER=2,t.GREATEST_LOWER_BOUND=1,t.LEAST_UPPER_BOUND=2,t.prototype.eachMapping=function(e,n,r){var o,i=n||null,s=r||t.GENERATED_ORDER;switch(s){case t.GENERATED_ORDER:o=this._generatedMappings;break;case t.ORIGINAL_ORDER:o=this._originalMappings;break;default:throw new Error("Unknown order of iteration.")}var u=this.sourceRoot;o.map(function(e){var n=null===e.source?null:this._sources.at(e.source);return n=a.computeSourceURL(u,n,this._sourceMapURL),{source:n,generatedLine:e.generatedLine,generatedColumn:e.generatedColumn,originalLine:e.originalLine,originalColumn:e.originalColumn,name:null===e.name?null:this._names.at(e.name)}},this).forEach(e,i)},t.prototype.allGeneratedPositionsFor=function(e){var n=a.getArg(e,"line"),r={source:a.getArg(e,"source"),originalLine:n,originalColumn:a.getArg(e,"column",0)};if(r.source=this._findSourceIndex(r.source),r.source<0)return[];var t=[],o=this._findMapping(r,this._originalMappings,"originalLine","originalColumn",a.compareByOriginalPositions,u.LEAST_UPPER_BOUND);if(o>=0){var i=this._originalMappings[o];if(void 0===e.column)for(var s=i.originalLine;i&&i.originalLine===s;)t.push({line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}),i=this._originalMappings[++o];else for(var 
l=i.originalColumn;i&&i.originalLine===n&&i.originalColumn==l;)t.push({line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}),i=this._originalMappings[++o]}return t},n.SourceMapConsumer=t,o.prototype=Object.create(t.prototype),o.prototype.consumer=t,o.prototype._findSourceIndex=function(e){var n=e;if(null!=this.sourceRoot&&(n=a.relative(this.sourceRoot,n)),this._sources.has(n))return this._sources.indexOf(n);var r;for(r=0;r<this._absoluteSources.length;++r)if(this._absoluteSources[r]==e)return r;return-1},o.fromSourceMap=function(e,n){var r=Object.create(o.prototype),t=r._names=l.fromArray(e._names.toArray(),!0),s=r._sources=l.fromArray(e._sources.toArray(),!0);r.sourceRoot=e._sourceRoot,r.sourcesContent=e._generateSourcesContent(r._sources.toArray(),r.sourceRoot),r.file=e._file,r._sourceMapURL=n,r._absoluteSources=r._sources.toArray().map(function(e){return a.computeSourceURL(r.sourceRoot,e,n)});for(var u=e._mappings.toArray().slice(),c=r.__generatedMappings=[],p=r.__originalMappings=[],h=0,f=u.length;h<f;h++){var d=u[h],m=new i;m.generatedLine=d.generatedLine,m.generatedColumn=d.generatedColumn,d.source&&(m.source=s.indexOf(d.source),m.originalLine=d.originalLine,m.originalColumn=d.originalColumn,d.name&&(m.name=t.indexOf(d.name)),p.push(m)),c.push(m)}return g(r.__originalMappings,a.compareByOriginalPositions),r},o.prototype._version=3,Object.defineProperty(o.prototype,"sources",{get:function(){return this._absoluteSources.slice()}}),o.prototype._parseMappings=function(e,n){for(var r,t,o,s,u,l=1,p=0,h=0,f=0,d=0,m=0,_=e.length,v=0,y={},C={},S=[],A=[];v<_;)if(";"===e.charAt(v))l++,v++,p=0;else if(","===e.charAt(v))v++;else{for(r=new i,r.generatedLine=l,s=v;s<_&&!this._charIsMappingSeparator(e,s);s++);if(t=e.slice(v,s),o=y[t])v+=t.length;else{for(o=[];v<s;)c.decode(e,v,C),u=C.value,v=C.rest,o.push(u);if(2===o.length)throw new Error("Found a source, but no line and 
column");if(3===o.length)throw new Error("Found a source and line, but no column");y[t]=o}r.generatedColumn=p+o[0],p=r.generatedColumn,o.length>1&&(r.source=d+o[1],d+=o[1],r.originalLine=h+o[2],h=r.originalLine,r.originalLine+=1,r.originalColumn=f+o[3],f=r.originalColumn,o.length>4&&(r.name=m+o[4],m+=o[4])),A.push(r),"number"==typeof r.originalLine&&S.push(r)}g(A,a.compareByGeneratedPositionsDeflated),this.__generatedMappings=A,g(S,a.compareByOriginalPositions),this.__originalMappings=S},o.prototype._findMapping=function(e,n,r,t,o,i){if(e[r]<=0)throw new TypeError("Line must be greater than or equal to 1, got "+e[r]);if(e[t]<0)throw new TypeError("Column must be greater than or equal to 0, got "+e[t]);return u.search(e,n,o,i)},o.prototype.computeColumnSpans=function(){for(var e=0;e<this._generatedMappings.length;++e){var n=this._generatedMappings[e];if(e+1<this._generatedMappings.length){var r=this._generatedMappings[e+1];if(n.generatedLine===r.generatedLine){n.lastGeneratedColumn=r.generatedColumn-1;continue}}n.lastGeneratedColumn=1/0}},o.prototype.originalPositionFor=function(e){var n={generatedLine:a.getArg(e,"line"),generatedColumn:a.getArg(e,"column")},r=this._findMapping(n,this._generatedMappings,"generatedLine","generatedColumn",a.compareByGeneratedPositionsDeflated,a.getArg(e,"bias",t.GREATEST_LOWER_BOUND));if(r>=0){var o=this._generatedMappings[r];if(o.generatedLine===n.generatedLine){var i=a.getArg(o,"source",null);null!==i&&(i=this._sources.at(i),i=a.computeSourceURL(this.sourceRoot,i,this._sourceMapURL));var s=a.getArg(o,"name",null);return null!==s&&(s=this._names.at(s)),{source:i,line:a.getArg(o,"originalLine",null),column:a.getArg(o,"originalColumn",null),name:s}}}return{source:null,line:null,column:null,name:null}},o.prototype.hasContentsOfAllSources=function(){return!!this.sourcesContent&&(this.sourcesContent.length>=this._sources.size()&&!this.sourcesContent.some(function(e){return 
null==e}))},o.prototype.sourceContentFor=function(e,n){if(!this.sourcesContent)return null;var r=this._findSourceIndex(e);if(r>=0)return this.sourcesContent[r];var t=e;null!=this.sourceRoot&&(t=a.relative(this.sourceRoot,t));var o;if(null!=this.sourceRoot&&(o=a.urlParse(this.sourceRoot))){var i=t.replace(/^file:\/\//,"");if("file"==o.scheme&&this._sources.has(i))return this.sourcesContent[this._sources.indexOf(i)];if((!o.path||"/"==o.path)&&this._sources.has("/"+t))return this.sourcesContent[this._sources.indexOf("/"+t)]}if(n)return null;throw new Error('"'+t+'" is not in the SourceMap.')},o.prototype.generatedPositionFor=function(e){var n=a.getArg(e,"source");if(n=this._findSourceIndex(n),n<0)return{line:null,column:null,lastColumn:null};var r={source:n,originalLine:a.getArg(e,"line"),originalColumn:a.getArg(e,"column")},o=this._findMapping(r,this._originalMappings,"originalLine","originalColumn",a.compareByOriginalPositions,a.getArg(e,"bias",t.GREATEST_LOWER_BOUND));if(o>=0){var i=this._originalMappings[o];if(i.source===r.source)return{line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}}return{line:null,column:null,lastColumn:null}},n.BasicSourceMapConsumer=o,s.prototype=Object.create(t.prototype),s.prototype.constructor=t,s.prototype._version=3,Object.defineProperty(s.prototype,"sources",{get:function(){for(var e=[],n=0;n<this._sections.length;n++)for(var r=0;r<this._sections[n].consumer.sources.length;r++)e.push(this._sections[n].consumer.sources[r]);return e}}),s.prototype.originalPositionFor=function(e){var n={generatedLine:a.getArg(e,"line"),generatedColumn:a.getArg(e,"column")},r=u.search(n,this._sections,function(e,n){var r=e.generatedLine-n.generatedOffset.generatedLine;return r?r:e.generatedColumn-n.generatedOffset.generatedColumn}),t=this._sections[r];return 
t?t.consumer.originalPositionFor({line:n.generatedLine-(t.generatedOffset.generatedLine-1),column:n.generatedColumn-(t.generatedOffset.generatedLine===n.generatedLine?t.generatedOffset.generatedColumn-1:0),bias:e.bias}):{source:null,line:null,column:null,name:null}},s.prototype.hasContentsOfAllSources=function(){return this._sections.every(function(e){return e.consumer.hasContentsOfAllSources()})},s.prototype.sourceContentFor=function(e,n){for(var r=0;r<this._sections.length;r++){var t=this._sections[r],o=t.consumer.sourceContentFor(e,!0);if(o)return o}if(n)return null;throw new Error('"'+e+'" is not in the SourceMap.')},s.prototype.generatedPositionFor=function(e){for(var n=0;n<this._sections.length;n++){var r=this._sections[n];if(r.consumer._findSourceIndex(a.getArg(e,"source"))!==-1){var t=r.consumer.generatedPositionFor(e);if(t){var o={line:t.line+(r.generatedOffset.generatedLine-1),column:t.column+(r.generatedOffset.generatedLine===t.line?r.generatedOffset.generatedColumn-1:0)};return o}}}return{line:null,column:null}},s.prototype._parseMappings=function(e,n){this.__generatedMappings=[],this.__originalMappings=[];for(var r=0;r<this._sections.length;r++)for(var t=this._sections[r],o=t.consumer._generatedMappings,i=0;i<o.length;i++){var s=o[i],u=t.consumer._sources.at(s.source);u=a.computeSourceURL(t.consumer.sourceRoot,u,this._sourceMapURL),this._sources.add(u),u=this._sources.indexOf(u);var l=null;s.name&&(l=t.consumer._names.at(s.name),this._names.add(l),l=this._names.indexOf(l));var c={source:u,generatedLine:s.generatedLine+(t.generatedOffset.generatedLine-1),generatedColumn:s.generatedColumn+(t.generatedOffset.generatedLine===s.generatedLine?t.generatedOffset.generatedColumn-1:0),originalLine:s.originalLine,originalColumn:s.originalColumn,name:l};this.__generatedMappings.push(c),"number"==typeof 
c.originalLine&&this.__originalMappings.push(c)}g(this.__generatedMappings,a.compareByGeneratedPositionsDeflated),g(this.__originalMappings,a.compareByOriginalPositions)},n.IndexedSourceMapConsumer=s},function(e,n){function r(e,t,o,i,s,a){var u=Math.floor((t-e)/2)+e,l=s(o,i[u],!0);return 0===l?u:l>0?t-u>1?r(u,t,o,i,s,a):a==n.LEAST_UPPER_BOUND?t<i.length?t:-1:u:u-e>1?r(e,u,o,i,s,a):a==n.LEAST_UPPER_BOUND?u:e<0?-1:e}n.GREATEST_LOWER_BOUND=1,n.LEAST_UPPER_BOUND=2,n.search=function(e,t,o,i){if(0===t.length)return-1;var s=r(-1,t.length,e,t,o,i||n.GREATEST_LOWER_BOUND);if(s<0)return-1;for(;s-1>=0&&0===o(t[s],t[s-1],!0);)--s;return s}},function(e,n){function r(e,n,r){var t=e[n];e[n]=e[r],e[r]=t}function t(e,n){return Math.round(e+Math.random()*(n-e))}function o(e,n,i,s){if(i<s){var a=t(i,s),u=i-1;r(e,a,s);for(var l=e[s],c=i;c<s;c++)n(e[c],l)<=0&&(u+=1,r(e,u,c));r(e,u+1,c);var g=u+1;o(e,n,i,g-1),o(e,n,g+1,s)}}n.quickSort=function(e,n){o(e,n,0,e.length-1)}},function(e,n,r){function t(e,n,r,t,o){this.children=[],this.sourceContents={},this.line=null==e?null:e,this.column=null==n?null:n,this.source=null==r?null:r,this.name=null==o?null:o,this[u]=!0,null!=t&&this.add(t)}var o=r(1).SourceMapGenerator,i=r(4),s=/(\r?\n)/,a=10,u="$$$isSourceNode$$$";t.fromStringWithSourceMap=function(e,n,r){function o(e,n){if(null===e||void 0===e.source)a.add(n);else{var o=r?i.join(r,e.source):e.source;a.add(new t(e.originalLine,e.originalColumn,o,n,e.name))}}var a=new t,u=e.split(s),l=0,c=function(){function e(){return l<u.length?u[l++]:void 0}var n=e(),r=e()||"";return n+r},g=1,p=0,h=null;return n.eachMapping(function(e){if(null!==h){if(!(g<e.generatedLine)){var n=u[l]||"",r=n.substr(0,e.generatedColumn-p);return u[l]=n.substr(e.generatedColumn-p),p=e.generatedColumn,o(h,r),void(h=e)}o(h,c()),g++,p=0}for(;g<e.generatedLine;)a.add(c()),g++;if(p<e.generatedColumn){var 
n=u[l]||"";a.add(n.substr(0,e.generatedColumn)),u[l]=n.substr(e.generatedColumn),p=e.generatedColumn}h=e},this),l<u.length&&(h&&o(h,c()),a.add(u.splice(l).join(""))),n.sources.forEach(function(e){var t=n.sourceContentFor(e);null!=t&&(null!=r&&(e=i.join(r,e)),a.setSourceContent(e,t))}),a},t.prototype.add=function(e){if(Array.isArray(e))e.forEach(function(e){this.add(e)},this);else{if(!e[u]&&"string"!=typeof e)throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got "+e);e&&this.children.push(e)}return this},t.prototype.prepend=function(e){if(Array.isArray(e))for(var n=e.length-1;n>=0;n--)this.prepend(e[n]);else{if(!e[u]&&"string"!=typeof e)throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got "+e);this.children.unshift(e)}return this},t.prototype.walk=function(e){for(var n,r=0,t=this.children.length;r<t;r++)n=this.children[r],n[u]?n.walk(e):""!==n&&e(n,{source:this.source,line:this.line,column:this.column,name:this.name})},t.prototype.join=function(e){var n,r,t=this.children.length;if(t>0){for(n=[],r=0;r<t-1;r++)n.push(this.children[r]),n.push(e);n.push(this.children[r]),this.children=n}return this},t.prototype.replaceRight=function(e,n){var r=this.children[this.children.length-1];return r[u]?r.replaceRight(e,n):"string"==typeof r?this.children[this.children.length-1]=r.replace(e,n):this.children.push("".replace(e,n)),this},t.prototype.setSourceContent=function(e,n){this.sourceContents[i.toSetString(e)]=n},t.prototype.walkSourceContents=function(e){for(var n=0,r=this.children.length;n<r;n++)this.children[n][u]&&this.children[n].walkSourceContents(e);for(var t=Object.keys(this.sourceContents),n=0,r=t.length;n<r;n++)e(i.fromSetString(t[n]),this.sourceContents[t[n]])},t.prototype.toString=function(){var e="";return this.walk(function(n){e+=n}),e},t.prototype.toStringWithSourceMap=function(e){var n={code:"",line:1,column:0},r=new o(e),t=!1,i=null,s=null,u=null,l=null;return 
this.walk(function(e,o){n.code+=e,null!==o.source&&null!==o.line&&null!==o.column?(i===o.source&&s===o.line&&u===o.column&&l===o.name||r.addMapping({source:o.source,original:{line:o.line,column:o.column},generated:{line:n.line,column:n.column},name:o.name}),i=o.source,s=o.line,u=o.column,l=o.name,t=!0):t&&(r.addMapping({generated:{line:n.line,column:n.column}}),i=null,t=!1);for(var c=0,g=e.length;c<g;c++)e.charCodeAt(c)===a?(n.line++,n.column=0,c+1===g?(i=null,t=!1):t&&r.addMapping({source:o.source,original:{line:o.line,column:o.column},generated:{line:n.line,column:n.column},name:o.name})):n.column++}),this.walkSourceContents(function(e,n){r.setSourceContent(e,n)}),{code:n.code,map:r}},n.SourceNode=t}])}); //# sourceMappingURL=source-map.min.js.map
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/dist/source-map.min.js
source-map.min.js
* Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause */ exports.GREATEST_LOWER_BOUND = 1; exports.LEAST_UPPER_BOUND = 2; /** * Recursive implementation of binary search. * * @param aLow Indices here and lower do not contain the needle. * @param aHigh Indices here and higher do not contain the needle. * @param aNeedle The element being searched for. * @param aHaystack The non-empty array being searched. * @param aCompare Function which takes two elements and returns -1, 0, or 1. * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the * closest element that is smaller than or greater than the one we are * searching for, respectively, if the exact element cannot be found. */ function recursiveSearch(aLow, aHigh, aNeedle, aHaystack, aCompare, aBias) { // This function terminates when one of the following is true: // // 1. We find the exact element we are looking for. // // 2. We did not find the exact element, but we can return the index of // the next-closest element. // // 3. We did not find the exact element, and there is no next-closest // element than the one we are searching for, so we return -1. var mid = Math.floor((aHigh - aLow) / 2) + aLow; var cmp = aCompare(aNeedle, aHaystack[mid], true); if (cmp === 0) { // Found the element we are looking for. return mid; } else if (cmp > 0) { // Our needle is greater than aHaystack[mid]. if (aHigh - mid > 1) { // The element is in the upper half. return recursiveSearch(mid, aHigh, aNeedle, aHaystack, aCompare, aBias); } // The exact needle element was not found in this haystack. Determine if // we are in termination case (3) or (2) and return the appropriate thing. if (aBias == exports.LEAST_UPPER_BOUND) { return aHigh < aHaystack.length ? aHigh : -1; } else { return mid; } } else { // Our needle is less than aHaystack[mid]. 
if (mid - aLow > 1) { // The element is in the lower half. return recursiveSearch(aLow, mid, aNeedle, aHaystack, aCompare, aBias); } // we are in termination case (3) or (2) and return the appropriate thing. if (aBias == exports.LEAST_UPPER_BOUND) { return mid; } else { return aLow < 0 ? -1 : aLow; } } } /** * This is an implementation of binary search which will always try and return * the index of the closest element if there is no exact hit. This is because * mappings between original and generated line/col pairs are single points, * and there is an implicit region between each of them, so a miss just means * that you aren't on the very start of a region. * * @param aNeedle The element you are looking for. * @param aHaystack The array that is being searched. * @param aCompare A function which takes the needle and an element in the * array and returns -1, 0, or 1 depending on whether the needle is less * than, equal to, or greater than the element, respectively. * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the * closest element that is smaller than or greater than the one we are * searching for, respectively, if the exact element cannot be found. * Defaults to 'binarySearch.GREATEST_LOWER_BOUND'. */ exports.search = function search(aNeedle, aHaystack, aCompare, aBias) { if (aHaystack.length === 0) { return -1; } var index = recursiveSearch(-1, aHaystack.length, aNeedle, aHaystack, aCompare, aBias || exports.GREATEST_LOWER_BOUND); if (index < 0) { return -1; } // We have found either the exact element, or the next-closest element than // the one we are searching for. However, there may be more than one such // element. Make sure we always return the smallest of these. while (index - 1 >= 0) { if (aCompare(aHaystack[index], aHaystack[index - 1], true) !== 0) { break; } --index; } return index; };
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/lib/binary-search.js
binary-search.js
* Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause * * Based on the Base 64 VLQ implementation in Closure Compiler: * https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java * * Copyright 2011 The Closure Compiler Authors. All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ var base64 = require('source-map/lib/base64'); // A single base 64 digit can contain 6 bits of data. 
For the base 64 variable // length quantities we use in the source map spec, the first bit is the sign, // the next four bits are the actual value, and the 6th bit is the // continuation bit. The continuation bit tells us whether there are more // digits in this value following this digit. // // Continuation // | Sign // | | // V V // 101011 var VLQ_BASE_SHIFT = 5; // binary: 100000 var VLQ_BASE = 1 << VLQ_BASE_SHIFT; // binary: 011111 var VLQ_BASE_MASK = VLQ_BASE - 1; // binary: 100000 var VLQ_CONTINUATION_BIT = VLQ_BASE; /** * Converts from a two-complement value to a value where the sign bit is * placed in the least significant bit. For example, as decimals: * 1 becomes 2 (10 binary), -1 becomes 3 (11 binary) * 2 becomes 4 (100 binary), -2 becomes 5 (101 binary) */ function toVLQSigned(aValue) { return aValue < 0 ? ((-aValue) << 1) + 1 : (aValue << 1) + 0; } /** * Converts to a two-complement value from a value where the sign bit is * placed in the least significant bit. For example, as decimals: * 2 (10 binary) becomes 1, 3 (11 binary) becomes -1 * 4 (100 binary) becomes 2, 5 (101 binary) becomes -2 */ function fromVLQSigned(aValue) { var isNegative = (aValue & 1) === 1; var shifted = aValue >> 1; return isNegative ? -shifted : shifted; } /** * Returns the base 64 VLQ encoded value. */ exports.encode = function base64VLQ_encode(aValue) { var encoded = ""; var digit; var vlq = toVLQSigned(aValue); do { digit = vlq & VLQ_BASE_MASK; vlq >>>= VLQ_BASE_SHIFT; if (vlq > 0) { // There are still more digits in this value, so we must make sure the // continuation bit is marked. digit |= VLQ_CONTINUATION_BIT; } encoded += base64.encode(digit); } while (vlq > 0); return encoded; }; /** * Decodes the next base 64 VLQ value from the given string and returns the * value and the rest of the string via the out parameter. 
*/ exports.decode = function base64VLQ_decode(aStr, aIndex, aOutParam) { var strLen = aStr.length; var result = 0; var shift = 0; var continuation, digit; do { if (aIndex >= strLen) { throw new Error("Expected more digits in base 64 VLQ value."); } digit = base64.decode(aStr.charCodeAt(aIndex++)); if (digit === -1) { throw new Error("Invalid base64 digit: " + aStr.charAt(aIndex - 1)); } continuation = !!(digit & VLQ_CONTINUATION_BIT); digit &= VLQ_BASE_MASK; result = result + (digit << shift); shift += VLQ_BASE_SHIFT; } while (continuation); aOutParam.value = fromVLQSigned(result); aOutParam.rest = aIndex; };
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/lib/base64-vlq.js
base64-vlq.js
* Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause */ var util = require('source-map/lib/util'); var binarySearch = require('source-map/lib/binary-search'); var ArraySet = require('source-map/lib/array-set').ArraySet; var base64VLQ = require('source-map/lib/base64-vlq'); var quickSort = require('source-map/lib/quick-sort').quickSort; function SourceMapConsumer(aSourceMap, aSourceMapURL) { var sourceMap = aSourceMap; if (typeof aSourceMap === 'string') { sourceMap = util.parseSourceMapInput(aSourceMap); } return sourceMap.sections != null ? new IndexedSourceMapConsumer(sourceMap, aSourceMapURL) : new BasicSourceMapConsumer(sourceMap, aSourceMapURL); } SourceMapConsumer.fromSourceMap = function(aSourceMap, aSourceMapURL) { return BasicSourceMapConsumer.fromSourceMap(aSourceMap, aSourceMapURL); } /** * The version of the source mapping spec that we are consuming. */ SourceMapConsumer.prototype._version = 3; // `__generatedMappings` and `__originalMappings` are arrays that hold the // parsed mapping coordinates from the source map's "mappings" attribute. They // are lazily instantiated, accessed via the `_generatedMappings` and // `_originalMappings` getters respectively, and we only parse the mappings // and create these arrays once queried for a source location. We jump through // these hoops because there can be many thousands of mappings, and parsing // them is expensive, so we only want to do it if we must. 
// // Each object in the arrays is of the form: // // { // generatedLine: The line number in the generated code, // generatedColumn: The column number in the generated code, // source: The path to the original source file that generated this // chunk of code, // originalLine: The line number in the original source that // corresponds to this chunk of generated code, // originalColumn: The column number in the original source that // corresponds to this chunk of generated code, // name: The name of the original symbol which generated this chunk of // code. // } // // All properties except for `generatedLine` and `generatedColumn` can be // `null`. // // `_generatedMappings` is ordered by the generated positions. // // `_originalMappings` is ordered by the original positions. SourceMapConsumer.prototype.__generatedMappings = null; Object.defineProperty(SourceMapConsumer.prototype, '_generatedMappings', { configurable: true, enumerable: true, get: function () { if (!this.__generatedMappings) { this._parseMappings(this._mappings, this.sourceRoot); } return this.__generatedMappings; } }); SourceMapConsumer.prototype.__originalMappings = null; Object.defineProperty(SourceMapConsumer.prototype, '_originalMappings', { configurable: true, enumerable: true, get: function () { if (!this.__originalMappings) { this._parseMappings(this._mappings, this.sourceRoot); } return this.__originalMappings; } }); SourceMapConsumer.prototype._charIsMappingSeparator = function SourceMapConsumer_charIsMappingSeparator(aStr, index) { var c = aStr.charAt(index); return c === ";" || c === ","; }; /** * Parse the mappings in a string in to a data structure which we can easily * query (the ordered arrays in the `this.__generatedMappings` and * `this.__originalMappings` properties). 
*/ SourceMapConsumer.prototype._parseMappings = function SourceMapConsumer_parseMappings(aStr, aSourceRoot) { throw new Error("Subclasses must implement _parseMappings"); }; SourceMapConsumer.GENERATED_ORDER = 1; SourceMapConsumer.ORIGINAL_ORDER = 2; SourceMapConsumer.GREATEST_LOWER_BOUND = 1; SourceMapConsumer.LEAST_UPPER_BOUND = 2; /** * Iterate over each mapping between an original source/line/column and a * generated line/column in this source map. * * @param Function aCallback * The function that is called with each mapping. * @param Object aContext * Optional. If specified, this object will be the value of `this` every * time that `aCallback` is called. * @param aOrder * Either `SourceMapConsumer.GENERATED_ORDER` or * `SourceMapConsumer.ORIGINAL_ORDER`. Specifies whether you want to * iterate over the mappings sorted by the generated file's line/column * order or the original's source/line/column order, respectively. Defaults to * `SourceMapConsumer.GENERATED_ORDER`. */ SourceMapConsumer.prototype.eachMapping = function SourceMapConsumer_eachMapping(aCallback, aContext, aOrder) { var context = aContext || null; var order = aOrder || SourceMapConsumer.GENERATED_ORDER; var mappings; switch (order) { case SourceMapConsumer.GENERATED_ORDER: mappings = this._generatedMappings; break; case SourceMapConsumer.ORIGINAL_ORDER: mappings = this._originalMappings; break; default: throw new Error("Unknown order of iteration."); } var sourceRoot = this.sourceRoot; mappings.map(function (mapping) { var source = mapping.source === null ? null : this._sources.at(mapping.source); source = util.computeSourceURL(sourceRoot, source, this._sourceMapURL); return { source: source, generatedLine: mapping.generatedLine, generatedColumn: mapping.generatedColumn, originalLine: mapping.originalLine, originalColumn: mapping.originalColumn, name: mapping.name === null ? 
null : this._names.at(mapping.name) }; }, this).forEach(aCallback, context); }; /** * Returns all generated line and column information for the original source, * line, and column provided. If no column is provided, returns all mappings * corresponding to a either the line we are searching for or the next * closest line that has any mappings. Otherwise, returns all mappings * corresponding to the given line and either the column we are searching for * or the next closest column that has any offsets. * * The only argument is an object with the following properties: * * - source: The filename of the original source. * - line: The line number in the original source. The line number is 1-based. * - column: Optional. the column number in the original source. * The column number is 0-based. * * and an array of objects is returned, each with the following properties: * * - line: The line number in the generated source, or null. The * line number is 1-based. * - column: The column number in the generated source, or null. * The column number is 0-based. */ SourceMapConsumer.prototype.allGeneratedPositionsFor = function SourceMapConsumer_allGeneratedPositionsFor(aArgs) { var line = util.getArg(aArgs, 'line'); // When there is no exact match, BasicSourceMapConsumer.prototype._findMapping // returns the index of the closest mapping less than the needle. By // setting needle.originalColumn to 0, we thus find the last mapping for // the given line, provided such a mapping exists. 
var needle = { source: util.getArg(aArgs, 'source'), originalLine: line, originalColumn: util.getArg(aArgs, 'column', 0) }; needle.source = this._findSourceIndex(needle.source); if (needle.source < 0) { return []; } var mappings = []; var index = this._findMapping(needle, this._originalMappings, "originalLine", "originalColumn", util.compareByOriginalPositions, binarySearch.LEAST_UPPER_BOUND); if (index >= 0) { var mapping = this._originalMappings[index]; if (aArgs.column === undefined) { var originalLine = mapping.originalLine; // Iterate until either we run out of mappings, or we run into // a mapping for a different line than the one we found. Since // mappings are sorted, this is guaranteed to find all mappings for // the line we found. while (mapping && mapping.originalLine === originalLine) { mappings.push({ line: util.getArg(mapping, 'generatedLine', null), column: util.getArg(mapping, 'generatedColumn', null), lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) }); mapping = this._originalMappings[++index]; } } else { var originalColumn = mapping.originalColumn; // Iterate until either we run out of mappings, or we run into // a mapping for a different line than the one we were searching for. // Since mappings are sorted, this is guaranteed to find all mappings for // the line we are searching for. while (mapping && mapping.originalLine === line && mapping.originalColumn == originalColumn) { mappings.push({ line: util.getArg(mapping, 'generatedLine', null), column: util.getArg(mapping, 'generatedColumn', null), lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) }); mapping = this._originalMappings[++index]; } } } return mappings; }; exports.SourceMapConsumer = SourceMapConsumer; /** * A BasicSourceMapConsumer instance represents a parsed source map which we can * query for information about the original file positions by giving it a file * position in the generated source. 
* * The first parameter is the raw source map (either as a JSON string, or * already parsed to an object). According to the spec, source maps have the * following attributes: * * - version: Which version of the source map spec this map is following. * - sources: An array of URLs to the original source files. * - names: An array of identifiers which can be referrenced by individual mappings. * - sourceRoot: Optional. The URL root from which all sources are relative. * - sourcesContent: Optional. An array of contents of the original source files. * - mappings: A string of base64 VLQs which contain the actual mappings. * - file: Optional. The generated file this source map is associated with. * * Here is an example source map, taken from the source map spec[0]: * * { * version : 3, * file: "out.js", * sourceRoot : "", * sources: ["foo.js", "bar.js"], * names: ["src", "maps", "are", "fun"], * mappings: "AA,AB;;ABCDE;" * } * * The second parameter, if given, is a string whose value is the URL * at which the source map was found. This URL is used to compute the * sources array. * * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1# */ function BasicSourceMapConsumer(aSourceMap, aSourceMapURL) { var sourceMap = aSourceMap; if (typeof aSourceMap === 'string') { sourceMap = util.parseSourceMapInput(aSourceMap); } var version = util.getArg(sourceMap, 'version'); var sources = util.getArg(sourceMap, 'sources'); // Sass 3.3 leaves out the 'names' array, so we deviate from the spec (which // requires the array) to play nice here. 
var names = util.getArg(sourceMap, 'names', []); var sourceRoot = util.getArg(sourceMap, 'sourceRoot', null); var sourcesContent = util.getArg(sourceMap, 'sourcesContent', null); var mappings = util.getArg(sourceMap, 'mappings'); var file = util.getArg(sourceMap, 'file', null); // Once again, Sass deviates from the spec and supplies the version as a // string rather than a number, so we use loose equality checking here. if (version != this._version) { throw new Error('Unsupported version: ' + version); } if (sourceRoot) { sourceRoot = util.normalize(sourceRoot); } sources = sources .map(String) // Some source maps produce relative source paths like "./foo.js" instead of // "foo.js". Normalize these first so that future comparisons will succeed. // See bugzil.la/1090768. .map(util.normalize) // Always ensure that absolute sources are internally stored relative to // the source root, if the source root is absolute. Not doing this would // be particularly problematic when the source root is a prefix of the // source (valid, but why??). See github issue #199 and bugzil.la/1188982. .map(function (source) { return sourceRoot && util.isAbsolute(sourceRoot) && util.isAbsolute(source) ? util.relative(sourceRoot, source) : source; }); // Pass `true` below to allow duplicate names and sources. While source maps // are intended to be compressed and deduplicated, the TypeScript compiler // sometimes generates source maps with duplicates in them. See Github issue // #72 and bugzil.la/889492. 
this._names = ArraySet.fromArray(names.map(String), true); this._sources = ArraySet.fromArray(sources, true); this._absoluteSources = this._sources.toArray().map(function (s) { return util.computeSourceURL(sourceRoot, s, aSourceMapURL); }); this.sourceRoot = sourceRoot; this.sourcesContent = sourcesContent; this._mappings = mappings; this._sourceMapURL = aSourceMapURL; this.file = file; } BasicSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype); BasicSourceMapConsumer.prototype.consumer = SourceMapConsumer; /** * Utility function to find the index of a source. Returns -1 if not * found. */ BasicSourceMapConsumer.prototype._findSourceIndex = function(aSource) { var relativeSource = aSource; if (this.sourceRoot != null) { relativeSource = util.relative(this.sourceRoot, relativeSource); } if (this._sources.has(relativeSource)) { return this._sources.indexOf(relativeSource); } // Maybe aSource is an absolute URL as returned by |sources|. In // this case we can't simply undo the transform. var i; for (i = 0; i < this._absoluteSources.length; ++i) { if (this._absoluteSources[i] == aSource) { return i; } } return -1; }; /** * Create a BasicSourceMapConsumer from a SourceMapGenerator. * * @param SourceMapGenerator aSourceMap * The source map that will be consumed. 
* @param String aSourceMapURL * The URL at which the source map can be found (optional) * @returns BasicSourceMapConsumer */ BasicSourceMapConsumer.fromSourceMap = function SourceMapConsumer_fromSourceMap(aSourceMap, aSourceMapURL) { var smc = Object.create(BasicSourceMapConsumer.prototype); var names = smc._names = ArraySet.fromArray(aSourceMap._names.toArray(), true); var sources = smc._sources = ArraySet.fromArray(aSourceMap._sources.toArray(), true); smc.sourceRoot = aSourceMap._sourceRoot; smc.sourcesContent = aSourceMap._generateSourcesContent(smc._sources.toArray(), smc.sourceRoot); smc.file = aSourceMap._file; smc._sourceMapURL = aSourceMapURL; smc._absoluteSources = smc._sources.toArray().map(function (s) { return util.computeSourceURL(smc.sourceRoot, s, aSourceMapURL); }); // Because we are modifying the entries (by converting string sources and // names to indices into the sources and names ArraySets), we have to make // a copy of the entry or else bad things happen. Shared mutable state // strikes again! See github issue #191. var generatedMappings = aSourceMap._mappings.toArray().slice(); var destGeneratedMappings = smc.__generatedMappings = []; var destOriginalMappings = smc.__originalMappings = []; for (var i = 0, length = generatedMappings.length; i < length; i++) { var srcMapping = generatedMappings[i]; var destMapping = new Mapping; destMapping.generatedLine = srcMapping.generatedLine; destMapping.generatedColumn = srcMapping.generatedColumn; if (srcMapping.source) { destMapping.source = sources.indexOf(srcMapping.source); destMapping.originalLine = srcMapping.originalLine; destMapping.originalColumn = srcMapping.originalColumn; if (srcMapping.name) { destMapping.name = names.indexOf(srcMapping.name); } destOriginalMappings.push(destMapping); } destGeneratedMappings.push(destMapping); } quickSort(smc.__originalMappings, util.compareByOriginalPositions); return smc; }; /** * The version of the source mapping spec that we are consuming. 
*/ BasicSourceMapConsumer.prototype._version = 3; /** * The list of original sources. */ Object.defineProperty(BasicSourceMapConsumer.prototype, 'sources', { get: function () { return this._absoluteSources.slice(); } }); /** * Provide the JIT with a nice shape / hidden class. */ function Mapping() { this.generatedLine = 0; this.generatedColumn = 0; this.source = null; this.originalLine = null; this.originalColumn = null; this.name = null; } /** * Parse the mappings in a string in to a data structure which we can easily * query (the ordered arrays in the `this.__generatedMappings` and * `this.__originalMappings` properties). */ BasicSourceMapConsumer.prototype._parseMappings = function SourceMapConsumer_parseMappings(aStr, aSourceRoot) { var generatedLine = 1; var previousGeneratedColumn = 0; var previousOriginalLine = 0; var previousOriginalColumn = 0; var previousSource = 0; var previousName = 0; var length = aStr.length; var index = 0; var cachedSegments = {}; var temp = {}; var originalMappings = []; var generatedMappings = []; var mapping, str, segment, end, value; while (index < length) { if (aStr.charAt(index) === ';') { generatedLine++; index++; previousGeneratedColumn = 0; } else if (aStr.charAt(index) === ',') { index++; } else { mapping = new Mapping(); mapping.generatedLine = generatedLine; // Because each offset is encoded relative to the previous one, // many segments often have the same encoding. We can exploit this // fact by caching the parsed variable length fields of each segment, // allowing us to avoid a second parse if we encounter the same // segment again. 
for (end = index; end < length; end++) { if (this._charIsMappingSeparator(aStr, end)) { break; } } str = aStr.slice(index, end); segment = cachedSegments[str]; if (segment) { index += str.length; } else { segment = []; while (index < end) { base64VLQ.decode(aStr, index, temp); value = temp.value; index = temp.rest; segment.push(value); } if (segment.length === 2) { throw new Error('Found a source, but no line and column'); } if (segment.length === 3) { throw new Error('Found a source and line, but no column'); } cachedSegments[str] = segment; } // Generated column. mapping.generatedColumn = previousGeneratedColumn + segment[0]; previousGeneratedColumn = mapping.generatedColumn; if (segment.length > 1) { // Original source. mapping.source = previousSource + segment[1]; previousSource += segment[1]; // Original line. mapping.originalLine = previousOriginalLine + segment[2]; previousOriginalLine = mapping.originalLine; // Lines are stored 0-based mapping.originalLine += 1; // Original column. mapping.originalColumn = previousOriginalColumn + segment[3]; previousOriginalColumn = mapping.originalColumn; if (segment.length > 4) { // Original name. mapping.name = previousName + segment[4]; previousName += segment[4]; } } generatedMappings.push(mapping); if (typeof mapping.originalLine === 'number') { originalMappings.push(mapping); } } } quickSort(generatedMappings, util.compareByGeneratedPositionsDeflated); this.__generatedMappings = generatedMappings; quickSort(originalMappings, util.compareByOriginalPositions); this.__originalMappings = originalMappings; }; /** * Find the mapping that best matches the hypothetical "needle" mapping that * we are searching for in the given "haystack" of mappings. 
*/ BasicSourceMapConsumer.prototype._findMapping = function SourceMapConsumer_findMapping(aNeedle, aMappings, aLineName, aColumnName, aComparator, aBias) { // To return the position we are searching for, we must first find the // mapping for the given position and then return the opposite position it // points to. Because the mappings are sorted, we can use binary search to // find the best mapping. if (aNeedle[aLineName] <= 0) { throw new TypeError('Line must be greater than or equal to 1, got ' + aNeedle[aLineName]); } if (aNeedle[aColumnName] < 0) { throw new TypeError('Column must be greater than or equal to 0, got ' + aNeedle[aColumnName]); } return binarySearch.search(aNeedle, aMappings, aComparator, aBias); }; /** * Compute the last column for each generated mapping. The last column is * inclusive. */ BasicSourceMapConsumer.prototype.computeColumnSpans = function SourceMapConsumer_computeColumnSpans() { for (var index = 0; index < this._generatedMappings.length; ++index) { var mapping = this._generatedMappings[index]; // Mappings do not contain a field for the last generated columnt. We // can come up with an optimistic estimate, however, by assuming that // mappings are contiguous (i.e. given two consecutive mappings, the // first mapping ends where the second one starts). if (index + 1 < this._generatedMappings.length) { var nextMapping = this._generatedMappings[index + 1]; if (mapping.generatedLine === nextMapping.generatedLine) { mapping.lastGeneratedColumn = nextMapping.generatedColumn - 1; continue; } } // The last mapping for each line spans the entire line. mapping.lastGeneratedColumn = Infinity; } }; /** * Returns the original source, line, and column information for the generated * source's line and column positions provided. The only argument is an object * with the following properties: * * - line: The line number in the generated source. The line number * is 1-based. * - column: The column number in the generated source. 
The column * number is 0-based. * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the * closest element that is smaller than or greater than the one we are * searching for, respectively, if the exact element cannot be found. * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. * * and an object is returned with the following properties: * * - source: The original source file, or null. * - line: The line number in the original source, or null. The * line number is 1-based. * - column: The column number in the original source, or null. The * column number is 0-based. * - name: The original identifier, or null. */ BasicSourceMapConsumer.prototype.originalPositionFor = function SourceMapConsumer_originalPositionFor(aArgs) { var needle = { generatedLine: util.getArg(aArgs, 'line'), generatedColumn: util.getArg(aArgs, 'column') }; var index = this._findMapping( needle, this._generatedMappings, "generatedLine", "generatedColumn", util.compareByGeneratedPositionsDeflated, util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND) ); if (index >= 0) { var mapping = this._generatedMappings[index]; if (mapping.generatedLine === needle.generatedLine) { var source = util.getArg(mapping, 'source', null); if (source !== null) { source = this._sources.at(source); source = util.computeSourceURL(this.sourceRoot, source, this._sourceMapURL); } var name = util.getArg(mapping, 'name', null); if (name !== null) { name = this._names.at(name); } return { source: source, line: util.getArg(mapping, 'originalLine', null), column: util.getArg(mapping, 'originalColumn', null), name: name }; } } return { source: null, line: null, column: null, name: null }; }; /** * Return true if we have the source content for every source in the source * map, false otherwise. 
*/ BasicSourceMapConsumer.prototype.hasContentsOfAllSources = function BasicSourceMapConsumer_hasContentsOfAllSources() { if (!this.sourcesContent) { return false; } return this.sourcesContent.length >= this._sources.size() && !this.sourcesContent.some(function (sc) { return sc == null; }); }; /** * Returns the original source content. The only argument is the url of the * original source file. Returns null if no original source content is * available. */ BasicSourceMapConsumer.prototype.sourceContentFor = function SourceMapConsumer_sourceContentFor(aSource, nullOnMissing) { if (!this.sourcesContent) { return null; } var index = this._findSourceIndex(aSource); if (index >= 0) { return this.sourcesContent[index]; } var relativeSource = aSource; if (this.sourceRoot != null) { relativeSource = util.relative(this.sourceRoot, relativeSource); } var url; if (this.sourceRoot != null && (url = util.urlParse(this.sourceRoot))) { // XXX: file:// URIs and absolute paths lead to unexpected behavior for // many users. We can help them out when they expect file:// URIs to // behave like it would if they were running a local HTTP server. See // https://bugzilla.mozilla.org/show_bug.cgi?id=885597. var fileUriAbsPath = relativeSource.replace(/^file:\/\//, ""); if (url.scheme == "file" && this._sources.has(fileUriAbsPath)) { return this.sourcesContent[this._sources.indexOf(fileUriAbsPath)] } if ((!url.path || url.path == "/") && this._sources.has("/" + relativeSource)) { return this.sourcesContent[this._sources.indexOf("/" + relativeSource)]; } } // This function is used recursively from // IndexedSourceMapConsumer.prototype.sourceContentFor. In that case, we // don't want to throw if we can't find the source - we just want to // return null, so we provide a flag to exit gracefully. 
if (nullOnMissing) { return null; } else { throw new Error('"' + relativeSource + '" is not in the SourceMap.'); } }; /** * Returns the generated line and column information for the original source, * line, and column positions provided. The only argument is an object with * the following properties: * * - source: The filename of the original source. * - line: The line number in the original source. The line number * is 1-based. * - column: The column number in the original source. The column * number is 0-based. * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the * closest element that is smaller than or greater than the one we are * searching for, respectively, if the exact element cannot be found. * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'. * * and an object is returned with the following properties: * * - line: The line number in the generated source, or null. The * line number is 1-based. * - column: The column number in the generated source, or null. * The column number is 0-based. 
*/ BasicSourceMapConsumer.prototype.generatedPositionFor = function SourceMapConsumer_generatedPositionFor(aArgs) { var source = util.getArg(aArgs, 'source'); source = this._findSourceIndex(source); if (source < 0) { return { line: null, column: null, lastColumn: null }; } var needle = { source: source, originalLine: util.getArg(aArgs, 'line'), originalColumn: util.getArg(aArgs, 'column') }; var index = this._findMapping( needle, this._originalMappings, "originalLine", "originalColumn", util.compareByOriginalPositions, util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND) ); if (index >= 0) { var mapping = this._originalMappings[index]; if (mapping.source === needle.source) { return { line: util.getArg(mapping, 'generatedLine', null), column: util.getArg(mapping, 'generatedColumn', null), lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null) }; } } return { line: null, column: null, lastColumn: null }; }; exports.BasicSourceMapConsumer = BasicSourceMapConsumer; /** * An IndexedSourceMapConsumer instance represents a parsed source map which * we can query for information. It differs from BasicSourceMapConsumer in * that it takes "indexed" source maps (i.e. ones with a "sections" field) as * input. * * The first parameter is a raw source map (either as a JSON string, or already * parsed to an object). According to the spec for indexed source maps, they * have the following attributes: * * - version: Which version of the source map spec this map is following. * - file: Optional. The generated file this source map is associated with. * - sections: A list of section definitions. * * Each value under the "sections" field has two fields: * - offset: The offset into the original specified at which this section * begins to apply, defined as an object with a "line" and "column" * field. * - map: A source map definition. This source map could also be indexed, * but doesn't have to be. 
* * Instead of the "map" field, it's also possible to have a "url" field * specifying a URL to retrieve a source map from, but that's currently * unsupported. * * Here's an example source map, taken from the source map spec[0], but * modified to omit a section which uses the "url" field. * * { * version : 3, * file: "app.js", * sections: [{ * offset: {line:100, column:10}, * map: { * version : 3, * file: "section.js", * sources: ["foo.js", "bar.js"], * names: ["src", "maps", "are", "fun"], * mappings: "AAAA,E;;ABCDE;" * } * }], * } * * The second parameter, if given, is a string whose value is the URL * at which the source map was found. This URL is used to compute the * sources array. * * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.535es3xeprgt */ function IndexedSourceMapConsumer(aSourceMap, aSourceMapURL) { var sourceMap = aSourceMap; if (typeof aSourceMap === 'string') { sourceMap = util.parseSourceMapInput(aSourceMap); } var version = util.getArg(sourceMap, 'version'); var sections = util.getArg(sourceMap, 'sections'); if (version != this._version) { throw new Error('Unsupported version: ' + version); } this._sources = new ArraySet(); this._names = new ArraySet(); var lastOffset = { line: -1, column: 0 }; this._sections = sections.map(function (s) { if (s.url) { // The url field will require support for asynchronicity. // See https://github.com/mozilla/source-map/issues/16 throw new Error('Support for url field in sections not implemented.'); } var offset = util.getArg(s, 'offset'); var offsetLine = util.getArg(offset, 'line'); var offsetColumn = util.getArg(offset, 'column'); if (offsetLine < lastOffset.line || (offsetLine === lastOffset.line && offsetColumn < lastOffset.column)) { throw new Error('Section offsets must be ordered and non-overlapping.'); } lastOffset = offset; return { generatedOffset: { // The offset fields are 0-based, but we use 1-based indices when // encoding/decoding from VLQ. 
generatedLine: offsetLine + 1, generatedColumn: offsetColumn + 1 }, consumer: new SourceMapConsumer(util.getArg(s, 'map'), aSourceMapURL) } }); } IndexedSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype); IndexedSourceMapConsumer.prototype.constructor = SourceMapConsumer; /** * The version of the source mapping spec that we are consuming. */ IndexedSourceMapConsumer.prototype._version = 3; /** * The list of original sources. */ Object.defineProperty(IndexedSourceMapConsumer.prototype, 'sources', { get: function () { var sources = []; for (var i = 0; i < this._sections.length; i++) { for (var j = 0; j < this._sections[i].consumer.sources.length; j++) { sources.push(this._sections[i].consumer.sources[j]); } } return sources; } }); /** * Returns the original source, line, and column information for the generated * source's line and column positions provided. The only argument is an object * with the following properties: * * - line: The line number in the generated source. The line number * is 1-based. * - column: The column number in the generated source. The column * number is 0-based. * * and an object is returned with the following properties: * * - source: The original source file, or null. * - line: The line number in the original source, or null. The * line number is 1-based. * - column: The column number in the original source, or null. The * column number is 0-based. * - name: The original identifier, or null. */ IndexedSourceMapConsumer.prototype.originalPositionFor = function IndexedSourceMapConsumer_originalPositionFor(aArgs) { var needle = { generatedLine: util.getArg(aArgs, 'line'), generatedColumn: util.getArg(aArgs, 'column') }; // Find the section containing the generated position we're trying to map // to an original position. 
var sectionIndex = binarySearch.search(needle, this._sections, function(needle, section) { var cmp = needle.generatedLine - section.generatedOffset.generatedLine; if (cmp) { return cmp; } return (needle.generatedColumn - section.generatedOffset.generatedColumn); }); var section = this._sections[sectionIndex]; if (!section) { return { source: null, line: null, column: null, name: null }; } return section.consumer.originalPositionFor({ line: needle.generatedLine - (section.generatedOffset.generatedLine - 1), column: needle.generatedColumn - (section.generatedOffset.generatedLine === needle.generatedLine ? section.generatedOffset.generatedColumn - 1 : 0), bias: aArgs.bias }); }; /** * Return true if we have the source content for every source in the source * map, false otherwise. */ IndexedSourceMapConsumer.prototype.hasContentsOfAllSources = function IndexedSourceMapConsumer_hasContentsOfAllSources() { return this._sections.every(function (s) { return s.consumer.hasContentsOfAllSources(); }); }; /** * Returns the original source content. The only argument is the url of the * original source file. Returns null if no original source content is * available. */ IndexedSourceMapConsumer.prototype.sourceContentFor = function IndexedSourceMapConsumer_sourceContentFor(aSource, nullOnMissing) { for (var i = 0; i < this._sections.length; i++) { var section = this._sections[i]; var content = section.consumer.sourceContentFor(aSource, true); if (content) { return content; } } if (nullOnMissing) { return null; } else { throw new Error('"' + aSource + '" is not in the SourceMap.'); } }; /** * Returns the generated line and column information for the original source, * line, and column positions provided. The only argument is an object with * the following properties: * * - source: The filename of the original source. * - line: The line number in the original source. The line number * is 1-based. * - column: The column number in the original source. The column * number is 0-based. 
* * and an object is returned with the following properties: * * - line: The line number in the generated source, or null. The * line number is 1-based. * - column: The column number in the generated source, or null. * The column number is 0-based. */ IndexedSourceMapConsumer.prototype.generatedPositionFor = function IndexedSourceMapConsumer_generatedPositionFor(aArgs) { for (var i = 0; i < this._sections.length; i++) { var section = this._sections[i]; // Only consider this section if the requested source is in the list of // sources of the consumer. if (section.consumer._findSourceIndex(util.getArg(aArgs, 'source')) === -1) { continue; } var generatedPosition = section.consumer.generatedPositionFor(aArgs); if (generatedPosition) { var ret = { line: generatedPosition.line + (section.generatedOffset.generatedLine - 1), column: generatedPosition.column + (section.generatedOffset.generatedLine === generatedPosition.line ? section.generatedOffset.generatedColumn - 1 : 0) }; return ret; } } return { line: null, column: null }; }; /** * Parse the mappings in a string in to a data structure which we can easily * query (the ordered arrays in the `this.__generatedMappings` and * `this.__originalMappings` properties). 
*/ IndexedSourceMapConsumer.prototype._parseMappings = function IndexedSourceMapConsumer_parseMappings(aStr, aSourceRoot) { this.__generatedMappings = []; this.__originalMappings = []; for (var i = 0; i < this._sections.length; i++) { var section = this._sections[i]; var sectionMappings = section.consumer._generatedMappings; for (var j = 0; j < sectionMappings.length; j++) { var mapping = sectionMappings[j]; var source = section.consumer._sources.at(mapping.source); source = util.computeSourceURL(section.consumer.sourceRoot, source, this._sourceMapURL); this._sources.add(source); source = this._sources.indexOf(source); var name = null; if (mapping.name) { name = section.consumer._names.at(mapping.name); this._names.add(name); name = this._names.indexOf(name); } // The mappings coming from the consumer for the section have // generated positions relative to the start of the section, so we // need to offset them to be relative to the start of the concatenated // generated file. var adjustedMapping = { source: source, generatedLine: mapping.generatedLine + (section.generatedOffset.generatedLine - 1), generatedColumn: mapping.generatedColumn + (section.generatedOffset.generatedLine === mapping.generatedLine ? section.generatedOffset.generatedColumn - 1 : 0), originalLine: mapping.originalLine, originalColumn: mapping.originalColumn, name: name }; this.__generatedMappings.push(adjustedMapping); if (typeof adjustedMapping.originalLine === 'number') { this.__originalMappings.push(adjustedMapping); } } } quickSort(this.__generatedMappings, util.compareByGeneratedPositionsDeflated); quickSort(this.__originalMappings, util.compareByOriginalPositions); }; exports.IndexedSourceMapConsumer = IndexedSourceMapConsumer;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/lib/source-map-consumer.js
source-map-consumer.js
* Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause */ /** * This is a helper function for getting values from parameter/options * objects. * * @param args The object we are extracting values from * @param name The name of the property we are getting. * @param defaultValue An optional value to return if the property is missing * from the object. If this is not specified and the property is missing, an * error will be thrown. */ function getArg(aArgs, aName, aDefaultValue) { if (aName in aArgs) { return aArgs[aName]; } else if (arguments.length === 3) { return aDefaultValue; } else { throw new Error('"' + aName + '" is a required argument.'); } } exports.getArg = getArg; var urlRegexp = /^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/; var dataUrlRegexp = /^data:.+\,.+$/; function urlParse(aUrl) { var match = aUrl.match(urlRegexp); if (!match) { return null; } return { scheme: match[1], auth: match[2], host: match[3], port: match[4], path: match[5] }; } exports.urlParse = urlParse; function urlGenerate(aParsedUrl) { var url = ''; if (aParsedUrl.scheme) { url += aParsedUrl.scheme + ':'; } url += '//'; if (aParsedUrl.auth) { url += aParsedUrl.auth + '@'; } if (aParsedUrl.host) { url += aParsedUrl.host; } if (aParsedUrl.port) { url += ":" + aParsedUrl.port } if (aParsedUrl.path) { url += aParsedUrl.path; } return url; } exports.urlGenerate = urlGenerate; /** * Normalizes a path, or the path portion of a URL: * * - Replaces consecutive slashes with one slash. * - Removes unnecessary '.' parts. * - Removes unnecessary '<dir>/..' parts. * * Based on code in the Node.js 'path' core module. * * @param aPath The path or url to normalize. 
*/ function normalize(aPath) { var path = aPath; var url = urlParse(aPath); if (url) { if (!url.path) { return aPath; } path = url.path; } var isAbsolute = exports.isAbsolute(path); var parts = path.split(/\/+/); for (var part, up = 0, i = parts.length - 1; i >= 0; i--) { part = parts[i]; if (part === '.') { parts.splice(i, 1); } else if (part === '..') { up++; } else if (up > 0) { if (part === '') { // The first part is blank if the path is absolute. Trying to go // above the root is a no-op. Therefore we can remove all '..' parts // directly after the root. parts.splice(i + 1, up); up = 0; } else { parts.splice(i, 2); up--; } } } path = parts.join('/'); if (path === '') { path = isAbsolute ? '/' : '.'; } if (url) { url.path = path; return urlGenerate(url); } return path; } exports.normalize = normalize; /** * Joins two paths/URLs. * * @param aRoot The root path or URL. * @param aPath The path or URL to be joined with the root. * * - If aPath is a URL or a data URI, aPath is returned, unless aPath is a * scheme-relative URL: Then the scheme of aRoot, if any, is prepended * first. * - Otherwise aPath is a path. If aRoot is a URL, then its path portion * is updated with the result and aRoot is returned. Otherwise the result * is returned. * - If aPath is absolute, the result is aPath. * - Otherwise the two paths are joined with a slash. * - Joining for example 'http://' and 'www.example.com' is also supported. 
*/ function join(aRoot, aPath) { if (aRoot === "") { aRoot = "."; } if (aPath === "") { aPath = "."; } var aPathUrl = urlParse(aPath); var aRootUrl = urlParse(aRoot); if (aRootUrl) { aRoot = aRootUrl.path || '/'; } // `join(foo, '//www.example.org')` if (aPathUrl && !aPathUrl.scheme) { if (aRootUrl) { aPathUrl.scheme = aRootUrl.scheme; } return urlGenerate(aPathUrl); } if (aPathUrl || aPath.match(dataUrlRegexp)) { return aPath; } // `join('http://', 'www.example.com')` if (aRootUrl && !aRootUrl.host && !aRootUrl.path) { aRootUrl.host = aPath; return urlGenerate(aRootUrl); } var joined = aPath.charAt(0) === '/' ? aPath : normalize(aRoot.replace(/\/+$/, '') + '/' + aPath); if (aRootUrl) { aRootUrl.path = joined; return urlGenerate(aRootUrl); } return joined; } exports.join = join; exports.isAbsolute = function (aPath) { return aPath.charAt(0) === '/' || urlRegexp.test(aPath); }; /** * Make a path relative to a URL or another path. * * @param aRoot The root path or URL. * @param aPath The path or URL to be made relative to aRoot. */ function relative(aRoot, aPath) { if (aRoot === "") { aRoot = "."; } aRoot = aRoot.replace(/\/$/, ''); // It is possible for the path to be above the root. In this case, simply // checking whether the root is a prefix of the path won't work. Instead, we // need to remove components from the root one by one, until either we find // a prefix that fits, or we run out of components to remove. var level = 0; while (aPath.indexOf(aRoot + '/') !== 0) { var index = aRoot.lastIndexOf("/"); if (index < 0) { return aPath; } // If the only part of the root that is left is the scheme (i.e. http://, // file:///, etc.), one or more slashes (/), or simply nothing at all, we // have exhausted all components, so the path is not relative to the root. aRoot = aRoot.slice(0, index); if (aRoot.match(/^([^\/]+:\/)?\/*$/)) { return aPath; } ++level; } // Make sure we add a "../" for each component we removed from the root. 
return Array(level + 1).join("../") + aPath.substr(aRoot.length + 1); } exports.relative = relative; var supportsNullProto = (function () { var obj = Object.create(null); return !('__proto__' in obj); }()); function identity (s) { return s; } /** * Because behavior goes wacky when you set `__proto__` on objects, we * have to prefix all the strings in our set with an arbitrary character. * * See https://github.com/mozilla/source-map/pull/31 and * https://github.com/mozilla/source-map/issues/30 * * @param String aStr */ function toSetString(aStr) { if (isProtoString(aStr)) { return '$' + aStr; } return aStr; } exports.toSetString = supportsNullProto ? identity : toSetString; function fromSetString(aStr) { if (isProtoString(aStr)) { return aStr.slice(1); } return aStr; } exports.fromSetString = supportsNullProto ? identity : fromSetString; function isProtoString(s) { if (!s) { return false; } var length = s.length; if (length < 9 /* "__proto__".length */) { return false; } if (s.charCodeAt(length - 1) !== 95 /* '_' */ || s.charCodeAt(length - 2) !== 95 /* '_' */ || s.charCodeAt(length - 3) !== 111 /* 'o' */ || s.charCodeAt(length - 4) !== 116 /* 't' */ || s.charCodeAt(length - 5) !== 111 /* 'o' */ || s.charCodeAt(length - 6) !== 114 /* 'r' */ || s.charCodeAt(length - 7) !== 112 /* 'p' */ || s.charCodeAt(length - 8) !== 95 /* '_' */ || s.charCodeAt(length - 9) !== 95 /* '_' */) { return false; } for (var i = length - 10; i >= 0; i--) { if (s.charCodeAt(i) !== 36 /* '$' */) { return false; } } return true; } /** * Comparator between two mappings where the original positions are compared. * * Optionally pass in `true` as `onlyCompareGenerated` to consider two * mappings with the same original source/line/column, but different generated * line and column the same. Useful when searching for a mapping with a * stubbed out mapping. 
*/ function compareByOriginalPositions(mappingA, mappingB, onlyCompareOriginal) { var cmp = strcmp(mappingA.source, mappingB.source); if (cmp !== 0) { return cmp; } cmp = mappingA.originalLine - mappingB.originalLine; if (cmp !== 0) { return cmp; } cmp = mappingA.originalColumn - mappingB.originalColumn; if (cmp !== 0 || onlyCompareOriginal) { return cmp; } cmp = mappingA.generatedColumn - mappingB.generatedColumn; if (cmp !== 0) { return cmp; } cmp = mappingA.generatedLine - mappingB.generatedLine; if (cmp !== 0) { return cmp; } return strcmp(mappingA.name, mappingB.name); } exports.compareByOriginalPositions = compareByOriginalPositions; /** * Comparator between two mappings with deflated source and name indices where * the generated positions are compared. * * Optionally pass in `true` as `onlyCompareGenerated` to consider two * mappings with the same generated line and column, but different * source/name/original line and column the same. Useful when searching for a * mapping with a stubbed out mapping. 
*/ function compareByGeneratedPositionsDeflated(mappingA, mappingB, onlyCompareGenerated) { var cmp = mappingA.generatedLine - mappingB.generatedLine; if (cmp !== 0) { return cmp; } cmp = mappingA.generatedColumn - mappingB.generatedColumn; if (cmp !== 0 || onlyCompareGenerated) { return cmp; } cmp = strcmp(mappingA.source, mappingB.source); if (cmp !== 0) { return cmp; } cmp = mappingA.originalLine - mappingB.originalLine; if (cmp !== 0) { return cmp; } cmp = mappingA.originalColumn - mappingB.originalColumn; if (cmp !== 0) { return cmp; } return strcmp(mappingA.name, mappingB.name); } exports.compareByGeneratedPositionsDeflated = compareByGeneratedPositionsDeflated; function strcmp(aStr1, aStr2) { if (aStr1 === aStr2) { return 0; } if (aStr1 === null) { return 1; // aStr2 !== null } if (aStr2 === null) { return -1; // aStr1 !== null } if (aStr1 > aStr2) { return 1; } return -1; } /** * Comparator between two mappings with inflated source and name strings where * the generated positions are compared. */ function compareByGeneratedPositionsInflated(mappingA, mappingB) { var cmp = mappingA.generatedLine - mappingB.generatedLine; if (cmp !== 0) { return cmp; } cmp = mappingA.generatedColumn - mappingB.generatedColumn; if (cmp !== 0) { return cmp; } cmp = strcmp(mappingA.source, mappingB.source); if (cmp !== 0) { return cmp; } cmp = mappingA.originalLine - mappingB.originalLine; if (cmp !== 0) { return cmp; } cmp = mappingA.originalColumn - mappingB.originalColumn; if (cmp !== 0) { return cmp; } return strcmp(mappingA.name, mappingB.name); } exports.compareByGeneratedPositionsInflated = compareByGeneratedPositionsInflated; /** * Strip any JSON XSSI avoidance prefix from the string (as documented * in the source maps specification), and then parse the string as * JSON. 
*/ function parseSourceMapInput(str) { return JSON.parse(str.replace(/^\)]}'[^\n]*\n/, '')); } exports.parseSourceMapInput = parseSourceMapInput; /** * Compute the URL of a source given the the source root, the source's * URL, and the source map's URL. */ function computeSourceURL(sourceRoot, sourceURL, sourceMapURL) { sourceURL = sourceURL || ''; if (sourceRoot) { // This follows what Chrome does. if (sourceRoot[sourceRoot.length - 1] !== '/' && sourceURL[0] !== '/') { sourceRoot += '/'; } // The spec says: // Line 4: An optional source root, useful for relocating source // files on a server or removing repeated values in the // “sources” entry. This value is prepended to the individual // entries in the “source” field. sourceURL = sourceRoot + sourceURL; } // Historically, SourceMapConsumer did not take the sourceMapURL as // a parameter. This mode is still somewhat supported, which is why // this code block is conditional. However, it's preferable to pass // the source map URL to SourceMapConsumer, so that this function // can implement the source URL resolution algorithm as outlined in // the spec. This block is basically the equivalent of: // new URL(sourceURL, sourceMapURL).toString() // ... except it avoids using URL, which wasn't available in the // older releases of node still supported by this library. // // The spec says: // If the sources are not absolute URLs after prepending of the // “sourceRoot”, the sources are resolved relative to the // SourceMap (like resolving script src in a html document). if (sourceMapURL) { var parsed = urlParse(sourceMapURL); if (!parsed) { throw new Error("sourceMapURL could not be parsed"); } if (parsed.path) { // Strip the last path component, but keep the "/". var index = parsed.path.lastIndexOf('/'); if (index >= 0) { parsed.path = parsed.path.substring(0, index + 1); } } sourceURL = join(urlGenerate(parsed), sourceURL); } return normalize(sourceURL); } exports.computeSourceURL = computeSourceURL;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/lib/util.js
util.js
/*
 * Copyright 2011 Mozilla Foundation and contributors
 * Licensed under the New BSD license. See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 */

var util = require('source-map/lib/util');
var has = Object.prototype.hasOwnProperty;
var hasNativeMap = typeof Map !== "undefined";

/**
 * An append-only collection combining an array (ordered, indexed access)
 * with a set (O(1) membership and index lookup). Removing elements is not
 * supported; only strings may be members.
 *
 * When a native Map is available it backs the set directly; otherwise a
 * null-prototype object is used with util.toSetString-escaped keys.
 */
function ArraySet() {
  this._array = [];
  this._set = hasNativeMap ? new Map() : Object.create(null);
}

/**
 * Static method for creating ArraySet instances from an existing array.
 */
ArraySet.fromArray = function ArraySet_fromArray(aArray, aAllowDuplicates) {
  var result = new ArraySet();
  var count = aArray.length;
  for (var idx = 0; idx < count; idx++) {
    result.add(aArray[idx], aAllowDuplicates);
  }
  return result;
};

/**
 * Return how many unique items are in this ArraySet. If duplicates have been
 * added, then those do not count towards the size.
 *
 * @returns Number
 */
ArraySet.prototype.size = function ArraySet_size() {
  if (hasNativeMap) {
    return this._set.size;
  }
  return Object.getOwnPropertyNames(this._set).length;
};

/**
 * Add the given string to this set.
 *
 * @param String aStr
 */
ArraySet.prototype.add = function ArraySet_add(aStr, aAllowDuplicates) {
  var key = hasNativeMap ? aStr : util.toSetString(aStr);
  var alreadyPresent = hasNativeMap ? this.has(aStr) : has.call(this._set, key);
  var position = this._array.length;
  // Duplicates may be appended to the array (when allowed) but the set
  // always keeps the index of the FIRST occurrence.
  if (!alreadyPresent || aAllowDuplicates) {
    this._array.push(aStr);
  }
  if (!alreadyPresent) {
    if (hasNativeMap) {
      this._set.set(aStr, position);
    } else {
      this._set[key] = position;
    }
  }
};

/**
 * Is the given string a member of this set?
 *
 * @param String aStr
 */
ArraySet.prototype.has = function ArraySet_has(aStr) {
  if (hasNativeMap) {
    return this._set.has(aStr);
  }
  return has.call(this._set, util.toSetString(aStr));
};

/**
 * What is the index of the given string in the array?
 * Throws when the string is not a member.
 *
 * @param String aStr
 */
ArraySet.prototype.indexOf = function ArraySet_indexOf(aStr) {
  if (hasNativeMap) {
    var found = this._set.get(aStr);
    if (found >= 0) {
      return found;
    }
  } else {
    var key = util.toSetString(aStr);
    if (has.call(this._set, key)) {
      return this._set[key];
    }
  }
  throw new Error('"' + aStr + '" is not in the set.');
};

/**
 * What is the element at the given index? Throws on out-of-range indices.
 *
 * @param Number aIdx
 */
ArraySet.prototype.at = function ArraySet_at(aIdx) {
  if (aIdx >= 0 && aIdx < this._array.length) {
    return this._array[aIdx];
  }
  throw new Error('No element indexed by ' + aIdx);
};

/**
 * Returns the array representation of this set (which has the proper indices
 * indicated by indexOf). Note that this is a copy of the internal array used
 * for storing the members so that no one can mess with internal state.
 */
ArraySet.prototype.toArray = function ArraySet_toArray() {
  return this._array.slice();
};
exports.ArraySet = ArraySet;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/lib/array-set.js
array-set.js
/*
 * Copyright 2011 Mozilla Foundation and contributors
 * Licensed under the New BSD license. See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 */

var SourceMapGenerator = require('source-map/lib/source-map-generator').SourceMapGenerator;
var util = require('source-map/lib/util');

// Matches a Windows-style `\r\n` newline or a `\n` newline used by all other
// operating systems these days (capturing the result).
var REGEX_NEWLINE = /(\r?\n)/;

// Newline character code for charCodeAt() comparisons
var NEWLINE_CODE = 10;

// Private symbol for identifying `SourceNode`s when multiple versions of
// the source-map library are loaded. This MUST NOT CHANGE across
// versions!
var isSourceNode = "$$$isSourceNode$$$";

/**
 * SourceNodes provide a way to abstract over interpolating/concatenating
 * snippets of generated JavaScript source code while maintaining the line and
 * column information associated with the original source code.
 *
 * @param aLine The original line number.
 * @param aColumn The original column number.
 * @param aSource The original source's filename.
 * @param aChunks Optional. An array of strings which are snippets of
 *        generated JS, or other SourceNodes.
 * @param aName The original identifier.
 */
function SourceNode(aLine, aColumn, aSource, aChunks, aName) {
  this.children = [];
  this.sourceContents = {};
  // `== null` deliberately catches both null and undefined.
  this.line = aLine == null ? null : aLine;
  this.column = aColumn == null ? null : aColumn;
  this.source = aSource == null ? null : aSource;
  this.name = aName == null ? null : aName;
  this[isSourceNode] = true;
  if (aChunks != null) this.add(aChunks);
}

/**
 * Creates a SourceNode from generated code and a SourceMapConsumer.
 *
 * @param aGeneratedCode The generated code
 * @param aSourceMapConsumer The SourceMap for the generated code
 * @param aRelativePath Optional. The path that relative sources in the
 *        SourceMapConsumer should be relative to.
 */
SourceNode.fromStringWithSourceMap =
  function SourceNode_fromStringWithSourceMap(aGeneratedCode, aSourceMapConsumer, aRelativePath) {
    // The SourceNode we want to fill with the generated code
    // and the SourceMap
    var node = new SourceNode();

    // All even indices of this array are one line of the generated code,
    // while all odd indices are the newlines between two adjacent lines
    // (since `REGEX_NEWLINE` captures its match).
    // Processed fragments are accessed by calling `shiftNextLine`.
    var remainingLines = aGeneratedCode.split(REGEX_NEWLINE);
    var remainingLinesIndex = 0;
    var shiftNextLine = function() {
      var lineContents = getNextLine();
      // The last line of a file might not have a newline.
      var newLine = getNextLine() || "";
      return lineContents + newLine;

      function getNextLine() {
        return remainingLinesIndex < remainingLines.length ?
                   remainingLines[remainingLinesIndex++] : undefined;
      }
    };

    // We need to remember the position of "remainingLines"
    var lastGeneratedLine = 1, lastGeneratedColumn = 0;

    // To generate SourceNodes we need a code range.
    // To extract it, the current and last mapping are used.
    // Here we store the last mapping.
    var lastMapping = null;

    aSourceMapConsumer.eachMapping(function (mapping) {
      if (lastMapping !== null) {
        // We add the code from "lastMapping" to "mapping":
        // First check if there is a new line in between.
        if (lastGeneratedLine < mapping.generatedLine) {
          // Associate first line with "lastMapping"
          addMappingWithCode(lastMapping, shiftNextLine());
          lastGeneratedLine++;
          lastGeneratedColumn = 0;
          // The remaining code is added without mapping
        } else {
          // There is no new line in between.
          // Associate the code between "lastGeneratedColumn" and
          // "mapping.generatedColumn" with "lastMapping"
          var nextLine = remainingLines[remainingLinesIndex] || '';
          var code = nextLine.substr(0, mapping.generatedColumn -
                                        lastGeneratedColumn);
          remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn -
                                                                lastGeneratedColumn);
          lastGeneratedColumn = mapping.generatedColumn;
          addMappingWithCode(lastMapping, code);
          // No more remaining code, continue
          lastMapping = mapping;
          return;
        }
      }
      // We add the generated code until the first mapping
      // to the SourceNode without any mapping.
      // Each line is added as separate string.
      while (lastGeneratedLine < mapping.generatedLine) {
        node.add(shiftNextLine());
        lastGeneratedLine++;
      }
      if (lastGeneratedColumn < mapping.generatedColumn) {
        var nextLine = remainingLines[remainingLinesIndex] || '';
        node.add(nextLine.substr(0, mapping.generatedColumn));
        remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn);
        lastGeneratedColumn = mapping.generatedColumn;
      }
      lastMapping = mapping;
    }, this);
    // We have processed all mappings.
    if (remainingLinesIndex < remainingLines.length) {
      if (lastMapping) {
        // Associate the remaining code in the current line with "lastMapping"
        addMappingWithCode(lastMapping, shiftNextLine());
      }
      // and add the remaining lines without any mapping
      node.add(remainingLines.splice(remainingLinesIndex).join(""));
    }

    // Copy sourcesContent into SourceNode
    aSourceMapConsumer.sources.forEach(function (sourceFile) {
      var content = aSourceMapConsumer.sourceContentFor(sourceFile);
      if (content != null) {
        if (aRelativePath != null) {
          sourceFile = util.join(aRelativePath, sourceFile);
        }
        node.setSourceContent(sourceFile, content);
      }
    });

    return node;

    // Append `code` to `node`, attaching the original position from
    // `mapping` when one is available (source may legitimately be absent).
    function addMappingWithCode(mapping, code) {
      if (mapping === null || mapping.source === undefined) {
        node.add(code);
      } else {
        var source = aRelativePath ?
          util.join(aRelativePath, mapping.source) :
          mapping.source;
        node.add(new SourceNode(mapping.originalLine,
                                mapping.originalColumn,
                                source,
                                code,
                                mapping.name));
      }
    }
  };

/**
 * Add a chunk of generated JS to this source node.
 *
 * @param aChunk A string snippet of generated JS code, another instance of
 *        SourceNode, or an array where each member is one of those things.
 */
SourceNode.prototype.add = function SourceNode_add(aChunk) {
  if (Array.isArray(aChunk)) {
    aChunk.forEach(function (chunk) {
      this.add(chunk);
    }, this);
  }
  else if (aChunk[isSourceNode] || typeof aChunk === "string") {
    // Empty strings are silently dropped.
    if (aChunk) {
      this.children.push(aChunk);
    }
  }
  else {
    throw new TypeError(
      "Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk
    );
  }
  return this;
};

/**
 * Add a chunk of generated JS to the beginning of this source node.
 *
 * @param aChunk A string snippet of generated JS code, another instance of
 *        SourceNode, or an array where each member is one of those things.
 */
SourceNode.prototype.prepend = function SourceNode_prepend(aChunk) {
  if (Array.isArray(aChunk)) {
    // Iterate backwards so the array's own order is preserved at the front.
    for (var i = aChunk.length-1; i >= 0; i--) {
      this.prepend(aChunk[i]);
    }
  }
  else if (aChunk[isSourceNode] || typeof aChunk === "string") {
    this.children.unshift(aChunk);
  }
  else {
    throw new TypeError(
      "Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk
    );
  }
  return this;
};

/**
 * Walk over the tree of JS snippets in this node and its children. The
 * walking function is called once for each snippet of JS and is passed that
 * snippet and its original associated source's line/column location.
 *
 * @param aFn The traversal function.
 */
SourceNode.prototype.walk = function SourceNode_walk(aFn) {
  var chunk;
  for (var i = 0, len = this.children.length; i < len; i++) {
    chunk = this.children[i];
    if (chunk[isSourceNode]) {
      chunk.walk(aFn);
    }
    else {
      if (chunk !== '') {
        aFn(chunk, { source: this.source,
                     line: this.line,
                     column: this.column,
                     name: this.name });
      }
    }
  }
};

/**
 * Like `String.prototype.join` except for SourceNodes. Inserts `aStr` between
 * each of `this.children`.
 *
 * @param aSep The separator.
 */
SourceNode.prototype.join = function SourceNode_join(aSep) {
  var newChildren;
  var i;
  var len = this.children.length;
  if (len > 0) {
    newChildren = [];
    for (i = 0; i < len-1; i++) {
      newChildren.push(this.children[i]);
      newChildren.push(aSep);
    }
    newChildren.push(this.children[i]);
    this.children = newChildren;
  }
  return this;
};

/**
 * Call String.prototype.replace on the very right-most source snippet. Useful
 * for trimming whitespace from the end of a source node, etc.
 *
 * @param aPattern The pattern to replace.
 * @param aReplacement The thing to replace the pattern with.
 */
SourceNode.prototype.replaceRight = function SourceNode_replaceRight(aPattern, aReplacement) {
  var lastChild = this.children[this.children.length - 1];
  if (lastChild[isSourceNode]) {
    lastChild.replaceRight(aPattern, aReplacement);
  }
  else if (typeof lastChild === 'string') {
    this.children[this.children.length - 1] = lastChild.replace(aPattern, aReplacement);
  }
  else {
    // No children at all: seed with whatever replacing in "" yields.
    this.children.push(''.replace(aPattern, aReplacement));
  }
  return this;
};

/**
 * Set the source content for a source file. This will be added to the SourceMapGenerator
 * in the sourcesContent field.
 *
 * @param aSourceFile The filename of the source file
 * @param aSourceContent The content of the source file
 */
SourceNode.prototype.setSourceContent =
  function SourceNode_setSourceContent(aSourceFile, aSourceContent) {
    this.sourceContents[util.toSetString(aSourceFile)] = aSourceContent;
  };

/**
 * Walk over the tree of SourceNodes. The walking function is called for each
 * source file content and is passed the filename and source content.
 *
 * @param aFn The traversal function.
 */
SourceNode.prototype.walkSourceContents =
  function SourceNode_walkSourceContents(aFn) {
    for (var i = 0, len = this.children.length; i < len; i++) {
      if (this.children[i][isSourceNode]) {
        this.children[i].walkSourceContents(aFn);
      }
    }

    var sources = Object.keys(this.sourceContents);
    for (var i = 0, len = sources.length; i < len; i++) {
      aFn(util.fromSetString(sources[i]), this.sourceContents[sources[i]]);
    }
  };

/**
 * Return the string representation of this source node. Walks over the tree
 * and concatenates all the various snippets together to one string.
 */
SourceNode.prototype.toString = function SourceNode_toString() {
  var str = "";
  this.walk(function (chunk) {
    str += chunk;
  });
  return str;
};

/**
 * Returns the string representation of this source node along with a source
 * map.
 */
SourceNode.prototype.toStringWithSourceMap = function SourceNode_toStringWithSourceMap(aArgs) {
  // Running position in the generated output as we concatenate chunks.
  var generated = {
    code: "",
    line: 1,
    column: 0
  };
  var map = new SourceMapGenerator(aArgs);
  var sourceMappingActive = false;
  // Last original position emitted — used to avoid duplicate mappings.
  var lastOriginalSource = null;
  var lastOriginalLine = null;
  var lastOriginalColumn = null;
  var lastOriginalName = null;
  this.walk(function (chunk, original) {
    generated.code += chunk;
    if (original.source !== null
        && original.line !== null
        && original.column !== null) {
      if (lastOriginalSource !== original.source
          || lastOriginalLine !== original.line
          || lastOriginalColumn !== original.column
          || lastOriginalName !== original.name) {
        map.addMapping({
          source: original.source,
          original: {
            line: original.line,
            column: original.column
          },
          generated: {
            line: generated.line,
            column: generated.column
          },
          name: original.name
        });
      }
      lastOriginalSource = original.source;
      lastOriginalLine = original.line;
      lastOriginalColumn = original.column;
      lastOriginalName = original.name;
      sourceMappingActive = true;
    } else if (sourceMappingActive) {
      // Close the active span with a generated-only mapping.
      map.addMapping({
        generated: {
          line: generated.line,
          column: generated.column
        }
      });
      lastOriginalSource = null;
      sourceMappingActive = false;
    }
    // Advance the generated line/column over the chunk's characters.
    for (var idx = 0, length = chunk.length; idx < length; idx++) {
      if (chunk.charCodeAt(idx) === NEWLINE_CODE) {
        generated.line++;
        generated.column = 0;
        // Mappings end at eol
        if (idx + 1 === length) {
          lastOriginalSource = null;
          sourceMappingActive = false;
        } else if (sourceMappingActive) {
          // Re-emit the mapping at the start of the new generated line.
          map.addMapping({
            source: original.source,
            original: {
              line: original.line,
              column: original.column
            },
            generated: {
              line: generated.line,
              column: generated.column
            },
            name: original.name
          });
        }
      } else {
        generated.column++;
      }
    }
  });
  this.walkSourceContents(function (sourceFile, sourceContent) {
    map.setSourceContent(sourceFile, sourceContent);
  });

  return { code: generated.code, map: map };
};

exports.SourceNode = SourceNode;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/lib/source-node.js
source-node.js
/*
 * Copyright 2011 Mozilla Foundation and contributors
 * Licensed under the New BSD license. See LICENSE or:
 * http://opensource.org/licenses/BSD-3-Clause
 */

var base64VLQ = require('source-map/lib/base64-vlq');
var util = require('source-map/lib/util');
var ArraySet = require('source-map/lib/array-set').ArraySet;
var MappingList = require('source-map/lib/mapping-list').MappingList;

/**
 * An instance of the SourceMapGenerator represents a source map which is
 * being built incrementally. You may pass an object with the following
 * properties:
 *
 *   - file: The filename of the generated source.
 *   - sourceRoot: A root for all relative URLs in this source map.
 */
function SourceMapGenerator(aArgs) {
  if (!aArgs) {
    aArgs = {};
  }
  this._file = util.getArg(aArgs, 'file', null);
  this._sourceRoot = util.getArg(aArgs, 'sourceRoot', null);
  this._skipValidation = util.getArg(aArgs, 'skipValidation', false);
  this._sources = new ArraySet();
  this._names = new ArraySet();
  this._mappings = new MappingList();
  // Lazily created map of set-string source name -> source content.
  this._sourcesContents = null;
}

// Source map spec revision emitted by this generator.
SourceMapGenerator.prototype._version = 3;

/**
 * Creates a new SourceMapGenerator based on a SourceMapConsumer
 *
 * @param aSourceMapConsumer The SourceMap.
 */
SourceMapGenerator.fromSourceMap =
  function SourceMapGenerator_fromSourceMap(aSourceMapConsumer) {
    var sourceRoot = aSourceMapConsumer.sourceRoot;
    var generator = new SourceMapGenerator({
      file: aSourceMapConsumer.file,
      sourceRoot: sourceRoot
    });
    aSourceMapConsumer.eachMapping(function (mapping) {
      var newMapping = {
        generated: {
          line: mapping.generatedLine,
          column: mapping.generatedColumn
        }
      };

      if (mapping.source != null) {
        newMapping.source = mapping.source;
        if (sourceRoot != null) {
          newMapping.source = util.relative(sourceRoot, newMapping.source);
        }

        newMapping.original = {
          line: mapping.originalLine,
          column: mapping.originalColumn
        };

        if (mapping.name != null) {
          newMapping.name = mapping.name;
        }
      }

      generator.addMapping(newMapping);
    });
    aSourceMapConsumer.sources.forEach(function (sourceFile) {
      var sourceRelative = sourceFile;
      // NOTE(review): strict `!== null` here (vs `!= null` above) means an
      // undefined sourceRoot still reaches util.relative — confirm upstream intent.
      if (sourceRoot !== null) {
        sourceRelative = util.relative(sourceRoot, sourceFile);
      }

      if (!generator._sources.has(sourceRelative)) {
        generator._sources.add(sourceRelative);
      }

      var content = aSourceMapConsumer.sourceContentFor(sourceFile);
      if (content != null) {
        generator.setSourceContent(sourceFile, content);
      }
    });
    return generator;
  };

/**
 * Add a single mapping from original source line and column to the generated
 * source's line and column for this source map being created. The mapping
 * object should have the following properties:
 *
 *   - generated: An object with the generated line and column positions.
 *   - original: An object with the original line and column positions.
 *   - source: The original source file (relative to the sourceRoot).
 *   - name: An optional original token name for this mapping.
 */
SourceMapGenerator.prototype.addMapping =
  function SourceMapGenerator_addMapping(aArgs) {
    var generated = util.getArg(aArgs, 'generated');
    var original = util.getArg(aArgs, 'original', null);
    var source = util.getArg(aArgs, 'source', null);
    var name = util.getArg(aArgs, 'name', null);

    if (!this._skipValidation) {
      this._validateMapping(generated, original, source, name);
    }

    if (source != null) {
      source = String(source);
      if (!this._sources.has(source)) {
        this._sources.add(source);
      }
    }

    if (name != null) {
      name = String(name);
      if (!this._names.has(name)) {
        this._names.add(name);
      }
    }

    this._mappings.add({
      generatedLine: generated.line,
      generatedColumn: generated.column,
      // `&&` short-circuits to false when original is absent.
      originalLine: original != null && original.line,
      originalColumn: original != null && original.column,
      source: source,
      name: name
    });
  };

/**
 * Set the source content for a source file.
 */
SourceMapGenerator.prototype.setSourceContent =
  function SourceMapGenerator_setSourceContent(aSourceFile, aSourceContent) {
    var source = aSourceFile;
    if (this._sourceRoot != null) {
      source = util.relative(this._sourceRoot, source);
    }

    if (aSourceContent != null) {
      // Add the source content to the _sourcesContents map.
      // Create a new _sourcesContents map if the property is null.
      if (!this._sourcesContents) {
        this._sourcesContents = Object.create(null);
      }
      this._sourcesContents[util.toSetString(source)] = aSourceContent;
    } else if (this._sourcesContents) {
      // Remove the source file from the _sourcesContents map.
      // If the _sourcesContents map is empty, set the property to null.
      delete this._sourcesContents[util.toSetString(source)];
      if (Object.keys(this._sourcesContents).length === 0) {
        this._sourcesContents = null;
      }
    }
  };

/**
 * Applies the mappings of a sub-source-map for a specific source file to the
 * source map being generated. Each mapping to the supplied source file is
 * rewritten using the supplied source map. Note: The resolution for the
 * resulting mappings is the minimum of this map and the supplied map.
 *
 * @param aSourceMapConsumer The source map to be applied.
 * @param aSourceFile Optional. The filename of the source file.
 *        If omitted, SourceMapConsumer's file property will be used.
 * @param aSourceMapPath Optional. The dirname of the path to the source map
 *        to be applied. If relative, it is relative to the SourceMapConsumer.
 *        This parameter is needed when the two source maps aren't in the same
 *        directory, and the source map to be applied contains relative source
 *        paths. If so, those relative source paths need to be rewritten
 *        relative to the SourceMapGenerator.
 */
SourceMapGenerator.prototype.applySourceMap =
  function SourceMapGenerator_applySourceMap(aSourceMapConsumer, aSourceFile, aSourceMapPath) {
    var sourceFile = aSourceFile;
    // If aSourceFile is omitted, we will use the file property of the SourceMap
    if (aSourceFile == null) {
      if (aSourceMapConsumer.file == null) {
        throw new Error(
          'SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, ' +
          'or the source map\'s "file" property. Both were omitted.'
        );
      }
      sourceFile = aSourceMapConsumer.file;
    }
    var sourceRoot = this._sourceRoot;
    // Make "sourceFile" relative if an absolute Url is passed.
    if (sourceRoot != null) {
      sourceFile = util.relative(sourceRoot, sourceFile);
    }
    // Applying the SourceMap can add and remove items from the sources and
    // the names array.
    var newSources = new ArraySet();
    var newNames = new ArraySet();

    // Find mappings for the "sourceFile"
    this._mappings.unsortedForEach(function (mapping) {
      if (mapping.source === sourceFile && mapping.originalLine != null) {
        // Check if it can be mapped by the source map, then update the mapping.
        var original = aSourceMapConsumer.originalPositionFor({
          line: mapping.originalLine,
          column: mapping.originalColumn
        });
        if (original.source != null) {
          // Copy mapping
          mapping.source = original.source;
          if (aSourceMapPath != null) {
            mapping.source = util.join(aSourceMapPath, mapping.source)
          }
          if (sourceRoot != null) {
            mapping.source = util.relative(sourceRoot, mapping.source);
          }
          mapping.originalLine = original.line;
          mapping.originalColumn = original.column;
          if (original.name != null) {
            mapping.name = original.name;
          }
        }
      }

      // Rebuild the source/name sets from the (possibly rewritten) mappings.
      var source = mapping.source;
      if (source != null && !newSources.has(source)) {
        newSources.add(source);
      }

      var name = mapping.name;
      if (name != null && !newNames.has(name)) {
        newNames.add(name);
      }

    }, this);
    this._sources = newSources;
    this._names = newNames;

    // Copy sourcesContents of applied map.
    aSourceMapConsumer.sources.forEach(function (sourceFile) {
      var content = aSourceMapConsumer.sourceContentFor(sourceFile);
      if (content != null) {
        if (aSourceMapPath != null) {
          sourceFile = util.join(aSourceMapPath, sourceFile);
        }
        if (sourceRoot != null) {
          sourceFile = util.relative(sourceRoot, sourceFile);
        }
        this.setSourceContent(sourceFile, content);
      }
    }, this);
  };

/**
 * A mapping can have one of the three levels of data:
 *
 *   1. Just the generated position.
 *   2. The Generated position, original position, and original source.
 *   3. Generated and original position, original source, as well as a name
 *      token.
 *
 * To maintain consistency, we validate that any new mapping being added falls
 * in to one of these categories.
 */
SourceMapGenerator.prototype._validateMapping =
  function SourceMapGenerator_validateMapping(aGenerated, aOriginal, aSource,
                                              aName) {
    // When aOriginal is truthy but has empty values for .line and .column,
    // it is most likely a programmer error. In this case we throw a very
    // specific error message to try to guide them the right way.
    // For example: https://github.com/Polymer/polymer-bundler/pull/519
    // NOTE(review): uses `&&`, so this only fires when BOTH line and column
    // are non-numbers; this matches the upstream source-map implementation.
    if (aOriginal && typeof aOriginal.line !== 'number' && typeof aOriginal.column !== 'number') {
        throw new Error(
            'original.line and original.column are not numbers -- you probably meant to omit ' +
            'the original mapping entirely and only map the generated position. If so, pass ' +
            'null for the original mapping instead of an object with empty or null values.'
        );
    }

    if (aGenerated && 'line' in aGenerated && 'column' in aGenerated
        && aGenerated.line > 0 && aGenerated.column >= 0
        && !aOriginal && !aSource && !aName) {
      // Case 1.
      return;
    }
    else if (aGenerated && 'line' in aGenerated && 'column' in aGenerated
             && aOriginal && 'line' in aOriginal && 'column' in aOriginal
             && aGenerated.line > 0 && aGenerated.column >= 0
             && aOriginal.line > 0 && aOriginal.column >= 0
             && aSource) {
      // Cases 2 and 3.
      return;
    }
    else {
      throw new Error('Invalid mapping: ' + JSON.stringify({
        generated: aGenerated,
        source: aSource,
        original: aOriginal,
        name: aName
      }));
    }
  };

/**
 * Serialize the accumulated mappings in to the stream of base 64 VLQs
 * specified by the source map format.
 */
SourceMapGenerator.prototype._serializeMappings =
  function SourceMapGenerator_serializeMappings() {
    // All fields are delta-encoded against the previous mapping, per the
    // source map v3 "mappings" format.
    var previousGeneratedColumn = 0;
    var previousGeneratedLine = 1;
    var previousOriginalColumn = 0;
    var previousOriginalLine = 0;
    var previousName = 0;
    var previousSource = 0;
    var result = '';
    var next;
    var mapping;
    var nameIdx;
    var sourceIdx;

    var mappings = this._mappings.toArray();
    for (var i = 0, len = mappings.length; i < len; i++) {
      mapping = mappings[i];
      next = ''

      if (mapping.generatedLine !== previousGeneratedLine) {
        previousGeneratedColumn = 0;
        // One ';' per generated line advanced.
        while (mapping.generatedLine !== previousGeneratedLine) {
          next += ';';
          previousGeneratedLine++;
        }
      }
      else {
        if (i > 0) {
          // Skip exact-duplicate consecutive mappings.
          if (!util.compareByGeneratedPositionsInflated(mapping, mappings[i - 1])) {
            continue;
          }
          next += ',';
        }
      }

      next += base64VLQ.encode(mapping.generatedColumn
                                 - previousGeneratedColumn);
      previousGeneratedColumn = mapping.generatedColumn;

      if (mapping.source != null) {
        sourceIdx = this._sources.indexOf(mapping.source);
        next += base64VLQ.encode(sourceIdx - previousSource);
        previousSource = sourceIdx;

        // lines are stored 0-based in SourceMap spec version 3
        next += base64VLQ.encode(mapping.originalLine - 1
                                   - previousOriginalLine);
        previousOriginalLine = mapping.originalLine - 1;

        next += base64VLQ.encode(mapping.originalColumn
                                   - previousOriginalColumn);
        previousOriginalColumn = mapping.originalColumn;

        if (mapping.name != null) {
          nameIdx = this._names.indexOf(mapping.name);
          next += base64VLQ.encode(nameIdx - previousName);
          previousName = nameIdx;
        }
      }

      result += next;
    }

    return result;
  };

// Map each source to its stored content (or null); continues past this chunk.
SourceMapGenerator.prototype._generateSourcesContent =
  function SourceMapGenerator_generateSourcesContent(aSources, aSourceRoot) {
    return aSources.map(function (source) {
      if (!this._sourcesContents) {
        return null;
      }
      if (aSourceRoot != null) {
        source = util.relative(aSourceRoot, source);
      }
      var key = util.toSetString(source);
      return Object.prototype.hasOwnProperty.call(this._sourcesContents, key) ?
this._sourcesContents[key] : null; }, this); }; /** * Externalize the source map. */ SourceMapGenerator.prototype.toJSON = function SourceMapGenerator_toJSON() { var map = { version: this._version, sources: this._sources.toArray(), names: this._names.toArray(), mappings: this._serializeMappings() }; if (this._file != null) { map.file = this._file; } if (this._sourceRoot != null) { map.sourceRoot = this._sourceRoot; } if (this._sourcesContents) { map.sourcesContent = this._generateSourcesContent(map.sources, map.sourceRoot); } return map; }; /** * Render the source map being generated to a string. */ SourceMapGenerator.prototype.toString = function SourceMapGenerator_toString() { return JSON.stringify(this.toJSON()); }; exports.SourceMapGenerator = SourceMapGenerator;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/lib/source-map-generator.js
source-map-generator.js
* Copyright 2011 Mozilla Foundation and contributors * Licensed under the New BSD license. See LICENSE or: * http://opensource.org/licenses/BSD-3-Clause */ // It turns out that some (most?) JavaScript engines don't self-host // `Array.prototype.sort`. This makes sense because C++ will likely remain // faster than JS when doing raw CPU-intensive sorting. However, when using a // custom comparator function, calling back and forth between the VM's C++ and // JIT'd JS is rather slow *and* loses JIT type information, resulting in // worse generated code for the comparator function than would be optimal. In // fact, when sorting with a comparator, these costs outweigh the benefits of // sorting in C++. By using our own JS-implemented Quick Sort (below), we get // a ~3500ms mean speed-up in `bench/bench.html`. /** * Swap the elements indexed by `x` and `y` in the array `ary`. * * @param {Array} ary * The array. * @param {Number} x * The index of the first item. * @param {Number} y * The index of the second item. */ function swap(ary, x, y) { var temp = ary[x]; ary[x] = ary[y]; ary[y] = temp; } /** * Returns a random integer within the range `low .. high` inclusive. * * @param {Number} low * The lower bound on the range. * @param {Number} high * The upper bound on the range. */ function randomIntInRange(low, high) { return Math.round(low + (Math.random() * (high - low))); } /** * The Quick Sort algorithm. * * @param {Array} ary * An array to sort. * @param {function} comparator * Function to use to compare two items. * @param {Number} p * Start index of the array * @param {Number} r * End index of the array */ function doQuickSort(ary, comparator, p, r) { // If our lower bound is less than our upper bound, we (1) partition the // array into two pieces and (2) recurse on each half. If it is not, this is // the empty array and our base case. if (p < r) { // (1) Partitioning. 
// // The partitioning chooses a pivot between `p` and `r` and moves all // elements that are less than or equal to the pivot to the before it, and // all the elements that are greater than it after it. The effect is that // once partition is done, the pivot is in the exact place it will be when // the array is put in sorted order, and it will not need to be moved // again. This runs in O(n) time. // Always choose a random pivot so that an input array which is reverse // sorted does not cause O(n^2) running time. var pivotIndex = randomIntInRange(p, r); var i = p - 1; swap(ary, pivotIndex, r); var pivot = ary[r]; // Immediately after `j` is incremented in this loop, the following hold // true: // // * Every element in `ary[p .. i]` is less than or equal to the pivot. // // * Every element in `ary[i+1 .. j-1]` is greater than the pivot. for (var j = p; j < r; j++) { if (comparator(ary[j], pivot) <= 0) { i += 1; swap(ary, i, j); } } swap(ary, i + 1, j); var q = i + 1; // (2) Recurse on each half. doQuickSort(ary, comparator, p, q - 1); doQuickSort(ary, comparator, q + 1, r); } } /** * Sort the given array in-place with the given comparator function. * * @param {Array} ary * An array to sort. * @param {function} comparator * Function to use to compare two items. */ exports.quickSort = function (ary, comparator) { doQuickSort(ary, comparator, 0, ary.length - 1); };
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/source-map/lib/quick-sort.js
quick-sort.js
# browser-process-hrtime Browser shim for Node.js `process.hrtime()`. See [documentation at nodejs.org](http://nodejs.org/api/process.html#process_process_hrtime) This module does not provide the same level of time precision as node.js, but provides a matching API and response format. ### usage Use hrtime independent of environment (node or browser). It will use `process.hrtime` first and fallback if not present. ```js const hrtime = require('browser-process-hrtime') const start = hrtime() // ... const delta = hrtime(start) ``` ### monkey-patching You can monkey-patch `process.hrtime` for your dependency graph like this: ```js process.hrtime = require('browser-process-hrtime') const coolTool = require('module-that-uses-hrtime-somewhere-in-its-depths') ``` ### note This was originally pull-requested against [node-process](https://github.com/defunctzombie/node-process), but they are trying to stay lean.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/browser-process-hrtime/README.md
README.md
The MIT License (MIT) ===================== Copyright © 2016 Sebastian Mayr Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/w3c-xmlserializer/LICENSE.md
LICENSE.md
# w3c-xmlserializer An XML serializer that follows the [W3C specification](https://w3c.github.io/DOM-Parsing/). This package can be used in Node.js, as long as you feed it a DOM node, e.g. one produced by [jsdom](https://github.com/jsdom/jsdom). ## Basic usage Assume you have a DOM tree rooted at a node `node`. In Node.js, you could create this using [jsdom](https://github.com/jsdom/jsdom) as follows: ```js const { JSDOM } = require("jsdom"); const { document } = new JSDOM().window; const node = document.createElement("akomaNtoso"); ``` Then, you use this package as follows: ```js const serialize = require("w3c-xmlserializer"); console.log(serialize(node)); // => '<akomantoso xmlns="http://www.w3.org/1999/xhtml"></akomantoso>' ``` ## `requireWellFormed` option By default the input DOM tree is not required to be "well-formed"; any given input will serialize to some output string. You can instead require well-formedness via ```js serialize(node, { requireWellFormed: true }); ``` which will cause `Error`s to be thrown when non-well-formed constructs are encountered. [Per the spec](https://w3c.github.io/DOM-Parsing/#dfn-require-well-formed), this largely is about imposing constraints on the names of elements, attributes, etc. As a point of reference, on the web platform: * The [`innerHTML` getter](https://w3c.github.io/DOM-Parsing/#dom-innerhtml-innerhtml) uses the require-well-formed mode, i.e. trying to get the `innerHTML` of non-well-formed subtrees will throw. * The [`xhr.send()` method](https://xhr.spec.whatwg.org/#the-send()-method) does not require well-formedness, i.e. sending non-well-formed `Document`s will serialize and send them anyway.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/w3c-xmlserializer/README.md
README.md
"use strict"; const xnv = require("xml-name-validator"); const attributeUtils = require("w3c-xmlserializer/lib/attributes"); const { NAMESPACES, VOID_ELEMENTS, NODE_TYPES } = require("w3c-xmlserializer/lib/constants"); const XML_CHAR = /^(\x09|\x0A|\x0D|[\x20-\uD7FF]|[\uE000-\uFFFD]|(?:[\uD800-\uDBFF][\uDC00-\uDFFF]))*$/; const PUBID_CHAR = /^(\x20|\x0D|\x0A|[a-zA-Z0-9]|[-'()+,./:=?;!*#@$_%])*$/; function asciiCaseInsensitiveMatch(a, b) { if (a.length !== b.length) { return false; } for (let i = 0; i < a.length; ++i) { if ((a.charCodeAt(i) | 32) !== (b.charCodeAt(i) | 32)) { return false; } } return true; } function recordNamespaceInformation(element, map, prefixMap) { let defaultNamespaceAttrValue = null; for (let i = 0; i < element.attributes.length; ++i) { const attr = element.attributes[i]; if (attr.namespaceURI === NAMESPACES.XMLNS) { if (attr.prefix === null) { defaultNamespaceAttrValue = attr.value; continue; } let namespaceDefinition = attr.value; if (namespaceDefinition === NAMESPACES.XML) { continue; } // This is exactly the other way than the spec says, but that's intended. // All the maps coalesce null to the empty string (explained in the // spec), so instead of doing that every time, just do it once here. 
if (namespaceDefinition === null) { namespaceDefinition = ""; } if ( namespaceDefinition in map && map[namespaceDefinition].includes(attr.localName) ) { continue; } if (!(namespaceDefinition in map)) { map[namespaceDefinition] = []; } map[namespaceDefinition].push(attr.localName); prefixMap[attr.localName] = namespaceDefinition; } } return defaultNamespaceAttrValue; } function serializeDocumentType(node, namespace, prefixMap, requireWellFormed) { if (requireWellFormed && !PUBID_CHAR.test(node.publicId)) { throw new Error("Failed to serialize XML: document type node publicId is not well-formed."); } if ( requireWellFormed && (!XML_CHAR.test(node.systemId) || (node.systemId.includes('"') && node.systemId.includes("'"))) ) { throw new Error("Failed to serialize XML: document type node systemId is not well-formed."); } let markup = `<!DOCTYPE ${node.name}`; if (node.publicId !== "") { markup += ` PUBLIC "${node.publicId}"`; } else if (node.systemId !== "") { markup += " SYSTEM"; } if (node.systemId !== "") { markup += ` "${node.systemId}"`; } return markup + ">"; } function serializeProcessingInstruction( node, namespace, prefixMap, requireWellFormed ) { if ( requireWellFormed && (node.target.includes(":") || asciiCaseInsensitiveMatch(node.target, "xml")) ) { throw new Error("Failed to serialize XML: processing instruction node target is not well-formed."); } if ( requireWellFormed && (!XML_CHAR.test(node.data) || node.data.includes("?>")) ) { throw new Error("Failed to serialize XML: processing instruction node data is not well-formed."); } return `<?${node.target} ${node.data}?>`; } function serializeDocument( node, namespace, prefixMap, requireWellFormed, refs ) { if (requireWellFormed && node.documentElement === null) { throw new Error("Failed to serialize XML: document does not have a document element."); } let serializedDocument = ""; for (const child of node.childNodes) { serializedDocument += xmlSerialization( child, namespace, prefixMap, requireWellFormed, 
refs ); } return serializedDocument; } function serializeDocumentFragment( node, namespace, prefixMap, requireWellFormed, refs ) { let markup = ""; for (const child of node.childNodes) { markup += xmlSerialization( child, namespace, prefixMap, requireWellFormed, refs ); } return markup; } function serializeText(node, namespace, prefixMap, requireWellFormed) { if (requireWellFormed && !XML_CHAR.test(node.data)) { throw new Error("Failed to serialize XML: text node data is not well-formed."); } return node.data .replace(/&/g, "&amp;") .replace(/</g, "&lt;") .replace(/>/g, "&gt;"); } function serializeComment(node, namespace, prefixMap, requireWellFormed) { if (requireWellFormed && !XML_CHAR.test(node.data)) { throw new Error("Failed to serialize XML: comment node data is not well-formed."); } if ( requireWellFormed && (node.data.includes("--") || node.data.endsWith("-")) ) { throw new Error("Failed to serialize XML: found hyphens in illegal places in comment node data."); } return `<!--${node.data}-->`; } function serializeElement(node, namespace, prefixMap, requireWellFormed, refs) { if ( requireWellFormed && (node.localName.includes(":") || !xnv.name(node.localName)) ) { throw new Error("Failed to serialize XML: element node localName is not a valid XML name."); } let markup = "<"; let qualifiedName = ""; let skipEndTag = false; let ignoreNamespaceDefinitionAttr = false; const map = Object.assign({}, prefixMap); const localPrefixesMap = Object.create(null); const localDefaultNamespace = recordNamespaceInformation( node, map, localPrefixesMap ); let inheritedNs = namespace; const ns = node.namespaceURI; if (inheritedNs === ns) { if (localDefaultNamespace !== null) { ignoreNamespaceDefinitionAttr = true; } if (ns === NAMESPACES.XML) { qualifiedName = "xml:" + node.localName; } else { qualifiedName = node.localName; } markup += qualifiedName; } else { let { prefix } = node; let candidatePrefix = attributeUtils.preferredPrefixString(map, ns, prefix); if (prefix === 
"xmlns") { if (requireWellFormed) { throw new Error("Failed to serialize XML: element nodes can't have a prefix of \"xmlns\"."); } candidatePrefix = "xmlns"; } if (candidatePrefix !== null) { qualifiedName = candidatePrefix + ":" + node.localName; if ( localDefaultNamespace !== null && localDefaultNamespace !== NAMESPACES.XML ) { inheritedNs = localDefaultNamespace === "" ? null : localDefaultNamespace; } markup += qualifiedName; } else if (prefix !== null) { if (prefix in localPrefixesMap) { prefix = attributeUtils.generatePrefix(map, ns, refs.prefixIndex++); } if (map[ns]) { map[ns].push(prefix); } else { map[ns] = [prefix]; } qualifiedName = prefix + ":" + node.localName; markup += `${qualifiedName} xmlns:${prefix}="${attributeUtils.serializeAttributeValue( ns, requireWellFormed )}"`; if (localDefaultNamespace !== null) { inheritedNs = localDefaultNamespace === "" ? null : localDefaultNamespace; } } else if (localDefaultNamespace === null || localDefaultNamespace !== ns) { ignoreNamespaceDefinitionAttr = true; qualifiedName = node.localName; inheritedNs = ns; markup += `${qualifiedName} xmlns="${attributeUtils.serializeAttributeValue( ns, requireWellFormed )}"`; } else { qualifiedName = node.localName; inheritedNs = ns; markup += qualifiedName; } } markup += attributeUtils.serializeAttributes( node, map, localPrefixesMap, ignoreNamespaceDefinitionAttr, requireWellFormed, refs ); if ( ns === NAMESPACES.HTML && node.childNodes.length === 0 && VOID_ELEMENTS.has(node.localName) ) { markup += " /"; skipEndTag = true; } else if (ns !== NAMESPACES.HTML && node.childNodes.length === 0) { markup += "/"; skipEndTag = true; } markup += ">"; if (skipEndTag) { return markup; } if (ns === NAMESPACES.HTML && node.localName === "template") { markup += xmlSerialization( node.content, inheritedNs, map, requireWellFormed, refs ); } else { for (const child of node.childNodes) { markup += xmlSerialization( child, inheritedNs, map, requireWellFormed, refs ); } } markup += 
`</${qualifiedName}>`; return markup; } function serializeCDATASection(node) { return "<![CDATA[" + node.data + "]]>"; } /** * @param {{prefixIndex: number}} refs */ function xmlSerialization(node, namespace, prefixMap, requireWellFormed, refs) { switch (node.nodeType) { case NODE_TYPES.ELEMENT_NODE: return serializeElement( node, namespace, prefixMap, requireWellFormed, refs ); case NODE_TYPES.DOCUMENT_NODE: return serializeDocument( node, namespace, prefixMap, requireWellFormed, refs ); case NODE_TYPES.COMMENT_NODE: return serializeComment(node, namespace, prefixMap, requireWellFormed); case NODE_TYPES.TEXT_NODE: return serializeText(node, namespace, prefixMap, requireWellFormed); case NODE_TYPES.DOCUMENT_FRAGMENT_NODE: return serializeDocumentFragment( node, namespace, prefixMap, requireWellFormed, refs ); case NODE_TYPES.DOCUMENT_TYPE_NODE: return serializeDocumentType( node, namespace, prefixMap, requireWellFormed ); case NODE_TYPES.PROCESSING_INSTRUCTION_NODE: return serializeProcessingInstruction( node, namespace, prefixMap, requireWellFormed ); case NODE_TYPES.ATTRIBUTE_NODE: return ""; case NODE_TYPES.CDATA_SECTION_NODE: return serializeCDATASection(node); default: throw new TypeError("Failed to serialize XML: only Nodes can be serialized."); } } module.exports = (root, { requireWellFormed = false } = {}) => { const namespacePrefixMap = Object.create(null); namespacePrefixMap["http://www.w3.org/XML/1998/namespace"] = ["xml"]; return xmlSerialization(root, null, namespacePrefixMap, requireWellFormed, { prefixIndex: 1 }); };
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/w3c-xmlserializer/lib/serialize.js
serialize.js
# fast-json-stable-stringify Deterministic `JSON.stringify()` - a faster version of [@substack](https://github.com/substack)'s json-stable-stringify without [jsonify](https://github.com/substack/jsonify). You can also pass in a custom comparison function. [![Build Status](https://travis-ci.org/epoberezkin/fast-json-stable-stringify.svg?branch=master)](https://travis-ci.org/epoberezkin/fast-json-stable-stringify) [![Coverage Status](https://coveralls.io/repos/github/epoberezkin/fast-json-stable-stringify/badge.svg?branch=master)](https://coveralls.io/github/epoberezkin/fast-json-stable-stringify?branch=master) # example ``` js var stringify = require('fast-json-stable-stringify'); var obj = { c: 8, b: [{z:6,y:5,x:4},7], a: 3 }; console.log(stringify(obj)); ``` output: ``` {"a":3,"b":[{"x":4,"y":5,"z":6},7],"c":8} ``` # methods ``` js var stringify = require('fast-json-stable-stringify') ``` ## var str = stringify(obj, opts) Return a deterministic stringified string `str` from the object `obj`. ## options ### cmp If `opts` is given, you can supply an `opts.cmp` to have a custom comparison function for object keys. Your function `opts.cmp` is called with these parameters: ``` js opts.cmp({ key: akey, value: avalue }, { key: bkey, value: bvalue }) ``` For example, to sort on the object key names in reverse order you could write: ``` js var stringify = require('fast-json-stable-stringify'); var obj = { c: 8, b: [{z:6,y:5,x:4},7], a: 3 }; var s = stringify(obj, function (a, b) { return a.key < b.key ? 1 : -1; }); console.log(s); ``` which results in the output string: ``` {"c":8,"b":[{"z":6,"y":5,"x":4},7],"a":3} ``` Or if you wanted to sort on the object values in reverse order, you could write: ``` var stringify = require('fast-json-stable-stringify'); var obj = { d: 6, c: 5, b: [{z:3,y:2,x:1},9], a: 10 }; var s = stringify(obj, function (a, b) { return a.value < b.value ? 
1 : -1; }); console.log(s); ``` which outputs: ``` {"d":6,"c":5,"b":[{"z":3,"y":2,"x":1},9],"a":10} ``` ### cycles Pass `true` in `opts.cycles` to stringify circular property as `__cycle__` - the result will not be a valid JSON string in this case. TypeError will be thrown in case of circular object without this option. # install With [npm](https://npmjs.org) do: ``` npm install fast-json-stable-stringify ``` # benchmark To run benchmark (requires Node.js 6+): ``` node benchmark ``` Results: ``` fast-json-stable-stringify x 17,189 ops/sec ±1.43% (83 runs sampled) json-stable-stringify x 13,634 ops/sec ±1.39% (85 runs sampled) fast-stable-stringify x 20,212 ops/sec ±1.20% (84 runs sampled) faster-stable-stringify x 15,549 ops/sec ±1.12% (84 runs sampled) The fastest is fast-stable-stringify ``` ## Enterprise support fast-json-stable-stringify package is a part of [Tidelift enterprise subscription](https://tidelift.com/subscription/pkg/npm-fast-json-stable-stringify?utm_source=npm-fast-json-stable-stringify&utm_medium=referral&utm_campaign=enterprise&utm_term=repo) - it provides a centralised commercial support to open-source software users, in addition to the support provided by software maintainers. ## Security contact To report a security vulnerability, please use the [Tidelift security contact](https://tidelift.com/security). Tidelift will coordinate the fix and disclosure. Please do NOT report security vulnerability via GitHub issues. # license [MIT](https://github.com/epoberezkin/fast-json-stable-stringify/blob/master/LICENSE)
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/fast-json-stable-stringify/README.md
README.md
# URI.js URI.js is an [RFC 3986](http://www.ietf.org/rfc/rfc3986.txt) compliant, scheme extendable URI parsing/validating/resolving library for all JavaScript environments (browsers, Node.js, etc). It is also compliant with the IRI ([RFC 3987](http://www.ietf.org/rfc/rfc3987.txt)), IDNA ([RFC 5890](http://www.ietf.org/rfc/rfc5890.txt)), IPv6 Address ([RFC 5952](http://www.ietf.org/rfc/rfc5952.txt)), IPv6 Zone Identifier ([RFC 6874](http://www.ietf.org/rfc/rfc6874.txt)) specifications. URI.js has an extensive test suite, and works in all (Node.js, web) environments. It weighs in at 6.2kb (gzipped, 16kb deflated). ## API ### Parsing URI.parse("uri://user:[email protected]:123/one/two.three?q1=a1&q2=a2#body"); //returns: //{ // scheme : "uri", // userinfo : "user:pass", // host : "example.com", // port : 123, // path : "/one/two.three", // query : "q1=a1&q2=a2", // fragment : "body" //} ### Serializing URI.serialize({scheme : "http", host : "example.com", fragment : "footer"}) === "http://example.com/#footer" ### Resolving URI.resolve("uri://a/b/c/d?q", "../../g") === "uri://a/g" ### Normalizing URI.normalize("HTTP://ABC.com:80/%7Esmith/home.html") === "http://abc.com/~smith/home.html" ### Comparison URI.equal("example://a/b/c/%7Bfoo%7D", "eXAMPLE://a/./b/../b/%63/%7bfoo%7d") === true ### IP Support //IPv4 normalization URI.normalize("//192.068.001.000") === "//192.68.1.0" //IPv6 normalization URI.normalize("//[2001:0:0DB8::0:0001]") === "//[2001:0:db8::1]" //IPv6 zone identifier support URI.parse("//[2001:db8::7%25en1]"); //returns: //{ // host : "2001:db8::7%en1" //} ### IRI Support //convert IRI to URI URI.serialize(URI.parse("http://examplé.org/rosé")) === "http://xn--exampl-gva.org/ros%C3%A9" //convert URI to IRI URI.serialize(URI.parse("http://xn--exampl-gva.org/ros%C3%A9"), {iri:true}) === "http://examplé.org/rosé" ### Options All of the above functions can accept an additional options argument that is an object that can contain one or more of the following 
properties: * `scheme` (string) Indicates the scheme that the URI should be treated as, overriding the URI's normal scheme parsing behavior. * `reference` (string) If set to `"suffix"`, it indicates that the URI is in the suffix format, and the validator will use the option's `scheme` property to determine the URI's scheme. * `tolerant` (boolean, false) If set to `true`, the parser will relax URI resolving rules. * `absolutePath` (boolean, false) If set to `true`, the serializer will not resolve a relative `path` component. * `iri` (boolean, false) If set to `true`, the serializer will unescape non-ASCII characters as per [RFC 3987](http://www.ietf.org/rfc/rfc3987.txt). * `unicodeSupport` (boolean, false) If set to `true`, the parser will unescape non-ASCII characters in the parsed output as per [RFC 3987](http://www.ietf.org/rfc/rfc3987.txt). * `domainHost` (boolean, false) If set to `true`, the library will treat the `host` component as a domain name, and convert IDNs (International Domain Names) as per [RFC 5891](http://www.ietf.org/rfc/rfc5891.txt). ## Scheme Extendable URI.js supports inserting custom [scheme](http://en.wikipedia.org/wiki/URI_scheme) dependent processing rules. Currently, URI.js has built in support for the following schemes: * http \[[RFC 2616](http://www.ietf.org/rfc/rfc2616.txt)\] * https \[[RFC 2818](http://www.ietf.org/rfc/rfc2818.txt)\] * mailto \[[RFC 6068](http://www.ietf.org/rfc/rfc6068.txt)\] * urn \[[RFC 2141](http://www.ietf.org/rfc/rfc2141.txt)\] * urn:uuid \[[RFC 4122](http://www.ietf.org/rfc/rfc4122.txt)\] ### HTTP Support URI.equal("HTTP://ABC.COM:80", "http://abc.com/") === true ### Mailto Support URI.parse("mailto:[email protected],[email protected]?subject=SUBSCRIBE&body=Sign%20me%20up!"); //returns: //{ // scheme : "mailto", // to : ["[email protected]", "[email protected]"], // subject : "SUBSCRIBE", // body : "Sign me up!" 
//} URI.serialize({ scheme : "mailto", to : ["[email protected]"], subject : "REMOVE", body : "Please remove me", headers : { cc : "[email protected]" } }) === "mailto:[email protected][email protected]&subject=REMOVE&body=Please%20remove%20me" ### URN Support URI.parse("urn:example:foo"); //returns: //{ // scheme : "urn", // nid : "example", // nss : "foo", //} #### URN UUID Support URI.parse("urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6"); //returns: //{ // scheme : "urn", // nid : "uuid", // uuid : "f81d4fae-7dec-11d0-a765-00a0c91e6bf6", //} ## Usage To load in a browser, use the following tag: <script type="text/javascript" src="uri-js/dist/es5/uri.all.min.js"></script> To load in a CommonJS (Node.js) environment, first install with npm by running on the command line: npm install uri-js Then, in your code, load it using: const URI = require("uri-js"); If you are writing your code in ES6+ (ESNEXT) or TypeScript, you would load it using: import * as URI from "uri-js"; Or you can load just what you need using named exports: import { parse, serialize, resolve, resolveComponents, normalize, equal, removeDotSegments, pctEncChar, pctDecChars, escapeComponent, unescapeComponent } from "uri-js"; ## Breaking changes ### Breaking changes from 3.x URN parsing has been completely changed to better align with the specification. Scheme is now always `urn`, but has two new properties: `nid` which contains the Namespace Identifier, and `nss` which contains the Namespace Specific String. The `nss` property will be removed by higher order scheme handlers, such as the UUID URN scheme handler. The UUID of a URN can now be found in the `uuid` property. ### Breaking changes from 2.x URI validation has been removed as it was slow, exposed a vulnerability, and was generally not useful. ### Breaking changes from 1.x The `errors` array on parsed components is now an `error` string. 
## License ([Simplified BSD](http://en.wikipedia.org/wiki/BSD_licenses#2-clause)) Copyright 2011 Gary Court. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY GARY COURT "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GARY COURT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Gary Court.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/README.md
README.md
* Copyright 2011 Gary Court. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY GARY COURT ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GARY COURT OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of Gary Court. 
*/ import URI_PROTOCOL from "uri-js/src/regexps-uri"; import IRI_PROTOCOL from "uri-js/src/regexps-iri"; import punycode from "punycode"; import { toUpperCase, typeOf, assign } from "uri-js/src/util"; export interface URIComponents { scheme?:string; userinfo?:string; host?:string; port?:number|string; path?:string; query?:string; fragment?:string; reference?:string; error?:string; } export interface URIOptions { scheme?:string; reference?:string; tolerant?:boolean; absolutePath?:boolean; iri?:boolean; unicodeSupport?:boolean; domainHost?:boolean; } export interface URISchemeHandler<Components extends URIComponents = URIComponents, Options extends URIOptions = URIOptions, ParentComponents extends URIComponents = URIComponents> { scheme:string; parse(components:ParentComponents, options:Options):Components; serialize(components:Components, options:Options):ParentComponents; unicodeSupport?:boolean; domainHost?:boolean; absolutePath?:boolean; } export interface URIRegExps { NOT_SCHEME : RegExp, NOT_USERINFO : RegExp, NOT_HOST : RegExp, NOT_PATH : RegExp, NOT_PATH_NOSCHEME : RegExp, NOT_QUERY : RegExp, NOT_FRAGMENT : RegExp, ESCAPE : RegExp, UNRESERVED : RegExp, OTHER_CHARS : RegExp, PCT_ENCODED : RegExp, IPV4ADDRESS : RegExp, IPV6ADDRESS : RegExp, } export const SCHEMES:{[scheme:string]:URISchemeHandler} = {}; export function pctEncChar(chr:string):string { const c = chr.charCodeAt(0); let e:string; if (c < 16) e = "%0" + c.toString(16).toUpperCase(); else if (c < 128) e = "%" + c.toString(16).toUpperCase(); else if (c < 2048) e = "%" + ((c >> 6) | 192).toString(16).toUpperCase() + "%" + ((c & 63) | 128).toString(16).toUpperCase(); else e = "%" + ((c >> 12) | 224).toString(16).toUpperCase() + "%" + (((c >> 6) & 63) | 128).toString(16).toUpperCase() + "%" + ((c & 63) | 128).toString(16).toUpperCase(); return e; } export function pctDecChars(str:string):string { let newStr = ""; let i = 0; const il = str.length; while (i < il) { const c = parseInt(str.substr(i + 1, 2), 
16); if (c < 128) { newStr += String.fromCharCode(c); i += 3; } else if (c >= 194 && c < 224) { if ((il - i) >= 6) { const c2 = parseInt(str.substr(i + 4, 2), 16); newStr += String.fromCharCode(((c & 31) << 6) | (c2 & 63)); } else { newStr += str.substr(i, 6); } i += 6; } else if (c >= 224) { if ((il - i) >= 9) { const c2 = parseInt(str.substr(i + 4, 2), 16); const c3 = parseInt(str.substr(i + 7, 2), 16); newStr += String.fromCharCode(((c & 15) << 12) | ((c2 & 63) << 6) | (c3 & 63)); } else { newStr += str.substr(i, 9); } i += 9; } else { newStr += str.substr(i, 3); i += 3; } } return newStr; } function _normalizeComponentEncoding(components:URIComponents, protocol:URIRegExps) { function decodeUnreserved(str:string):string { const decStr = pctDecChars(str); return (!decStr.match(protocol.UNRESERVED) ? str : decStr); } if (components.scheme) components.scheme = String(components.scheme).replace(protocol.PCT_ENCODED, decodeUnreserved).toLowerCase().replace(protocol.NOT_SCHEME, ""); if (components.userinfo !== undefined) components.userinfo = String(components.userinfo).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_USERINFO, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.host !== undefined) components.host = String(components.host).replace(protocol.PCT_ENCODED, decodeUnreserved).toLowerCase().replace(protocol.NOT_HOST, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.path !== undefined) components.path = String(components.path).replace(protocol.PCT_ENCODED, decodeUnreserved).replace((components.scheme ? 
protocol.NOT_PATH : protocol.NOT_PATH_NOSCHEME), pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.query !== undefined) components.query = String(components.query).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_QUERY, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.fragment !== undefined) components.fragment = String(components.fragment).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_FRAGMENT, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); return components; }; function _stripLeadingZeros(str:string):string { return str.replace(/^0*(.*)/, "$1") || "0"; } function _normalizeIPv4(host:string, protocol:URIRegExps):string { const matches = host.match(protocol.IPV4ADDRESS) || []; const [, address] = matches; if (address) { return address.split(".").map(_stripLeadingZeros).join("."); } else { return host; } } function _normalizeIPv6(host:string, protocol:URIRegExps):string { const matches = host.match(protocol.IPV6ADDRESS) || []; const [, address, zone] = matches; if (address) { const [last, first] = address.toLowerCase().split('::').reverse(); const firstFields = first ? first.split(":").map(_stripLeadingZeros) : []; const lastFields = last.split(":").map(_stripLeadingZeros); const isLastFieldIPv4Address = protocol.IPV4ADDRESS.test(lastFields[lastFields.length - 1]); const fieldCount = isLastFieldIPv4Address ? 
7 : 8; const lastFieldsStart = lastFields.length - fieldCount; const fields = Array<string>(fieldCount); for (let x = 0; x < fieldCount; ++x) { fields[x] = firstFields[x] || lastFields[lastFieldsStart + x] || ''; } if (isLastFieldIPv4Address) { fields[fieldCount - 1] = _normalizeIPv4(fields[fieldCount - 1], protocol); } const allZeroFields = fields.reduce<Array<{index:number,length:number}>>((acc, field, index) => { if (!field || field === "0") { const lastLongest = acc[acc.length - 1]; if (lastLongest && lastLongest.index + lastLongest.length === index) { lastLongest.length++; } else { acc.push({ index, length : 1 }); } } return acc; }, []); const longestZeroFields = allZeroFields.sort((a, b) => b.length - a.length)[0]; let newHost:string; if (longestZeroFields && longestZeroFields.length > 1) { const newFirst = fields.slice(0, longestZeroFields.index) ; const newLast = fields.slice(longestZeroFields.index + longestZeroFields.length); newHost = newFirst.join(":") + "::" + newLast.join(":"); } else { newHost = fields.join(":"); } if (zone) { newHost += "%" + zone; } return newHost; } else { return host; } } const URI_PARSE = /^(?:([^:\/?#]+):)?(?:\/\/((?:([^\/?#@]*)@)?(\[[^\/?#\]]+\]|[^\/?#:]*)(?:\:(\d*))?))?([^?#]*)(?:\?([^#]*))?(?:#((?:.|\n|\r)*))?/i; const NO_MATCH_IS_UNDEFINED = (<RegExpMatchArray>("").match(/(){0}/))[1] === undefined; export function parse(uriString:string, options:URIOptions = {}):URIComponents { const components:URIComponents = {}; const protocol = (options.iri !== false ? IRI_PROTOCOL : URI_PROTOCOL); if (options.reference === "suffix") uriString = (options.scheme ? 
options.scheme + ":" : "") + "//" + uriString; const matches = uriString.match(URI_PARSE); if (matches) { if (NO_MATCH_IS_UNDEFINED) { //store each component components.scheme = matches[1]; components.userinfo = matches[3]; components.host = matches[4]; components.port = parseInt(matches[5], 10); components.path = matches[6] || ""; components.query = matches[7]; components.fragment = matches[8]; //fix port number if (isNaN(components.port)) { components.port = matches[5]; } } else { //IE FIX for improper RegExp matching //store each component components.scheme = matches[1] || undefined; components.userinfo = (uriString.indexOf("@") !== -1 ? matches[3] : undefined); components.host = (uriString.indexOf("//") !== -1 ? matches[4] : undefined); components.port = parseInt(matches[5], 10); components.path = matches[6] || ""; components.query = (uriString.indexOf("?") !== -1 ? matches[7] : undefined); components.fragment = (uriString.indexOf("#") !== -1 ? matches[8] : undefined); //fix port number if (isNaN(components.port)) { components.port = (uriString.match(/\/\/(?:.|\n)*\:(?:\/|\?|\#|$)/) ? 
matches[4] : undefined); } } if (components.host) { //normalize IP hosts components.host = _normalizeIPv6(_normalizeIPv4(components.host, protocol), protocol); } //determine reference type if (components.scheme === undefined && components.userinfo === undefined && components.host === undefined && components.port === undefined && !components.path && components.query === undefined) { components.reference = "same-document"; } else if (components.scheme === undefined) { components.reference = "relative"; } else if (components.fragment === undefined) { components.reference = "absolute"; } else { components.reference = "uri"; } //check for reference errors if (options.reference && options.reference !== "suffix" && options.reference !== components.reference) { components.error = components.error || "URI is not a " + options.reference + " reference."; } //find scheme handler const schemeHandler = SCHEMES[(options.scheme || components.scheme || "").toLowerCase()]; //check if scheme can't handle IRIs if (!options.unicodeSupport && (!schemeHandler || !schemeHandler.unicodeSupport)) { //if host component is a domain name if (components.host && (options.domainHost || (schemeHandler && schemeHandler.domainHost))) { //convert Unicode IDN -> ASCII IDN try { components.host = punycode.toASCII(components.host.replace(protocol.PCT_ENCODED, pctDecChars).toLowerCase()); } catch (e) { components.error = components.error || "Host's domain name can not be converted to ASCII via punycode: " + e; } } //convert IRI -> URI _normalizeComponentEncoding(components, URI_PROTOCOL); } else { //normalize encodings _normalizeComponentEncoding(components, protocol); } //perform scheme specific parsing if (schemeHandler && schemeHandler.parse) { schemeHandler.parse(components, options); } } else { components.error = components.error || "URI can not be parsed."; } return components; }; function _recomposeAuthority(components:URIComponents, options:URIOptions):string|undefined { const protocol = 
(options.iri !== false ? IRI_PROTOCOL : URI_PROTOCOL); const uriTokens:Array<string> = []; if (components.userinfo !== undefined) { uriTokens.push(components.userinfo); uriTokens.push("@"); } if (components.host !== undefined) { //normalize IP hosts, add brackets and escape zone separator for IPv6 uriTokens.push(_normalizeIPv6(_normalizeIPv4(String(components.host), protocol), protocol).replace(protocol.IPV6ADDRESS, (_, $1, $2) => "[" + $1 + ($2 ? "%25" + $2 : "") + "]")); } if (typeof components.port === "number") { uriTokens.push(":"); uriTokens.push(components.port.toString(10)); } return uriTokens.length ? uriTokens.join("") : undefined; }; const RDS1 = /^\.\.?\//; const RDS2 = /^\/\.(\/|$)/; const RDS3 = /^\/\.\.(\/|$)/; const RDS4 = /^\.\.?$/; const RDS5 = /^\/?(?:.|\n)*?(?=\/|$)/; export function removeDotSegments(input:string):string { const output:Array<string> = []; while (input.length) { if (input.match(RDS1)) { input = input.replace(RDS1, ""); } else if (input.match(RDS2)) { input = input.replace(RDS2, "/"); } else if (input.match(RDS3)) { input = input.replace(RDS3, "/"); output.pop(); } else if (input === "." || input === "..") { input = ""; } else { const im = input.match(RDS5); if (im) { const s = im[0]; input = input.slice(s.length); output.push(s); } else { throw new Error("Unexpected dot segment condition"); } } } return output.join(""); }; export function serialize(components:URIComponents, options:URIOptions = {}):string { const protocol = (options.iri ? 
IRI_PROTOCOL : URI_PROTOCOL); const uriTokens:Array<string> = []; //find scheme handler const schemeHandler = SCHEMES[(options.scheme || components.scheme || "").toLowerCase()]; //perform scheme specific serialization if (schemeHandler && schemeHandler.serialize) schemeHandler.serialize(components, options); if (components.host) { //if host component is an IPv6 address if (protocol.IPV6ADDRESS.test(components.host)) { //TODO: normalize IPv6 address as per RFC 5952 } //if host component is a domain name else if (options.domainHost || (schemeHandler && schemeHandler.domainHost)) { //convert IDN via punycode try { components.host = (!options.iri ? punycode.toASCII(components.host.replace(protocol.PCT_ENCODED, pctDecChars).toLowerCase()) : punycode.toUnicode(components.host)); } catch (e) { components.error = components.error || "Host's domain name can not be converted to " + (!options.iri ? "ASCII" : "Unicode") + " via punycode: " + e; } } } //normalize encoding _normalizeComponentEncoding(components, protocol); if (options.reference !== "suffix" && components.scheme) { uriTokens.push(components.scheme); uriTokens.push(":"); } const authority = _recomposeAuthority(components, options); if (authority !== undefined) { if (options.reference !== "suffix") { uriTokens.push("//"); } uriTokens.push(authority); if (components.path && components.path.charAt(0) !== "/") { uriTokens.push("/"); } } if (components.path !== undefined) { let s = components.path; if (!options.absolutePath && (!schemeHandler || !schemeHandler.absolutePath)) { s = removeDotSegments(s); } if (authority === undefined) { s = s.replace(/^\/\//, "/%2F"); //don't allow the path to start with "//" } uriTokens.push(s); } if (components.query !== undefined) { uriTokens.push("?"); uriTokens.push(components.query); } if (components.fragment !== undefined) { uriTokens.push("#"); uriTokens.push(components.fragment); } return uriTokens.join(""); //merge tokens into a string }; export function 
resolveComponents(base:URIComponents, relative:URIComponents, options:URIOptions = {}, skipNormalization?:boolean):URIComponents { const target:URIComponents = {}; if (!skipNormalization) { base = parse(serialize(base, options), options); //normalize base components relative = parse(serialize(relative, options), options); //normalize relative components } options = options || {}; if (!options.tolerant && relative.scheme) { target.scheme = relative.scheme; //target.authority = relative.authority; target.userinfo = relative.userinfo; target.host = relative.host; target.port = relative.port; target.path = removeDotSegments(relative.path || ""); target.query = relative.query; } else { if (relative.userinfo !== undefined || relative.host !== undefined || relative.port !== undefined) { //target.authority = relative.authority; target.userinfo = relative.userinfo; target.host = relative.host; target.port = relative.port; target.path = removeDotSegments(relative.path || ""); target.query = relative.query; } else { if (!relative.path) { target.path = base.path; if (relative.query !== undefined) { target.query = relative.query; } else { target.query = base.query; } } else { if (relative.path.charAt(0) === "/") { target.path = removeDotSegments(relative.path); } else { if ((base.userinfo !== undefined || base.host !== undefined || base.port !== undefined) && !base.path) { target.path = "/" + relative.path; } else if (!base.path) { target.path = relative.path; } else { target.path = base.path.slice(0, base.path.lastIndexOf("/") + 1) + relative.path; } target.path = removeDotSegments(target.path); } target.query = relative.query; } //target.authority = base.authority; target.userinfo = base.userinfo; target.host = base.host; target.port = base.port; } target.scheme = base.scheme; } target.fragment = relative.fragment; return target; }; export function resolve(baseURI:string, relativeURI:string, options?:URIOptions):string { const schemelessOptions = assign({ scheme : 'null' }, 
options); return serialize(resolveComponents(parse(baseURI, schemelessOptions), parse(relativeURI, schemelessOptions), schemelessOptions, true), schemelessOptions); }; export function normalize(uri:string, options?:URIOptions):string; export function normalize(uri:URIComponents, options?:URIOptions):URIComponents; export function normalize(uri:any, options?:URIOptions):any { if (typeof uri === "string") { uri = serialize(parse(uri, options), options); } else if (typeOf(uri) === "object") { uri = parse(serialize(<URIComponents>uri, options), options); } return uri; }; export function equal(uriA:string, uriB:string, options?: URIOptions):boolean; export function equal(uriA:URIComponents, uriB:URIComponents, options?:URIOptions):boolean; export function equal(uriA:any, uriB:any, options?:URIOptions):boolean { if (typeof uriA === "string") { uriA = serialize(parse(uriA, options), options); } else if (typeOf(uriA) === "object") { uriA = serialize(<URIComponents>uriA, options); } if (typeof uriB === "string") { uriB = serialize(parse(uriB, options), options); } else if (typeOf(uriB) === "object") { uriB = serialize(<URIComponents>uriB, options); } return uriA === uriB; }; export function escapeComponent(str:string, options?:URIOptions):string { return str && str.toString().replace((!options || !options.iri ? URI_PROTOCOL.ESCAPE : IRI_PROTOCOL.ESCAPE), pctEncChar); }; export function unescapeComponent(str:string, options?:URIOptions):string { return str && str.toString().replace((!options || !options.iri ? URI_PROTOCOL.PCT_ENCODED : IRI_PROTOCOL.PCT_ENCODED), pctDecChars); };
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/src/uri.ts
uri.ts
import { URIRegExps } from "uri-js/src/uri"; import { merge, subexp } from "uri-js/src/util"; export function buildExps(isIRI:boolean):URIRegExps { const ALPHA$$ = "[A-Za-z]", CR$ = "[\\x0D]", DIGIT$$ = "[0-9]", DQUOTE$$ = "[\\x22]", HEXDIG$$ = merge(DIGIT$$, "[A-Fa-f]"), //case-insensitive LF$$ = "[\\x0A]", SP$$ = "[\\x20]", PCT_ENCODED$ = subexp(subexp("%[EFef]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%[89A-Fa-f]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%" + HEXDIG$$ + HEXDIG$$)), //expanded GEN_DELIMS$$ = "[\\:\\/\\?\\#\\[\\]\\@]", SUB_DELIMS$$ = "[\\!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=]", RESERVED$$ = merge(GEN_DELIMS$$, SUB_DELIMS$$), UCSCHAR$$ = isIRI ? "[\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]" : "[]", //subset, excludes bidi control characters IPRIVATE$$ = isIRI ? "[\\uE000-\\uF8FF]" : "[]", //subset UNRESERVED$$ = merge(ALPHA$$, DIGIT$$, "[\\-\\.\\_\\~]", UCSCHAR$$), SCHEME$ = subexp(ALPHA$$ + merge(ALPHA$$, DIGIT$$, "[\\+\\-\\.]") + "*"), USERINFO$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:]")) + "*"), DEC_OCTET$ = subexp(subexp("25[0-5]") + "|" + subexp("2[0-4]" + DIGIT$$) + "|" + subexp("1" + DIGIT$$ + DIGIT$$) + "|" + subexp("[1-9]" + DIGIT$$) + "|" + DIGIT$$), DEC_OCTET_RELAXED$ = subexp(subexp("25[0-5]") + "|" + subexp("2[0-4]" + DIGIT$$) + "|" + subexp("1" + DIGIT$$ + DIGIT$$) + "|" + subexp("0?[1-9]" + DIGIT$$) + "|0?0?" + DIGIT$$), //relaxed parsing rules IPV4ADDRESS$ = subexp(DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$ + "\\." 
+ DEC_OCTET_RELAXED$), H16$ = subexp(HEXDIG$$ + "{1,4}"), LS32$ = subexp(subexp(H16$ + "\\:" + H16$) + "|" + IPV4ADDRESS$), IPV6ADDRESS1$ = subexp( subexp(H16$ + "\\:") + "{6}" + LS32$), // 6( h16 ":" ) ls32 IPV6ADDRESS2$ = subexp( "\\:\\:" + subexp(H16$ + "\\:") + "{5}" + LS32$), // "::" 5( h16 ":" ) ls32 IPV6ADDRESS3$ = subexp(subexp( H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{4}" + LS32$), //[ h16 ] "::" 4( h16 ":" ) ls32 IPV6ADDRESS4$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,1}" + H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{3}" + LS32$), //[ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 IPV6ADDRESS5$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,2}" + H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{2}" + LS32$), //[ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 IPV6ADDRESS6$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,3}" + H16$) + "?\\:\\:" + H16$ + "\\:" + LS32$), //[ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 IPV6ADDRESS7$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,4}" + H16$) + "?\\:\\:" + LS32$), //[ *4( h16 ":" ) h16 ] "::" ls32 IPV6ADDRESS8$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,5}" + H16$) + "?\\:\\:" + H16$ ), //[ *5( h16 ":" ) h16 ] "::" h16 IPV6ADDRESS9$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,6}" + H16$) + "?\\:\\:" ), //[ *6( h16 ":" ) h16 ] "::" IPV6ADDRESS$ = subexp([IPV6ADDRESS1$, IPV6ADDRESS2$, IPV6ADDRESS3$, IPV6ADDRESS4$, IPV6ADDRESS5$, IPV6ADDRESS6$, IPV6ADDRESS7$, IPV6ADDRESS8$, IPV6ADDRESS9$].join("|")), ZONEID$ = subexp(subexp(UNRESERVED$$ + "|" + PCT_ENCODED$) + "+"), //RFC 6874 IPV6ADDRZ$ = subexp(IPV6ADDRESS$ + "\\%25" + ZONEID$), //RFC 6874 IPV6ADDRZ_RELAXED$ = subexp(IPV6ADDRESS$ + subexp("\\%25|\\%(?!" + HEXDIG$$ + "{2})") + ZONEID$), //RFC 6874, with relaxed parsing rules IPVFUTURE$ = subexp("[vV]" + HEXDIG$$ + "+\\." 
+ merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:]") + "+"), IP_LITERAL$ = subexp("\\[" + subexp(IPV6ADDRZ_RELAXED$ + "|" + IPV6ADDRESS$ + "|" + IPVFUTURE$) + "\\]"), //RFC 6874 REG_NAME$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$)) + "*"), HOST$ = subexp(IP_LITERAL$ + "|" + IPV4ADDRESS$ + "(?!" + REG_NAME$ + ")" + "|" + REG_NAME$), PORT$ = subexp(DIGIT$$ + "*"), AUTHORITY$ = subexp(subexp(USERINFO$ + "@") + "?" + HOST$ + subexp("\\:" + PORT$) + "?"), PCHAR$ = subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@]")), SEGMENT$ = subexp(PCHAR$ + "*"), SEGMENT_NZ$ = subexp(PCHAR$ + "+"), SEGMENT_NZ_NC$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\@]")) + "+"), PATH_ABEMPTY$ = subexp(subexp("\\/" + SEGMENT$) + "*"), PATH_ABSOLUTE$ = subexp("\\/" + subexp(SEGMENT_NZ$ + PATH_ABEMPTY$) + "?"), //simplified PATH_NOSCHEME$ = subexp(SEGMENT_NZ_NC$ + PATH_ABEMPTY$), //simplified PATH_ROOTLESS$ = subexp(SEGMENT_NZ$ + PATH_ABEMPTY$), //simplified PATH_EMPTY$ = "(?!" + PCHAR$ + ")", PATH$ = subexp(PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$), QUERY$ = subexp(subexp(PCHAR$ + "|" + merge("[\\/\\?]", IPRIVATE$$)) + "*"), FRAGMENT$ = subexp(subexp(PCHAR$ + "|[\\/\\?]") + "*"), HIER_PART$ = subexp(subexp("\\/\\/" + AUTHORITY$ + PATH_ABEMPTY$) + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$), URI$ = subexp(SCHEME$ + "\\:" + HIER_PART$ + subexp("\\?" + QUERY$) + "?" + subexp("\\#" + FRAGMENT$) + "?"), RELATIVE_PART$ = subexp(subexp("\\/\\/" + AUTHORITY$ + PATH_ABEMPTY$) + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_EMPTY$), RELATIVE$ = subexp(RELATIVE_PART$ + subexp("\\?" + QUERY$) + "?" + subexp("\\#" + FRAGMENT$) + "?"), URI_REFERENCE$ = subexp(URI$ + "|" + RELATIVE$), ABSOLUTE_URI$ = subexp(SCHEME$ + "\\:" + HIER_PART$ + subexp("\\?" 
+ QUERY$) + "?"), GENERIC_REF$ = "^(" + SCHEME$ + ")\\:" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", RELATIVE_REF$ = "^(){0}" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", ABSOLUTE_REF$ = "^(" + SCHEME$ + ")\\:" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?$", SAMEDOC_REF$ = "^" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", AUTHORITY_REF$ = "^" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?$" ; return { NOT_SCHEME : new RegExp(merge("[^]", ALPHA$$, DIGIT$$, "[\\+\\-\\.]"), "g"), NOT_USERINFO : new RegExp(merge("[^\\%\\:]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_HOST : new RegExp(merge("[^\\%\\[\\]\\:]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_PATH : new RegExp(merge("[^\\%\\/\\:\\@]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_PATH_NOSCHEME : new RegExp(merge("[^\\%\\/\\@]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_QUERY : new RegExp(merge("[^\\%]", UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@\\/\\?]", IPRIVATE$$), "g"), NOT_FRAGMENT : new RegExp(merge("[^\\%]", UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@\\/\\?]"), "g"), ESCAPE : new RegExp(merge("[^]", UNRESERVED$$, SUB_DELIMS$$), "g"), UNRESERVED : new RegExp(UNRESERVED$$, "g"), OTHER_CHARS : new RegExp(merge("[^\\%]", UNRESERVED$$, RESERVED$$), "g"), PCT_ENCODED : new RegExp(PCT_ENCODED$, "g"), 
IPV4ADDRESS : new RegExp("^(" + IPV4ADDRESS$ + ")$"), IPV6ADDRESS : new RegExp("^\\[?(" + IPV6ADDRESS$ + ")" + subexp(subexp("\\%25|\\%(?!" + HEXDIG$$ + "{2})") + "(" + ZONEID$ + ")") + "?\\]?$") //RFC 6874, with relaxed parsing rules }; } export default buildExps(false);
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/src/regexps-uri.ts
regexps-uri.ts
import { URISchemeHandler, URIComponents, URIOptions } from "uri-js/src/uri"; import { pctEncChar, pctDecChars, unescapeComponent } from "uri-js/src/uri"; import punycode from "punycode"; import { merge, subexp, toUpperCase, toArray } from "uri-js/src/util"; export interface MailtoHeaders { [hfname:string]:string } export interface MailtoComponents extends URIComponents { to:Array<string>, headers?:MailtoHeaders, subject?:string, body?:string } const O:MailtoHeaders = {}; const isIRI = true; //RFC 3986 const UNRESERVED$$ = "[A-Za-z0-9\\-\\.\\_\\~" + (isIRI ? "\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF" : "") + "]"; const HEXDIG$$ = "[0-9A-Fa-f]"; //case-insensitive const PCT_ENCODED$ = subexp(subexp("%[EFef]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%[89A-Fa-f]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%" + HEXDIG$$ + HEXDIG$$)); //expanded //RFC 5322, except these symbols as per RFC 6068: @ : / ? # [ ] & ; = //const ATEXT$$ = "[A-Za-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\-\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~]"; //const WSP$$ = "[\\x20\\x09]"; //const OBS_QTEXT$$ = "[\\x01-\\x08\\x0B\\x0C\\x0E-\\x1F\\x7F]"; //(%d1-8 / %d11-12 / %d14-31 / %d127) //const QTEXT$$ = merge("[\\x21\\x23-\\x5B\\x5D-\\x7E]", OBS_QTEXT$$); //%d33 / %d35-91 / %d93-126 / obs-qtext //const VCHAR$$ = "[\\x21-\\x7E]"; //const WSP$$ = "[\\x20\\x09]"; //const OBS_QP$ = subexp("\\\\" + merge("[\\x00\\x0D\\x0A]", OBS_QTEXT$$)); //%d0 / CR / LF / obs-qtext //const FWS$ = subexp(subexp(WSP$$ + "*" + "\\x0D\\x0A") + "?" + WSP$$ + "+"); //const QUOTED_PAIR$ = subexp(subexp("\\\\" + subexp(VCHAR$$ + "|" + WSP$$)) + "|" + OBS_QP$); //const QUOTED_STRING$ = subexp('\\"' + subexp(FWS$ + "?" + QCONTENT$) + "*" + FWS$ + "?" 
+ '\\"'); const ATEXT$$ = "[A-Za-z0-9\\!\\$\\%\\'\\*\\+\\-\\^\\_\\`\\{\\|\\}\\~]"; const QTEXT$$ = "[\\!\\$\\%\\'\\(\\)\\*\\+\\,\\-\\.0-9\\<\\>A-Z\\x5E-\\x7E]"; const VCHAR$$ = merge(QTEXT$$, "[\\\"\\\\]"); const DOT_ATOM_TEXT$ = subexp(ATEXT$$ + "+" + subexp("\\." + ATEXT$$ + "+") + "*"); const QUOTED_PAIR$ = subexp("\\\\" + VCHAR$$); const QCONTENT$ = subexp(QTEXT$$ + "|" + QUOTED_PAIR$); const QUOTED_STRING$ = subexp('\\"' + QCONTENT$ + "*" + '\\"'); //RFC 6068 const DTEXT_NO_OBS$$ = "[\\x21-\\x5A\\x5E-\\x7E]"; //%d33-90 / %d94-126 const SOME_DELIMS$$ = "[\\!\\$\\'\\(\\)\\*\\+\\,\\;\\:\\@]"; const QCHAR$ = subexp(UNRESERVED$$ + "|" + PCT_ENCODED$ + "|" + SOME_DELIMS$$); const DOMAIN$ = subexp(DOT_ATOM_TEXT$ + "|" + "\\[" + DTEXT_NO_OBS$$ + "*" + "\\]"); const LOCAL_PART$ = subexp(DOT_ATOM_TEXT$ + "|" + QUOTED_STRING$); const ADDR_SPEC$ = subexp(LOCAL_PART$ + "\\@" + DOMAIN$); const TO$ = subexp(ADDR_SPEC$ + subexp("\\," + ADDR_SPEC$) + "*"); const HFNAME$ = subexp(QCHAR$ + "*"); const HFVALUE$ = HFNAME$; const HFIELD$ = subexp(HFNAME$ + "\\=" + HFVALUE$); const HFIELDS2$ = subexp(HFIELD$ + subexp("\\&" + HFIELD$) + "*"); const HFIELDS$ = subexp("\\?" + HFIELDS2$); const MAILTO_URI = new RegExp("^mailto\\:" + TO$ + "?" + HFIELDS$ + "?$"); const UNRESERVED = new RegExp(UNRESERVED$$, "g"); const PCT_ENCODED = new RegExp(PCT_ENCODED$, "g"); const NOT_LOCAL_PART = new RegExp(merge("[^]", ATEXT$$, "[\\.]", '[\\"]', VCHAR$$), "g"); const NOT_DOMAIN = new RegExp(merge("[^]", ATEXT$$, "[\\.]", "[\\[]", DTEXT_NO_OBS$$, "[\\]]"), "g"); const NOT_HFNAME = new RegExp(merge("[^]", UNRESERVED$$, SOME_DELIMS$$), "g"); const NOT_HFVALUE = NOT_HFNAME; const TO = new RegExp("^" + TO$ + "$"); const HFIELDS = new RegExp("^" + HFIELDS2$ + "$"); function decodeUnreserved(str:string):string { const decStr = pctDecChars(str); return (!decStr.match(UNRESERVED) ? 
str : decStr); } const handler:URISchemeHandler<MailtoComponents> = { scheme : "mailto", parse : function (components:URIComponents, options:URIOptions):MailtoComponents { const mailtoComponents = components as MailtoComponents; const to = mailtoComponents.to = (mailtoComponents.path ? mailtoComponents.path.split(",") : []); mailtoComponents.path = undefined; if (mailtoComponents.query) { let unknownHeaders = false const headers:MailtoHeaders = {}; const hfields = mailtoComponents.query.split("&"); for (let x = 0, xl = hfields.length; x < xl; ++x) { const hfield = hfields[x].split("="); switch (hfield[0]) { case "to": const toAddrs = hfield[1].split(","); for (let x = 0, xl = toAddrs.length; x < xl; ++x) { to.push(toAddrs[x]); } break; case "subject": mailtoComponents.subject = unescapeComponent(hfield[1], options); break; case "body": mailtoComponents.body = unescapeComponent(hfield[1], options); break; default: unknownHeaders = true; headers[unescapeComponent(hfield[0], options)] = unescapeComponent(hfield[1], options); break; } } if (unknownHeaders) mailtoComponents.headers = headers; } mailtoComponents.query = undefined; for (let x = 0, xl = to.length; x < xl; ++x) { const addr = to[x].split("@"); addr[0] = unescapeComponent(addr[0]); if (!options.unicodeSupport) { //convert Unicode IDN -> ASCII IDN try { addr[1] = punycode.toASCII(unescapeComponent(addr[1], options).toLowerCase()); } catch (e) { mailtoComponents.error = mailtoComponents.error || "Email address's domain name can not be converted to ASCII via punycode: " + e; } } else { addr[1] = unescapeComponent(addr[1], options).toLowerCase(); } to[x] = addr.join("@"); } return mailtoComponents; }, serialize : function (mailtoComponents:MailtoComponents, options:URIOptions):URIComponents { const components = mailtoComponents as URIComponents; const to = toArray(mailtoComponents.to); if (to) { for (let x = 0, xl = to.length; x < xl; ++x) { const toAddr = String(to[x]); const atIdx = toAddr.lastIndexOf("@"); 
const localPart = (toAddr.slice(0, atIdx)).replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_LOCAL_PART, pctEncChar); let domain = toAddr.slice(atIdx + 1); //convert IDN via punycode try { domain = (!options.iri ? punycode.toASCII(unescapeComponent(domain, options).toLowerCase()) : punycode.toUnicode(domain)); } catch (e) { components.error = components.error || "Email address's domain name can not be converted to " + (!options.iri ? "ASCII" : "Unicode") + " via punycode: " + e; } to[x] = localPart + "@" + domain; } components.path = to.join(","); } const headers = mailtoComponents.headers = mailtoComponents.headers || {}; if (mailtoComponents.subject) headers["subject"] = mailtoComponents.subject; if (mailtoComponents.body) headers["body"] = mailtoComponents.body; const fields = []; for (const name in headers) { if (headers[name] !== O[name]) { fields.push( name.replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_HFNAME, pctEncChar) + "=" + headers[name].replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_HFVALUE, pctEncChar) ); } } if (fields.length) { components.query = fields.join("&"); } return components; } } export default handler;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/src/schemes/mailto.ts
mailto.ts
import { URISchemeHandler, URIComponents, URIOptions } from "uri-js/src/uri"; import { pctEncChar, SCHEMES } from "uri-js/src/uri"; export interface URNComponents extends URIComponents { nid?:string; nss?:string; } export interface URNOptions extends URIOptions { nid?:string; } const NID$ = "(?:[0-9A-Za-z][0-9A-Za-z\\-]{1,31})"; const PCT_ENCODED$ = "(?:\\%[0-9A-Fa-f]{2})"; const TRANS$$ = "[0-9A-Za-z\\(\\)\\+\\,\\-\\.\\:\\=\\@\\;\\$\\_\\!\\*\\'\\/\\?\\#]"; const NSS$ = "(?:(?:" + PCT_ENCODED$ + "|" + TRANS$$ + ")+)"; const URN_SCHEME = new RegExp("^urn\\:(" + NID$ + ")$"); const URN_PATH = new RegExp("^(" + NID$ + ")\\:(" + NSS$ + ")$"); const URN_PARSE = /^([^\:]+)\:(.*)/; const URN_EXCLUDED = /[\x00-\x20\\\"\&\<\>\[\]\^\`\{\|\}\~\x7F-\xFF]/g; //RFC 2141 const handler:URISchemeHandler<URNComponents,URNOptions> = { scheme : "urn", parse : function (components:URIComponents, options:URNOptions):URNComponents { const matches = components.path && components.path.match(URN_PARSE); let urnComponents = components as URNComponents; if (matches) { const scheme = options.scheme || urnComponents.scheme || "urn"; const nid = matches[1].toLowerCase(); const nss = matches[2]; const urnScheme = `${scheme}:${options.nid || nid}`; const schemeHandler = SCHEMES[urnScheme]; urnComponents.nid = nid; urnComponents.nss = nss; urnComponents.path = undefined; if (schemeHandler) { urnComponents = schemeHandler.parse(urnComponents, options) as URNComponents; } } else { urnComponents.error = urnComponents.error || "URN can not be parsed."; } return urnComponents; }, serialize : function (urnComponents:URNComponents, options:URNOptions):URIComponents { const scheme = options.scheme || urnComponents.scheme || "urn"; const nid = urnComponents.nid; const urnScheme = `${scheme}:${options.nid || nid}`; const schemeHandler = SCHEMES[urnScheme]; if (schemeHandler) { urnComponents = schemeHandler.serialize(urnComponents, options) as URNComponents; } const uriComponents = urnComponents as 
URIComponents; const nss = urnComponents.nss; uriComponents.path = `${nid || options.nid}:${nss}`; return uriComponents; }, }; export default handler;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/src/schemes/urn.ts
urn.ts
export interface URIComponents { scheme?: string; userinfo?: string; host?: string; port?: number | string; path?: string; query?: string; fragment?: string; reference?: string; error?: string; } export interface URIOptions { scheme?: string; reference?: string; tolerant?: boolean; absolutePath?: boolean; iri?: boolean; unicodeSupport?: boolean; domainHost?: boolean; } export interface URISchemeHandler<Components extends URIComponents = URIComponents, Options extends URIOptions = URIOptions, ParentComponents extends URIComponents = URIComponents> { scheme: string; parse(components: ParentComponents, options: Options): Components; serialize(components: Components, options: Options): ParentComponents; unicodeSupport?: boolean; domainHost?: boolean; absolutePath?: boolean; } export interface URIRegExps { NOT_SCHEME: RegExp; NOT_USERINFO: RegExp; NOT_HOST: RegExp; NOT_PATH: RegExp; NOT_PATH_NOSCHEME: RegExp; NOT_QUERY: RegExp; NOT_FRAGMENT: RegExp; ESCAPE: RegExp; UNRESERVED: RegExp; OTHER_CHARS: RegExp; PCT_ENCODED: RegExp; IPV4ADDRESS: RegExp; IPV6ADDRESS: RegExp; } export declare const SCHEMES: { [scheme: string]: URISchemeHandler; }; export declare function pctEncChar(chr: string): string; export declare function pctDecChars(str: string): string; export declare function parse(uriString: string, options?: URIOptions): URIComponents; export declare function removeDotSegments(input: string): string; export declare function serialize(components: URIComponents, options?: URIOptions): string; export declare function resolveComponents(base: URIComponents, relative: URIComponents, options?: URIOptions, skipNormalization?: boolean): URIComponents; export declare function resolve(baseURI: string, relativeURI: string, options?: URIOptions): string; export declare function normalize(uri: string, options?: URIOptions): string; export declare function normalize(uri: URIComponents, options?: URIOptions): URIComponents; export declare function equal(uriA: string, uriB: string, 
options?: URIOptions): boolean; export declare function equal(uriA: URIComponents, uriB: URIComponents, options?: URIOptions): boolean; export declare function escapeComponent(str: string, options?: URIOptions): string; export declare function unescapeComponent(str: string, options?: URIOptions): string;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/es5/uri.all.d.ts
uri.all.d.ts
!function(e,r){"object"==typeof exports&&"undefined"!=typeof module?r(exports):"function"==typeof define&&define.amd?define(["exports"],r):r(e.URI=e.URI||{})}(this,function(e){"use strict";function r(){for(var e=arguments.length,r=Array(e),n=0;n<e;n++)r[n]=arguments[n];if(r.length>1){r[0]=r[0].slice(0,-1);for(var t=r.length-1,o=1;o<t;++o)r[o]=r[o].slice(1,-1);return r[t]=r[t].slice(1),r.join("")}return r[0]}function n(e){return"(?:"+e+")"}function t(e){return e===undefined?"undefined":null===e?"null":Object.prototype.toString.call(e).split(" ").pop().split("]").shift().toLowerCase()}function o(e){return e.toUpperCase()}function a(e){return e!==undefined&&null!==e?e instanceof Array?e:"number"!=typeof e.length||e.split||e.setInterval||e.call?[e]:Array.prototype.slice.call(e):[]}function i(e,r){var n=e;if(r)for(var t in r)n[t]=r[t];return n}function u(e){var t=r("[0-9]","[A-Fa-f]"),o=n(n("%[EFef]"+t+"%"+t+t+"%"+t+t)+"|"+n("%[89A-Fa-f]"+t+"%"+t+t)+"|"+n("%"+t+t)),a="[\\!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=]",i=r("[\\:\\/\\?\\#\\[\\]\\@]",a),u=e?"[\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]":"[]",s=e?"[\\uE000-\\uF8FF]":"[]",f=r("[A-Za-z]","[0-9]","[\\-\\.\\_\\~]",u),c=n(n("25[0-5]")+"|"+n("2[0-4][0-9]")+"|"+n("1[0-9][0-9]")+"|"+n("0?[1-9][0-9]")+"|0?0?[0-9]"),p=n(c+"\\."+c+"\\."+c+"\\."+c),h=n(t+"{1,4}"),d=n(n(h+"\\:"+h)+"|"+p),l=n(n(h+"\\:")+"{6}"+d),g=n("\\:\\:"+n(h+"\\:")+"{5}"+d),v=n(n(h)+"?\\:\\:"+n(h+"\\:")+"{4}"+d),m=n(n(n(h+"\\:")+"{0,1}"+h)+"?\\:\\:"+n(h+"\\:")+"{3}"+d),E=n(n(n(h+"\\:")+"{0,2}"+h)+"?\\:\\:"+n(h+"\\:")+"{2}"+d),C=n(n(n(h+"\\:")+"{0,3}"+h)+"?\\:\\:"+h+"\\:"+d),y=n(n(n(h+"\\:")+"{0,4}"+h)+"?\\:\\:"+d),S=n(n(n(h+"\\:")+"{0,5}"+h)+"?\\:\\:"+h),A=n(n(n(h+"\\:")+"{0,6}"+h)+"?\\:\\:"),D=n([l,g,v,m,E,C,y,S,A].join("|")),w=n(n(f+"|"+o)+"+");return{NOT_SCHEME:new RegExp(r("[^]","[A-Za-z]","[0-9]","[\\+\\-\\.]"),"g"),NOT_USERINFO:new RegExp(r("[^\\%\\:]",f,a),"g"),NOT_HOST:new RegExp(r("[^\\%\\[\\]\\:]",f,a),"g"),NOT_PATH:new 
RegExp(r("[^\\%\\/\\:\\@]",f,a),"g"),NOT_PATH_NOSCHEME:new RegExp(r("[^\\%\\/\\@]",f,a),"g"),NOT_QUERY:new RegExp(r("[^\\%]",f,a,"[\\:\\@\\/\\?]",s),"g"),NOT_FRAGMENT:new RegExp(r("[^\\%]",f,a,"[\\:\\@\\/\\?]"),"g"),ESCAPE:new RegExp(r("[^]",f,a),"g"),UNRESERVED:new RegExp(f,"g"),OTHER_CHARS:new RegExp(r("[^\\%]",f,i),"g"),PCT_ENCODED:new RegExp(o,"g"),IPV4ADDRESS:new RegExp("^("+p+")$"),IPV6ADDRESS:new RegExp("^\\[?("+D+")"+n(n("\\%25|\\%(?!"+t+"{2})")+"("+w+")")+"?\\]?$")}}function s(e){throw new RangeError(q[e])}function f(e,r){for(var n=[],t=e.length;t--;)n[t]=r(e[t]);return n}function c(e,r){var n=e.split("@"),t="";return n.length>1&&(t=n[0]+"@",e=n[1]),e=e.replace(j,"."),t+f(e.split("."),r).join(".")}function p(e){for(var r=[],n=0,t=e.length;n<t;){var o=e.charCodeAt(n++);if(o>=55296&&o<=56319&&n<t){var a=e.charCodeAt(n++);56320==(64512&a)?r.push(((1023&o)<<10)+(1023&a)+65536):(r.push(o),n--)}else r.push(o)}return r}function h(e){var r=e.charCodeAt(0);return r<16?"%0"+r.toString(16).toUpperCase():r<128?"%"+r.toString(16).toUpperCase():r<2048?"%"+(r>>6|192).toString(16).toUpperCase()+"%"+(63&r|128).toString(16).toUpperCase():"%"+(r>>12|224).toString(16).toUpperCase()+"%"+(r>>6&63|128).toString(16).toUpperCase()+"%"+(63&r|128).toString(16).toUpperCase()}function d(e){for(var r="",n=0,t=e.length;n<t;){var o=parseInt(e.substr(n+1,2),16);if(o<128)r+=String.fromCharCode(o),n+=3;else if(o>=194&&o<224){if(t-n>=6){var a=parseInt(e.substr(n+4,2),16);r+=String.fromCharCode((31&o)<<6|63&a)}else r+=e.substr(n,6);n+=6}else if(o>=224){if(t-n>=9){var i=parseInt(e.substr(n+4,2),16),u=parseInt(e.substr(n+7,2),16);r+=String.fromCharCode((15&o)<<12|(63&i)<<6|63&u)}else r+=e.substr(n,9);n+=9}else r+=e.substr(n,3),n+=3}return r}function l(e,r){function n(e){var n=d(e);return n.match(r.UNRESERVED)?n:e}return 
e.scheme&&(e.scheme=String(e.scheme).replace(r.PCT_ENCODED,n).toLowerCase().replace(r.NOT_SCHEME,"")),e.userinfo!==undefined&&(e.userinfo=String(e.userinfo).replace(r.PCT_ENCODED,n).replace(r.NOT_USERINFO,h).replace(r.PCT_ENCODED,o)),e.host!==undefined&&(e.host=String(e.host).replace(r.PCT_ENCODED,n).toLowerCase().replace(r.NOT_HOST,h).replace(r.PCT_ENCODED,o)),e.path!==undefined&&(e.path=String(e.path).replace(r.PCT_ENCODED,n).replace(e.scheme?r.NOT_PATH:r.NOT_PATH_NOSCHEME,h).replace(r.PCT_ENCODED,o)),e.query!==undefined&&(e.query=String(e.query).replace(r.PCT_ENCODED,n).replace(r.NOT_QUERY,h).replace(r.PCT_ENCODED,o)),e.fragment!==undefined&&(e.fragment=String(e.fragment).replace(r.PCT_ENCODED,n).replace(r.NOT_FRAGMENT,h).replace(r.PCT_ENCODED,o)),e}function g(e){return e.replace(/^0*(.*)/,"$1")||"0"}function v(e,r){var n=e.match(r.IPV4ADDRESS)||[],t=R(n,2),o=t[1];return o?o.split(".").map(g).join("."):e}function m(e,r){var n=e.match(r.IPV6ADDRESS)||[],t=R(n,3),o=t[1],a=t[2];if(o){for(var i=o.toLowerCase().split("::").reverse(),u=R(i,2),s=u[0],f=u[1],c=f?f.split(":").map(g):[],p=s.split(":").map(g),h=r.IPV4ADDRESS.test(p[p.length-1]),d=h?7:8,l=p.length-d,m=Array(d),E=0;E<d;++E)m[E]=c[E]||p[l+E]||"";h&&(m[d-1]=v(m[d-1],r));var C=m.reduce(function(e,r,n){if(!r||"0"===r){var t=e[e.length-1];t&&t.index+t.length===n?t.length++:e.push({index:n,length:1})}return e},[]),y=C.sort(function(e,r){return r.length-e.length})[0],S=void 0;if(y&&y.length>1){var A=m.slice(0,y.index),D=m.slice(y.index+y.length);S=A.join(":")+"::"+D.join(":")}else S=m.join(":");return a&&(S+="%"+a),S}return e}function E(e){var r=arguments.length>1&&arguments[1]!==undefined?arguments[1]:{},n={},t=!1!==r.iri?N:F;"suffix"===r.reference&&(e=(r.scheme?r.scheme+":":"")+"//"+e);var 
o=e.match(J);if(o){K?(n.scheme=o[1],n.userinfo=o[3],n.host=o[4],n.port=parseInt(o[5],10),n.path=o[6]||"",n.query=o[7],n.fragment=o[8],isNaN(n.port)&&(n.port=o[5])):(n.scheme=o[1]||undefined,n.userinfo=-1!==e.indexOf("@")?o[3]:undefined,n.host=-1!==e.indexOf("//")?o[4]:undefined,n.port=parseInt(o[5],10),n.path=o[6]||"",n.query=-1!==e.indexOf("?")?o[7]:undefined,n.fragment=-1!==e.indexOf("#")?o[8]:undefined,isNaN(n.port)&&(n.port=e.match(/\/\/(?:.|\n)*\:(?:\/|\?|\#|$)/)?o[4]:undefined)),n.host&&(n.host=m(v(n.host,t),t)),n.scheme!==undefined||n.userinfo!==undefined||n.host!==undefined||n.port!==undefined||n.path||n.query!==undefined?n.scheme===undefined?n.reference="relative":n.fragment===undefined?n.reference="absolute":n.reference="uri":n.reference="same-document",r.reference&&"suffix"!==r.reference&&r.reference!==n.reference&&(n.error=n.error||"URI is not a "+r.reference+" reference.");var a=B[(r.scheme||n.scheme||"").toLowerCase()];if(r.unicodeSupport||a&&a.unicodeSupport)l(n,t);else{if(n.host&&(r.domainHost||a&&a.domainHost))try{n.host=Y.toASCII(n.host.replace(t.PCT_ENCODED,d).toLowerCase())}catch(i){n.error=n.error||"Host's domain name can not be converted to ASCII via punycode: "+i}l(n,F)}a&&a.parse&&a.parse(n,r)}else n.error=n.error||"URI can not be parsed.";return n}function C(e,r){var n=!1!==r.iri?N:F,t=[];return e.userinfo!==undefined&&(t.push(e.userinfo),t.push("@")),e.host!==undefined&&t.push(m(v(String(e.host),n),n).replace(n.IPV6ADDRESS,function(e,r,n){return"["+r+(n?"%25"+n:"")+"]"})),"number"==typeof e.port&&(t.push(":"),t.push(e.port.toString(10))),t.length?t.join(""):undefined}function y(e){for(var r=[];e.length;)if(e.match(W))e=e.replace(W,"");else if(e.match(X))e=e.replace(X,"/");else if(e.match(ee))e=e.replace(ee,"/"),r.pop();else if("."===e||".."===e)e="";else{var n=e.match(re);if(!n)throw new Error("Unexpected dot segment condition");var t=n[0];e=e.slice(t.length),r.push(t)}return r.join("")}function S(e){var 
r=arguments.length>1&&arguments[1]!==undefined?arguments[1]:{},n=r.iri?N:F,t=[],o=B[(r.scheme||e.scheme||"").toLowerCase()];if(o&&o.serialize&&o.serialize(e,r),e.host)if(n.IPV6ADDRESS.test(e.host));else if(r.domainHost||o&&o.domainHost)try{e.host=r.iri?Y.toUnicode(e.host):Y.toASCII(e.host.replace(n.PCT_ENCODED,d).toLowerCase())}catch(u){e.error=e.error||"Host's domain name can not be converted to "+(r.iri?"Unicode":"ASCII")+" via punycode: "+u}l(e,n),"suffix"!==r.reference&&e.scheme&&(t.push(e.scheme),t.push(":"));var a=C(e,r);if(a!==undefined&&("suffix"!==r.reference&&t.push("//"),t.push(a),e.path&&"/"!==e.path.charAt(0)&&t.push("/")),e.path!==undefined){var i=e.path;r.absolutePath||o&&o.absolutePath||(i=y(i)),a===undefined&&(i=i.replace(/^\/\//,"/%2F")),t.push(i)}return e.query!==undefined&&(t.push("?"),t.push(e.query)),e.fragment!==undefined&&(t.push("#"),t.push(e.fragment)),t.join("")}function A(e,r){var n=arguments.length>2&&arguments[2]!==undefined?arguments[2]:{},t=arguments[3],o={};return t||(e=E(S(e,n),n),r=E(S(r,n),n)),n=n||{},!n.tolerant&&r.scheme?(o.scheme=r.scheme,o.userinfo=r.userinfo,o.host=r.host,o.port=r.port,o.path=y(r.path||""),o.query=r.query):(r.userinfo!==undefined||r.host!==undefined||r.port!==undefined?(o.userinfo=r.userinfo,o.host=r.host,o.port=r.port,o.path=y(r.path||""),o.query=r.query):(r.path?("/"===r.path.charAt(0)?o.path=y(r.path):(e.userinfo===undefined&&e.host===undefined&&e.port===undefined||e.path?e.path?o.path=e.path.slice(0,e.path.lastIndexOf("/")+1)+r.path:o.path=r.path:o.path="/"+r.path,o.path=y(o.path)),o.query=r.query):(o.path=e.path,r.query!==undefined?o.query=r.query:o.query=e.query),o.userinfo=e.userinfo,o.host=e.host,o.port=e.port),o.scheme=e.scheme),o.fragment=r.fragment,o}function D(e,r,n){var t=i({scheme:"null"},n);return S(A(E(e,t),E(r,t),t,!0),t)}function w(e,r){return"string"==typeof e?e=S(E(e,r),r):"object"===t(e)&&(e=E(S(e,r),r)),e}function b(e,r,n){return"string"==typeof 
e?e=S(E(e,n),n):"object"===t(e)&&(e=S(e,n)),"string"==typeof r?r=S(E(r,n),n):"object"===t(r)&&(r=S(r,n)),e===r}function x(e,r){return e&&e.toString().replace(r&&r.iri?N.ESCAPE:F.ESCAPE,h)}function O(e,r){return e&&e.toString().replace(r&&r.iri?N.PCT_ENCODED:F.PCT_ENCODED,d)}function I(e){var r=d(e);return r.match(fe)?r:e}var F=u(!1),N=u(!0),R=function(){function e(e,r){var n=[],t=!0,o=!1,a=undefined;try{for(var i,u=e[Symbol.iterator]();!(t=(i=u.next()).done)&&(n.push(i.value),!r||n.length!==r);t=!0);}catch(s){o=!0,a=s}finally{try{!t&&u["return"]&&u["return"]()}finally{if(o)throw a}}return n}return function(r,n){if(Array.isArray(r))return r;if(Symbol.iterator in Object(r))return e(r,n);throw new TypeError("Invalid attempt to destructure non-iterable instance")}}(),T=function(e){if(Array.isArray(e)){for(var r=0,n=Array(e.length);r<e.length;r++)n[r]=e[r];return n}return Array.from(e)},_=2147483647,P=/^xn--/,U=/[^\0-\x7E]/,j=/[\x2E\u3002\uFF0E\uFF61]/g,q={overflow:"Overflow: input needs wider integers to process","not-basic":"Illegal input >= 0x80 (not a basic code point)","invalid-input":"Invalid input"},H=Math.floor,z=String.fromCharCode,L=function(e){return String.fromCodePoint.apply(String,T(e))},$=function(e){return e-48<10?e-22:e-65<26?e-65:e-97<26?e-97:36},M=function(e,r){return e+22+75*(e<26)-((0!=r)<<5)},V=function(e,r,n){var t=0;for(e=n?H(e/700):e>>1,e+=H(e/r);e>455;t+=36)e=H(e/35);return H(t+36*e/(e+38))},k=function(e){var r=[],n=e.length,t=0,o=128,a=72,i=e.lastIndexOf("-");i<0&&(i=0);for(var u=0;u<i;++u)e.charCodeAt(u)>=128&&s("not-basic"),r.push(e.charCodeAt(u));for(var f=i>0?i+1:0;f<n;){for(var c=t,p=1,h=36;;h+=36){f>=n&&s("invalid-input");var d=$(e.charCodeAt(f++));(d>=36||d>H((_-t)/p))&&s("overflow"),t+=d*p;var l=h<=a?1:h>=a+26?26:h-a;if(d<l)break;var g=36-l;p>H(_/g)&&s("overflow"),p*=g}var v=r.length+1;a=V(t-c,v,0==c),H(t/v)>_-o&&s("overflow"),o+=H(t/v),t%=v,r.splice(t++,0,o)}return String.fromCodePoint.apply(String,r)},Z=function(e){var 
r=[];e=p(e);var n=e.length,t=128,o=0,a=72,i=!0,u=!1,f=undefined;try{for(var c,h=e[Symbol.iterator]();!(i=(c=h.next()).done);i=!0){var d=c.value;d<128&&r.push(z(d))}}catch(j){u=!0,f=j}finally{try{!i&&h["return"]&&h["return"]()}finally{if(u)throw f}}var l=r.length,g=l;for(l&&r.push("-");g<n;){var v=_,m=!0,E=!1,C=undefined;try{for(var y,S=e[Symbol.iterator]();!(m=(y=S.next()).done);m=!0){var A=y.value;A>=t&&A<v&&(v=A)}}catch(j){E=!0,C=j}finally{try{!m&&S["return"]&&S["return"]()}finally{if(E)throw C}}var D=g+1;v-t>H((_-o)/D)&&s("overflow"),o+=(v-t)*D,t=v;var w=!0,b=!1,x=undefined;try{for(var O,I=e[Symbol.iterator]();!(w=(O=I.next()).done);w=!0){var F=O.value;if(F<t&&++o>_&&s("overflow"),F==t){for(var N=o,R=36;;R+=36){var T=R<=a?1:R>=a+26?26:R-a;if(N<T)break;var P=N-T,U=36-T;r.push(z(M(T+P%U,0))),N=H(P/U)}r.push(z(M(N,0))),a=V(o,D,g==l),o=0,++g}}}catch(j){b=!0,x=j}finally{try{!w&&I["return"]&&I["return"]()}finally{if(b)throw x}}++o,++t}return r.join("")},G=function(e){return c(e,function(e){return P.test(e)?k(e.slice(4).toLowerCase()):e})},Q=function(e){return c(e,function(e){return U.test(e)?"xn--"+Z(e):e})},Y={version:"2.1.0",ucs2:{decode:p,encode:L},decode:k,encode:Z,toASCII:Q,toUnicode:G},B={},J=/^(?:([^:\/?#]+):)?(?:\/\/((?:([^\/?#@]*)@)?(\[[^\/?#\]]+\]|[^\/?#:]*)(?:\:(\d*))?))?([^?#]*)(?:\?([^#]*))?(?:#((?:.|\n|\r)*))?/i,K="".match(/(){0}/)[1]===undefined,W=/^\.\.?\//,X=/^\/\.(\/|$)/,ee=/^\/\.\.(\/|$)/,re=/^\/?(?:.|\n)*?(?=\/|$)/,ne={scheme:"http",domainHost:!0,parse:function(e,r){return e.host||(e.error=e.error||"HTTP URIs must have a host."),e},serialize:function(e,r){return 
e.port!==("https"!==String(e.scheme).toLowerCase()?80:443)&&""!==e.port||(e.port=undefined),e.path||(e.path="/"),e}},te={scheme:"https",domainHost:ne.domainHost,parse:ne.parse,serialize:ne.serialize},oe={},ae="[A-Za-z0-9\\-\\.\\_\\~\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]",ie="[0-9A-Fa-f]",ue=n(n("%[EFef][0-9A-Fa-f]%"+ie+ie+"%"+ie+ie)+"|"+n("%[89A-Fa-f][0-9A-Fa-f]%"+ie+ie)+"|"+n("%"+ie+ie)),se=r("[\\!\\$\\%\\'\\(\\)\\*\\+\\,\\-\\.0-9\\<\\>A-Z\\x5E-\\x7E]",'[\\"\\\\]'),fe=new RegExp(ae,"g"),ce=new RegExp(ue,"g"),pe=new RegExp(r("[^]","[A-Za-z0-9\\!\\$\\%\\'\\*\\+\\-\\^\\_\\`\\{\\|\\}\\~]","[\\.]",'[\\"]',se),"g"),he=new RegExp(r("[^]",ae,"[\\!\\$\\'\\(\\)\\*\\+\\,\\;\\:\\@]"),"g"),de=he,le={scheme:"mailto",parse:function(e,r){var n=e,t=n.to=n.path?n.path.split(","):[];if(n.path=undefined,n.query){for(var o=!1,a={},i=n.query.split("&"),u=0,s=i.length;u<s;++u){var f=i[u].split("=");switch(f[0]){case"to":for(var c=f[1].split(","),p=0,h=c.length;p<h;++p)t.push(c[p]);break;case"subject":n.subject=O(f[1],r);break;case"body":n.body=O(f[1],r);break;default:o=!0,a[O(f[0],r)]=O(f[1],r)}}o&&(n.headers=a)}n.query=undefined;for(var d=0,l=t.length;d<l;++d){var g=t[d].split("@");if(g[0]=O(g[0]),r.unicodeSupport)g[1]=O(g[1],r).toLowerCase();else try{g[1]=Y.toASCII(O(g[1],r).toLowerCase())}catch(v){n.error=n.error||"Email address's domain name can not be converted to ASCII via punycode: "+v}t[d]=g.join("@")}return n},serialize:function(e,r){var n=e,t=a(e.to);if(t){for(var i=0,u=t.length;i<u;++i){var s=String(t[i]),f=s.lastIndexOf("@"),c=s.slice(0,f).replace(ce,I).replace(ce,o).replace(pe,h),p=s.slice(f+1);try{p=r.iri?Y.toUnicode(p):Y.toASCII(O(p,r).toLowerCase())}catch(v){n.error=n.error||"Email address's domain name can not be converted to "+(r.iri?"Unicode":"ASCII")+" via punycode: "+v}t[i]=c+"@"+p}n.path=t.join(",")}var d=e.headers=e.headers||{};e.subject&&(d.subject=e.subject),e.body&&(d.body=e.body);var l=[];for(var g in 
d)d[g]!==oe[g]&&l.push(g.replace(ce,I).replace(ce,o).replace(he,h)+"="+d[g].replace(ce,I).replace(ce,o).replace(de,h));return l.length&&(n.query=l.join("&")),n}},ge=/^([^\:]+)\:(.*)/,ve={scheme:"urn",parse:function(e,r){var n=e.path&&e.path.match(ge),t=e;if(n){var o=r.scheme||t.scheme||"urn",a=n[1].toLowerCase(),i=n[2],u=o+":"+(r.nid||a),s=B[u];t.nid=a,t.nss=i,t.path=undefined,s&&(t=s.parse(t,r))}else t.error=t.error||"URN can not be parsed.";return t},serialize:function(e,r){var n=r.scheme||e.scheme||"urn",t=e.nid,o=n+":"+(r.nid||t),a=B[o];a&&(e=a.serialize(e,r));var i=e,u=e.nss;return i.path=(t||r.nid)+":"+u,i}},me=/^[0-9A-Fa-f]{8}(?:\-[0-9A-Fa-f]{4}){3}\-[0-9A-Fa-f]{12}$/,Ee={scheme:"urn:uuid",parse:function(e,r){var n=e;return n.uuid=n.nss,n.nss=undefined,r.tolerant||n.uuid&&n.uuid.match(me)||(n.error=n.error||"UUID is not valid."),n},serialize:function(e,r){var n=e;return n.nss=(e.uuid||"").toLowerCase(),n}};B[ne.scheme]=ne,B[te.scheme]=te,B[le.scheme]=le,B[ve.scheme]=ve,B[Ee.scheme]=Ee,e.SCHEMES=B,e.pctEncChar=h,e.pctDecChars=d,e.parse=E,e.removeDotSegments=y,e.serialize=S,e.resolveComponents=A,e.resolve=D,e.normalize=w,e.equal=b,e.escapeComponent=x,e.unescapeComponent=O,Object.defineProperty(e,"__esModule",{value:!0})}); //# sourceMappingURL=uri.all.min.js.map
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/es5/uri.all.min.js
uri.all.min.js
(function (global, factory) { typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) : typeof define === 'function' && define.amd ? define(['exports'], factory) : (factory((global.URI = global.URI || {}))); }(this, (function (exports) { 'use strict'; function merge() { for (var _len = arguments.length, sets = Array(_len), _key = 0; _key < _len; _key++) { sets[_key] = arguments[_key]; } if (sets.length > 1) { sets[0] = sets[0].slice(0, -1); var xl = sets.length - 1; for (var x = 1; x < xl; ++x) { sets[x] = sets[x].slice(1, -1); } sets[xl] = sets[xl].slice(1); return sets.join(''); } else { return sets[0]; } } function subexp(str) { return "(?:" + str + ")"; } function typeOf(o) { return o === undefined ? "undefined" : o === null ? "null" : Object.prototype.toString.call(o).split(" ").pop().split("]").shift().toLowerCase(); } function toUpperCase(str) { return str.toUpperCase(); } function toArray(obj) { return obj !== undefined && obj !== null ? obj instanceof Array ? obj : typeof obj.length !== "number" || obj.split || obj.setInterval || obj.call ? [obj] : Array.prototype.slice.call(obj) : []; } function assign(target, source) { var obj = target; if (source) { for (var key in source) { obj[key] = source[key]; } } return obj; } function buildExps(isIRI) { var ALPHA$$ = "[A-Za-z]", CR$ = "[\\x0D]", DIGIT$$ = "[0-9]", DQUOTE$$ = "[\\x22]", HEXDIG$$ = merge(DIGIT$$, "[A-Fa-f]"), //case-insensitive LF$$ = "[\\x0A]", SP$$ = "[\\x20]", PCT_ENCODED$ = subexp(subexp("%[EFef]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%[89A-Fa-f]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%" + HEXDIG$$ + HEXDIG$$)), //expanded GEN_DELIMS$$ = "[\\:\\/\\?\\#\\[\\]\\@]", SUB_DELIMS$$ = "[\\!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=]", RESERVED$$ = merge(GEN_DELIMS$$, SUB_DELIMS$$), UCSCHAR$$ = isIRI ? 
"[\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]" : "[]", //subset, excludes bidi control characters IPRIVATE$$ = isIRI ? "[\\uE000-\\uF8FF]" : "[]", //subset UNRESERVED$$ = merge(ALPHA$$, DIGIT$$, "[\\-\\.\\_\\~]", UCSCHAR$$), SCHEME$ = subexp(ALPHA$$ + merge(ALPHA$$, DIGIT$$, "[\\+\\-\\.]") + "*"), USERINFO$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:]")) + "*"), DEC_OCTET$ = subexp(subexp("25[0-5]") + "|" + subexp("2[0-4]" + DIGIT$$) + "|" + subexp("1" + DIGIT$$ + DIGIT$$) + "|" + subexp("[1-9]" + DIGIT$$) + "|" + DIGIT$$), DEC_OCTET_RELAXED$ = subexp(subexp("25[0-5]") + "|" + subexp("2[0-4]" + DIGIT$$) + "|" + subexp("1" + DIGIT$$ + DIGIT$$) + "|" + subexp("0?[1-9]" + DIGIT$$) + "|0?0?" + DIGIT$$), //relaxed parsing rules IPV4ADDRESS$ = subexp(DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$), H16$ = subexp(HEXDIG$$ + "{1,4}"), LS32$ = subexp(subexp(H16$ + "\\:" + H16$) + "|" + IPV4ADDRESS$), IPV6ADDRESS1$ = subexp(subexp(H16$ + "\\:") + "{6}" + LS32$), // 6( h16 ":" ) ls32 IPV6ADDRESS2$ = subexp("\\:\\:" + subexp(H16$ + "\\:") + "{5}" + LS32$), // "::" 5( h16 ":" ) ls32 IPV6ADDRESS3$ = subexp(subexp(H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{4}" + LS32$), //[ h16 ] "::" 4( h16 ":" ) ls32 IPV6ADDRESS4$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,1}" + H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{3}" + LS32$), //[ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 IPV6ADDRESS5$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,2}" + H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{2}" + LS32$), //[ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 IPV6ADDRESS6$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,3}" + H16$) + "?\\:\\:" + H16$ + "\\:" + LS32$), //[ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 IPV6ADDRESS7$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,4}" + H16$) + "?\\:\\:" + LS32$), //[ *4( h16 ":" ) h16 ] "::" ls32 IPV6ADDRESS8$ = subexp(subexp(subexp(H16$ + "\\:") + 
"{0,5}" + H16$) + "?\\:\\:" + H16$), //[ *5( h16 ":" ) h16 ] "::" h16 IPV6ADDRESS9$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,6}" + H16$) + "?\\:\\:"), //[ *6( h16 ":" ) h16 ] "::" IPV6ADDRESS$ = subexp([IPV6ADDRESS1$, IPV6ADDRESS2$, IPV6ADDRESS3$, IPV6ADDRESS4$, IPV6ADDRESS5$, IPV6ADDRESS6$, IPV6ADDRESS7$, IPV6ADDRESS8$, IPV6ADDRESS9$].join("|")), ZONEID$ = subexp(subexp(UNRESERVED$$ + "|" + PCT_ENCODED$) + "+"), //RFC 6874 IPV6ADDRZ$ = subexp(IPV6ADDRESS$ + "\\%25" + ZONEID$), //RFC 6874 IPV6ADDRZ_RELAXED$ = subexp(IPV6ADDRESS$ + subexp("\\%25|\\%(?!" + HEXDIG$$ + "{2})") + ZONEID$), //RFC 6874, with relaxed parsing rules IPVFUTURE$ = subexp("[vV]" + HEXDIG$$ + "+\\." + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:]") + "+"), IP_LITERAL$ = subexp("\\[" + subexp(IPV6ADDRZ_RELAXED$ + "|" + IPV6ADDRESS$ + "|" + IPVFUTURE$) + "\\]"), //RFC 6874 REG_NAME$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$)) + "*"), HOST$ = subexp(IP_LITERAL$ + "|" + IPV4ADDRESS$ + "(?!" + REG_NAME$ + ")" + "|" + REG_NAME$), PORT$ = subexp(DIGIT$$ + "*"), AUTHORITY$ = subexp(subexp(USERINFO$ + "@") + "?" + HOST$ + subexp("\\:" + PORT$) + "?"), PCHAR$ = subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@]")), SEGMENT$ = subexp(PCHAR$ + "*"), SEGMENT_NZ$ = subexp(PCHAR$ + "+"), SEGMENT_NZ_NC$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\@]")) + "+"), PATH_ABEMPTY$ = subexp(subexp("\\/" + SEGMENT$) + "*"), PATH_ABSOLUTE$ = subexp("\\/" + subexp(SEGMENT_NZ$ + PATH_ABEMPTY$) + "?"), //simplified PATH_NOSCHEME$ = subexp(SEGMENT_NZ_NC$ + PATH_ABEMPTY$), //simplified PATH_ROOTLESS$ = subexp(SEGMENT_NZ$ + PATH_ABEMPTY$), //simplified PATH_EMPTY$ = "(?!" 
+ PCHAR$ + ")", PATH$ = subexp(PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$), QUERY$ = subexp(subexp(PCHAR$ + "|" + merge("[\\/\\?]", IPRIVATE$$)) + "*"), FRAGMENT$ = subexp(subexp(PCHAR$ + "|[\\/\\?]") + "*"), HIER_PART$ = subexp(subexp("\\/\\/" + AUTHORITY$ + PATH_ABEMPTY$) + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$), URI$ = subexp(SCHEME$ + "\\:" + HIER_PART$ + subexp("\\?" + QUERY$) + "?" + subexp("\\#" + FRAGMENT$) + "?"), RELATIVE_PART$ = subexp(subexp("\\/\\/" + AUTHORITY$ + PATH_ABEMPTY$) + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_EMPTY$), RELATIVE$ = subexp(RELATIVE_PART$ + subexp("\\?" + QUERY$) + "?" + subexp("\\#" + FRAGMENT$) + "?"), URI_REFERENCE$ = subexp(URI$ + "|" + RELATIVE$), ABSOLUTE_URI$ = subexp(SCHEME$ + "\\:" + HIER_PART$ + subexp("\\?" + QUERY$) + "?"), GENERIC_REF$ = "^(" + SCHEME$ + ")\\:" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", RELATIVE_REF$ = "^(){0}" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?" 
+ subexp("\\#(" + FRAGMENT$ + ")") + "?$", ABSOLUTE_REF$ = "^(" + SCHEME$ + ")\\:" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?$", SAMEDOC_REF$ = "^" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", AUTHORITY_REF$ = "^" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?$"; return { NOT_SCHEME: new RegExp(merge("[^]", ALPHA$$, DIGIT$$, "[\\+\\-\\.]"), "g"), NOT_USERINFO: new RegExp(merge("[^\\%\\:]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_HOST: new RegExp(merge("[^\\%\\[\\]\\:]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_PATH: new RegExp(merge("[^\\%\\/\\:\\@]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_PATH_NOSCHEME: new RegExp(merge("[^\\%\\/\\@]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_QUERY: new RegExp(merge("[^\\%]", UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@\\/\\?]", IPRIVATE$$), "g"), NOT_FRAGMENT: new RegExp(merge("[^\\%]", UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@\\/\\?]"), "g"), ESCAPE: new RegExp(merge("[^]", UNRESERVED$$, SUB_DELIMS$$), "g"), UNRESERVED: new RegExp(UNRESERVED$$, "g"), OTHER_CHARS: new RegExp(merge("[^\\%]", UNRESERVED$$, RESERVED$$), "g"), PCT_ENCODED: new RegExp(PCT_ENCODED$, "g"), IPV4ADDRESS: new RegExp("^(" + IPV4ADDRESS$ + ")$"), IPV6ADDRESS: new RegExp("^\\[?(" + IPV6ADDRESS$ + ")" + subexp(subexp("\\%25|\\%(?!" 
+ HEXDIG$$ + "{2})") + "(" + ZONEID$ + ")") + "?\\]?$") //RFC 6874, with relaxed parsing rules }; } var URI_PROTOCOL = buildExps(false); var IRI_PROTOCOL = buildExps(true); var slicedToArray = function () { function sliceIterator(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"]) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } return function (arr, i) { if (Array.isArray(arr)) { return arr; } else if (Symbol.iterator in Object(arr)) { return sliceIterator(arr, i); } else { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } }; }(); var toConsumableArray = function (arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) arr2[i] = arr[i]; return arr2; } else { return Array.from(arr); } }; /** Highest positive signed 32-bit float value */ var maxInt = 2147483647; // aka. 0x7FFFFFFF or 2^31-1 /** Bootstring parameters */ var base = 36; var tMin = 1; var tMax = 26; var skew = 38; var damp = 700; var initialBias = 72; var initialN = 128; // 0x80 var delimiter = '-'; // '\x2D' /** Regular expressions */ var regexPunycode = /^xn--/; var regexNonASCII = /[^\0-\x7E]/; // non-ASCII chars var regexSeparators = /[\x2E\u3002\uFF0E\uFF61]/g; // RFC 3490 separators /** Error messages */ var errors = { 'overflow': 'Overflow: input needs wider integers to process', 'not-basic': 'Illegal input >= 0x80 (not a basic code point)', 'invalid-input': 'Invalid input' }; /** Convenience shortcuts */ var baseMinusTMin = base - tMin; var floor = Math.floor; var stringFromCharCode = String.fromCharCode; /*--------------------------------------------------------------------------*/ /** * A generic error utility function. 
* @private * @param {String} type The error type. * @returns {Error} Throws a `RangeError` with the applicable error message. */ function error$1(type) { throw new RangeError(errors[type]); } /** * A generic `Array#map` utility function. * @private * @param {Array} array The array to iterate over. * @param {Function} callback The function that gets called for every array * item. * @returns {Array} A new array of values returned by the callback function. */ function map(array, fn) { var result = []; var length = array.length; while (length--) { result[length] = fn(array[length]); } return result; } /** * A simple `Array#map`-like wrapper to work with domain name strings or email * addresses. * @private * @param {String} domain The domain name or email address. * @param {Function} callback The function that gets called for every * character. * @returns {Array} A new string of characters returned by the callback * function. */ function mapDomain(string, fn) { var parts = string.split('@'); var result = ''; if (parts.length > 1) { // In email addresses, only the domain name should be punycoded. Leave // the local part (i.e. everything up to `@`) intact. result = parts[0] + '@'; string = parts[1]; } // Avoid `split(regex)` for IE8 compatibility. See #17. string = string.replace(regexSeparators, '\x2E'); var labels = string.split('.'); var encoded = map(labels, fn).join('.'); return result + encoded; } /** * Creates an array containing the numeric code points of each Unicode * character in the string. While JavaScript uses UCS-2 internally, * this function will convert a pair of surrogate halves (each of which * UCS-2 exposes as separate characters) into a single code point, * matching UTF-16. * @see `punycode.ucs2.encode` * @see <https://mathiasbynens.be/notes/javascript-encoding> * @memberOf punycode.ucs2 * @name decode * @param {String} string The Unicode input string (UCS-2). * @returns {Array} The new array of code points. 
*/ function ucs2decode(string) { var output = []; var counter = 0; var length = string.length; while (counter < length) { var value = string.charCodeAt(counter++); if (value >= 0xD800 && value <= 0xDBFF && counter < length) { // It's a high surrogate, and there is a next character. var extra = string.charCodeAt(counter++); if ((extra & 0xFC00) == 0xDC00) { // Low surrogate. output.push(((value & 0x3FF) << 10) + (extra & 0x3FF) + 0x10000); } else { // It's an unmatched surrogate; only append this code unit, in case the // next code unit is the high surrogate of a surrogate pair. output.push(value); counter--; } } else { output.push(value); } } return output; } /** * Creates a string based on an array of numeric code points. * @see `punycode.ucs2.decode` * @memberOf punycode.ucs2 * @name encode * @param {Array} codePoints The array of numeric code points. * @returns {String} The new Unicode string (UCS-2). */ var ucs2encode = function ucs2encode(array) { return String.fromCodePoint.apply(String, toConsumableArray(array)); }; /** * Converts a basic code point into a digit/integer. * @see `digitToBasic()` * @private * @param {Number} codePoint The basic numeric code point value. * @returns {Number} The numeric value of a basic code point (for use in * representing integers) in the range `0` to `base - 1`, or `base` if * the code point does not represent a value. */ var basicToDigit = function basicToDigit(codePoint) { if (codePoint - 0x30 < 0x0A) { return codePoint - 0x16; } if (codePoint - 0x41 < 0x1A) { return codePoint - 0x41; } if (codePoint - 0x61 < 0x1A) { return codePoint - 0x61; } return base; }; /** * Converts a digit/integer into a basic code point. * @see `basicToDigit()` * @private * @param {Number} digit The numeric value of a basic code point. * @returns {Number} The basic code point whose value (when used for * representing integers) is `digit`, which needs to be in the range * `0` to `base - 1`. 
If `flag` is non-zero, the uppercase form is * used; else, the lowercase form is used. The behavior is undefined * if `flag` is non-zero and `digit` has no uppercase form. */ var digitToBasic = function digitToBasic(digit, flag) { // 0..25 map to ASCII a..z or A..Z // 26..35 map to ASCII 0..9 return digit + 22 + 75 * (digit < 26) - ((flag != 0) << 5); }; /** * Bias adaptation function as per section 3.4 of RFC 3492. * https://tools.ietf.org/html/rfc3492#section-3.4 * @private */ var adapt = function adapt(delta, numPoints, firstTime) { var k = 0; delta = firstTime ? floor(delta / damp) : delta >> 1; delta += floor(delta / numPoints); for (; /* no initialization */delta > baseMinusTMin * tMax >> 1; k += base) { delta = floor(delta / baseMinusTMin); } return floor(k + (baseMinusTMin + 1) * delta / (delta + skew)); }; /** * Converts a Punycode string of ASCII-only symbols to a string of Unicode * symbols. * @memberOf punycode * @param {String} input The Punycode string of ASCII-only symbols. * @returns {String} The resulting string of Unicode symbols. */ var decode = function decode(input) { // Don't use UCS-2. var output = []; var inputLength = input.length; var i = 0; var n = initialN; var bias = initialBias; // Handle the basic code points: let `basic` be the number of input code // points before the last delimiter, or `0` if there is none, then copy // the first basic code points to the output. var basic = input.lastIndexOf(delimiter); if (basic < 0) { basic = 0; } for (var j = 0; j < basic; ++j) { // if it's not a basic code point if (input.charCodeAt(j) >= 0x80) { error$1('not-basic'); } output.push(input.charCodeAt(j)); } // Main decoding loop: start just after the last delimiter if any basic code // points were copied; start at the beginning otherwise. for (var index = basic > 0 ? basic + 1 : 0; index < inputLength;) /* no final expression */{ // `index` is the index of the next character to be consumed. 
// Decode a generalized variable-length integer into `delta`, // which gets added to `i`. The overflow checking is easier // if we increase `i` as we go, then subtract off its starting // value at the end to obtain `delta`. var oldi = i; for (var w = 1, k = base;; /* no condition */k += base) { if (index >= inputLength) { error$1('invalid-input'); } var digit = basicToDigit(input.charCodeAt(index++)); if (digit >= base || digit > floor((maxInt - i) / w)) { error$1('overflow'); } i += digit * w; var t = k <= bias ? tMin : k >= bias + tMax ? tMax : k - bias; if (digit < t) { break; } var baseMinusT = base - t; if (w > floor(maxInt / baseMinusT)) { error$1('overflow'); } w *= baseMinusT; } var out = output.length + 1; bias = adapt(i - oldi, out, oldi == 0); // `i` was supposed to wrap around from `out` to `0`, // incrementing `n` each time, so we'll fix that now: if (floor(i / out) > maxInt - n) { error$1('overflow'); } n += floor(i / out); i %= out; // Insert `n` at position `i` of the output. output.splice(i++, 0, n); } return String.fromCodePoint.apply(String, output); }; /** * Converts a string of Unicode symbols (e.g. a domain name label) to a * Punycode string of ASCII-only symbols. * @memberOf punycode * @param {String} input The string of Unicode symbols. * @returns {String} The resulting Punycode string of ASCII-only symbols. */ var encode = function encode(input) { var output = []; // Convert the input in UCS-2 to an array of Unicode code points. input = ucs2decode(input); // Cache the length. var inputLength = input.length; // Initialize the state. var n = initialN; var delta = 0; var bias = initialBias; // Handle the basic code points. 
var _iteratorNormalCompletion = true; var _didIteratorError = false; var _iteratorError = undefined; try { for (var _iterator = input[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) { var _currentValue2 = _step.value; if (_currentValue2 < 0x80) { output.push(stringFromCharCode(_currentValue2)); } } } catch (err) { _didIteratorError = true; _iteratorError = err; } finally { try { if (!_iteratorNormalCompletion && _iterator.return) { _iterator.return(); } } finally { if (_didIteratorError) { throw _iteratorError; } } } var basicLength = output.length; var handledCPCount = basicLength; // `handledCPCount` is the number of code points that have been handled; // `basicLength` is the number of basic code points. // Finish the basic string with a delimiter unless it's empty. if (basicLength) { output.push(delimiter); } // Main encoding loop: while (handledCPCount < inputLength) { // All non-basic code points < n have been handled already. Find the next // larger one: var m = maxInt; var _iteratorNormalCompletion2 = true; var _didIteratorError2 = false; var _iteratorError2 = undefined; try { for (var _iterator2 = input[Symbol.iterator](), _step2; !(_iteratorNormalCompletion2 = (_step2 = _iterator2.next()).done); _iteratorNormalCompletion2 = true) { var currentValue = _step2.value; if (currentValue >= n && currentValue < m) { m = currentValue; } } // Increase `delta` enough to advance the decoder's <n,i> state to <m,0>, // but guard against overflow. 
} catch (err) { _didIteratorError2 = true; _iteratorError2 = err; } finally { try { if (!_iteratorNormalCompletion2 && _iterator2.return) { _iterator2.return(); } } finally { if (_didIteratorError2) { throw _iteratorError2; } } } var handledCPCountPlusOne = handledCPCount + 1; if (m - n > floor((maxInt - delta) / handledCPCountPlusOne)) { error$1('overflow'); } delta += (m - n) * handledCPCountPlusOne; n = m; var _iteratorNormalCompletion3 = true; var _didIteratorError3 = false; var _iteratorError3 = undefined; try { for (var _iterator3 = input[Symbol.iterator](), _step3; !(_iteratorNormalCompletion3 = (_step3 = _iterator3.next()).done); _iteratorNormalCompletion3 = true) { var _currentValue = _step3.value; if (_currentValue < n && ++delta > maxInt) { error$1('overflow'); } if (_currentValue == n) { // Represent delta as a generalized variable-length integer. var q = delta; for (var k = base;; /* no condition */k += base) { var t = k <= bias ? tMin : k >= bias + tMax ? tMax : k - bias; if (q < t) { break; } var qMinusT = q - t; var baseMinusT = base - t; output.push(stringFromCharCode(digitToBasic(t + qMinusT % baseMinusT, 0))); q = floor(qMinusT / baseMinusT); } output.push(stringFromCharCode(digitToBasic(q, 0))); bias = adapt(delta, handledCPCountPlusOne, handledCPCount == basicLength); delta = 0; ++handledCPCount; } } } catch (err) { _didIteratorError3 = true; _iteratorError3 = err; } finally { try { if (!_iteratorNormalCompletion3 && _iterator3.return) { _iterator3.return(); } } finally { if (_didIteratorError3) { throw _iteratorError3; } } } ++delta; ++n; } return output.join(''); }; /** * Converts a Punycode string representing a domain name or an email address * to Unicode. Only the Punycoded parts of the input will be converted, i.e. * it doesn't matter if you call it on a string that has already been * converted to Unicode. * @memberOf punycode * @param {String} input The Punycoded domain name or email address to * convert to Unicode. 
* @returns {String} The Unicode representation of the given Punycode * string. */ var toUnicode = function toUnicode(input) { return mapDomain(input, function (string) { return regexPunycode.test(string) ? decode(string.slice(4).toLowerCase()) : string; }); }; /** * Converts a Unicode string representing a domain name or an email address to * Punycode. Only the non-ASCII parts of the domain name will be converted, * i.e. it doesn't matter if you call it with a domain that's already in * ASCII. * @memberOf punycode * @param {String} input The domain name or email address to convert, as a * Unicode string. * @returns {String} The Punycode representation of the given domain name or * email address. */ var toASCII = function toASCII(input) { return mapDomain(input, function (string) { return regexNonASCII.test(string) ? 'xn--' + encode(string) : string; }); }; /*--------------------------------------------------------------------------*/ /** Define the public API */ var punycode = { /** * A string representing the current Punycode.js version number. * @memberOf punycode * @type String */ 'version': '2.1.0', /** * An object of methods to convert from JavaScript's internal character * representation (UCS-2) to Unicode code points, and back. * @see <https://mathiasbynens.be/notes/javascript-encoding> * @memberOf punycode * @type Object */ 'ucs2': { 'decode': ucs2decode, 'encode': ucs2encode }, 'decode': decode, 'encode': encode, 'toASCII': toASCII, 'toUnicode': toUnicode }; /** * URI.js * * @fileoverview An RFC 3986 compliant, scheme extendable URI parsing/validating/resolving library for JavaScript. * @author <a href="mailto:[email protected]">Gary Court</a> * @see http://github.com/garycourt/uri-js */ /** * Copyright 2011 Gary Court. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY GARY COURT ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GARY COURT OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of Gary Court. 
*/ var SCHEMES = {}; function pctEncChar(chr) { var c = chr.charCodeAt(0); var e = void 0; if (c < 16) e = "%0" + c.toString(16).toUpperCase();else if (c < 128) e = "%" + c.toString(16).toUpperCase();else if (c < 2048) e = "%" + (c >> 6 | 192).toString(16).toUpperCase() + "%" + (c & 63 | 128).toString(16).toUpperCase();else e = "%" + (c >> 12 | 224).toString(16).toUpperCase() + "%" + (c >> 6 & 63 | 128).toString(16).toUpperCase() + "%" + (c & 63 | 128).toString(16).toUpperCase(); return e; } function pctDecChars(str) { var newStr = ""; var i = 0; var il = str.length; while (i < il) { var c = parseInt(str.substr(i + 1, 2), 16); if (c < 128) { newStr += String.fromCharCode(c); i += 3; } else if (c >= 194 && c < 224) { if (il - i >= 6) { var c2 = parseInt(str.substr(i + 4, 2), 16); newStr += String.fromCharCode((c & 31) << 6 | c2 & 63); } else { newStr += str.substr(i, 6); } i += 6; } else if (c >= 224) { if (il - i >= 9) { var _c = parseInt(str.substr(i + 4, 2), 16); var c3 = parseInt(str.substr(i + 7, 2), 16); newStr += String.fromCharCode((c & 15) << 12 | (_c & 63) << 6 | c3 & 63); } else { newStr += str.substr(i, 9); } i += 9; } else { newStr += str.substr(i, 3); i += 3; } } return newStr; } function _normalizeComponentEncoding(components, protocol) { function decodeUnreserved(str) { var decStr = pctDecChars(str); return !decStr.match(protocol.UNRESERVED) ? 
str : decStr; } if (components.scheme) components.scheme = String(components.scheme).replace(protocol.PCT_ENCODED, decodeUnreserved).toLowerCase().replace(protocol.NOT_SCHEME, ""); if (components.userinfo !== undefined) components.userinfo = String(components.userinfo).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_USERINFO, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.host !== undefined) components.host = String(components.host).replace(protocol.PCT_ENCODED, decodeUnreserved).toLowerCase().replace(protocol.NOT_HOST, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.path !== undefined) components.path = String(components.path).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(components.scheme ? protocol.NOT_PATH : protocol.NOT_PATH_NOSCHEME, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.query !== undefined) components.query = String(components.query).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_QUERY, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.fragment !== undefined) components.fragment = String(components.fragment).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_FRAGMENT, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); return components; } function _stripLeadingZeros(str) { return str.replace(/^0*(.*)/, "$1") || "0"; } function _normalizeIPv4(host, protocol) { var matches = host.match(protocol.IPV4ADDRESS) || []; var _matches = slicedToArray(matches, 2), address = _matches[1]; if (address) { return address.split(".").map(_stripLeadingZeros).join("."); } else { return host; } } function _normalizeIPv6(host, protocol) { var matches = host.match(protocol.IPV6ADDRESS) || []; var _matches2 = slicedToArray(matches, 3), address = _matches2[1], zone = _matches2[2]; if (address) { var _address$toLowerCase$ = address.toLowerCase().split('::').reverse(), _address$toLowerCase$2 = 
slicedToArray(_address$toLowerCase$, 2), last = _address$toLowerCase$2[0], first = _address$toLowerCase$2[1]; var firstFields = first ? first.split(":").map(_stripLeadingZeros) : []; var lastFields = last.split(":").map(_stripLeadingZeros); var isLastFieldIPv4Address = protocol.IPV4ADDRESS.test(lastFields[lastFields.length - 1]); var fieldCount = isLastFieldIPv4Address ? 7 : 8; var lastFieldsStart = lastFields.length - fieldCount; var fields = Array(fieldCount); for (var x = 0; x < fieldCount; ++x) { fields[x] = firstFields[x] || lastFields[lastFieldsStart + x] || ''; } if (isLastFieldIPv4Address) { fields[fieldCount - 1] = _normalizeIPv4(fields[fieldCount - 1], protocol); } var allZeroFields = fields.reduce(function (acc, field, index) { if (!field || field === "0") { var lastLongest = acc[acc.length - 1]; if (lastLongest && lastLongest.index + lastLongest.length === index) { lastLongest.length++; } else { acc.push({ index: index, length: 1 }); } } return acc; }, []); var longestZeroFields = allZeroFields.sort(function (a, b) { return b.length - a.length; })[0]; var newHost = void 0; if (longestZeroFields && longestZeroFields.length > 1) { var newFirst = fields.slice(0, longestZeroFields.index); var newLast = fields.slice(longestZeroFields.index + longestZeroFields.length); newHost = newFirst.join(":") + "::" + newLast.join(":"); } else { newHost = fields.join(":"); } if (zone) { newHost += "%" + zone; } return newHost; } else { return host; } } var URI_PARSE = /^(?:([^:\/?#]+):)?(?:\/\/((?:([^\/?#@]*)@)?(\[[^\/?#\]]+\]|[^\/?#:]*)(?:\:(\d*))?))?([^?#]*)(?:\?([^#]*))?(?:#((?:.|\n|\r)*))?/i; var NO_MATCH_IS_UNDEFINED = "".match(/(){0}/)[1] === undefined; function parse(uriString) { var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; var components = {}; var protocol = options.iri !== false ? IRI_PROTOCOL : URI_PROTOCOL; if (options.reference === "suffix") uriString = (options.scheme ? 
options.scheme + ":" : "") + "//" + uriString; var matches = uriString.match(URI_PARSE); if (matches) { if (NO_MATCH_IS_UNDEFINED) { //store each component components.scheme = matches[1]; components.userinfo = matches[3]; components.host = matches[4]; components.port = parseInt(matches[5], 10); components.path = matches[6] || ""; components.query = matches[7]; components.fragment = matches[8]; //fix port number if (isNaN(components.port)) { components.port = matches[5]; } } else { //IE FIX for improper RegExp matching //store each component components.scheme = matches[1] || undefined; components.userinfo = uriString.indexOf("@") !== -1 ? matches[3] : undefined; components.host = uriString.indexOf("//") !== -1 ? matches[4] : undefined; components.port = parseInt(matches[5], 10); components.path = matches[6] || ""; components.query = uriString.indexOf("?") !== -1 ? matches[7] : undefined; components.fragment = uriString.indexOf("#") !== -1 ? matches[8] : undefined; //fix port number if (isNaN(components.port)) { components.port = uriString.match(/\/\/(?:.|\n)*\:(?:\/|\?|\#|$)/) ? 
matches[4] : undefined; } } if (components.host) { //normalize IP hosts components.host = _normalizeIPv6(_normalizeIPv4(components.host, protocol), protocol); } //determine reference type if (components.scheme === undefined && components.userinfo === undefined && components.host === undefined && components.port === undefined && !components.path && components.query === undefined) { components.reference = "same-document"; } else if (components.scheme === undefined) { components.reference = "relative"; } else if (components.fragment === undefined) { components.reference = "absolute"; } else { components.reference = "uri"; } //check for reference errors if (options.reference && options.reference !== "suffix" && options.reference !== components.reference) { components.error = components.error || "URI is not a " + options.reference + " reference."; } //find scheme handler var schemeHandler = SCHEMES[(options.scheme || components.scheme || "").toLowerCase()]; //check if scheme can't handle IRIs if (!options.unicodeSupport && (!schemeHandler || !schemeHandler.unicodeSupport)) { //if host component is a domain name if (components.host && (options.domainHost || schemeHandler && schemeHandler.domainHost)) { //convert Unicode IDN -> ASCII IDN try { components.host = punycode.toASCII(components.host.replace(protocol.PCT_ENCODED, pctDecChars).toLowerCase()); } catch (e) { components.error = components.error || "Host's domain name can not be converted to ASCII via punycode: " + e; } } //convert IRI -> URI _normalizeComponentEncoding(components, URI_PROTOCOL); } else { //normalize encodings _normalizeComponentEncoding(components, protocol); } //perform scheme specific parsing if (schemeHandler && schemeHandler.parse) { schemeHandler.parse(components, options); } } else { components.error = components.error || "URI can not be parsed."; } return components; } function _recomposeAuthority(components, options) { var protocol = options.iri !== false ? 
IRI_PROTOCOL : URI_PROTOCOL; var uriTokens = []; if (components.userinfo !== undefined) { uriTokens.push(components.userinfo); uriTokens.push("@"); } if (components.host !== undefined) { //normalize IP hosts, add brackets and escape zone separator for IPv6 uriTokens.push(_normalizeIPv6(_normalizeIPv4(String(components.host), protocol), protocol).replace(protocol.IPV6ADDRESS, function (_, $1, $2) { return "[" + $1 + ($2 ? "%25" + $2 : "") + "]"; })); } if (typeof components.port === "number") { uriTokens.push(":"); uriTokens.push(components.port.toString(10)); } return uriTokens.length ? uriTokens.join("") : undefined; } var RDS1 = /^\.\.?\//; var RDS2 = /^\/\.(\/|$)/; var RDS3 = /^\/\.\.(\/|$)/; var RDS5 = /^\/?(?:.|\n)*?(?=\/|$)/; function removeDotSegments(input) { var output = []; while (input.length) { if (input.match(RDS1)) { input = input.replace(RDS1, ""); } else if (input.match(RDS2)) { input = input.replace(RDS2, "/"); } else if (input.match(RDS3)) { input = input.replace(RDS3, "/"); output.pop(); } else if (input === "." || input === "..") { input = ""; } else { var im = input.match(RDS5); if (im) { var s = im[0]; input = input.slice(s.length); output.push(s); } else { throw new Error("Unexpected dot segment condition"); } } } return output.join(""); } function serialize(components) { var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; var protocol = options.iri ? 
IRI_PROTOCOL : URI_PROTOCOL; var uriTokens = []; //find scheme handler var schemeHandler = SCHEMES[(options.scheme || components.scheme || "").toLowerCase()]; //perform scheme specific serialization if (schemeHandler && schemeHandler.serialize) schemeHandler.serialize(components, options); if (components.host) { //if host component is an IPv6 address if (protocol.IPV6ADDRESS.test(components.host)) {} //TODO: normalize IPv6 address as per RFC 5952 //if host component is a domain name else if (options.domainHost || schemeHandler && schemeHandler.domainHost) { //convert IDN via punycode try { components.host = !options.iri ? punycode.toASCII(components.host.replace(protocol.PCT_ENCODED, pctDecChars).toLowerCase()) : punycode.toUnicode(components.host); } catch (e) { components.error = components.error || "Host's domain name can not be converted to " + (!options.iri ? "ASCII" : "Unicode") + " via punycode: " + e; } } } //normalize encoding _normalizeComponentEncoding(components, protocol); if (options.reference !== "suffix" && components.scheme) { uriTokens.push(components.scheme); uriTokens.push(":"); } var authority = _recomposeAuthority(components, options); if (authority !== undefined) { if (options.reference !== "suffix") { uriTokens.push("//"); } uriTokens.push(authority); if (components.path && components.path.charAt(0) !== "/") { uriTokens.push("/"); } } if (components.path !== undefined) { var s = components.path; if (!options.absolutePath && (!schemeHandler || !schemeHandler.absolutePath)) { s = removeDotSegments(s); } if (authority === undefined) { s = s.replace(/^\/\//, "/%2F"); //don't allow the path to start with "//" } uriTokens.push(s); } if (components.query !== undefined) { uriTokens.push("?"); uriTokens.push(components.query); } if (components.fragment !== undefined) { uriTokens.push("#"); uriTokens.push(components.fragment); } return uriTokens.join(""); //merge tokens into a string } function resolveComponents(base, relative) { var options = 
arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {}; var skipNormalization = arguments[3]; var target = {}; if (!skipNormalization) { base = parse(serialize(base, options), options); //normalize base components relative = parse(serialize(relative, options), options); //normalize relative components } options = options || {}; if (!options.tolerant && relative.scheme) { target.scheme = relative.scheme; //target.authority = relative.authority; target.userinfo = relative.userinfo; target.host = relative.host; target.port = relative.port; target.path = removeDotSegments(relative.path || ""); target.query = relative.query; } else { if (relative.userinfo !== undefined || relative.host !== undefined || relative.port !== undefined) { //target.authority = relative.authority; target.userinfo = relative.userinfo; target.host = relative.host; target.port = relative.port; target.path = removeDotSegments(relative.path || ""); target.query = relative.query; } else { if (!relative.path) { target.path = base.path; if (relative.query !== undefined) { target.query = relative.query; } else { target.query = base.query; } } else { if (relative.path.charAt(0) === "/") { target.path = removeDotSegments(relative.path); } else { if ((base.userinfo !== undefined || base.host !== undefined || base.port !== undefined) && !base.path) { target.path = "/" + relative.path; } else if (!base.path) { target.path = relative.path; } else { target.path = base.path.slice(0, base.path.lastIndexOf("/") + 1) + relative.path; } target.path = removeDotSegments(target.path); } target.query = relative.query; } //target.authority = base.authority; target.userinfo = base.userinfo; target.host = base.host; target.port = base.port; } target.scheme = base.scheme; } target.fragment = relative.fragment; return target; } function resolve(baseURI, relativeURI, options) { var schemelessOptions = assign({ scheme: 'null' }, options); return serialize(resolveComponents(parse(baseURI, schemelessOptions), 
parse(relativeURI, schemelessOptions), schemelessOptions, true), schemelessOptions); } function normalize(uri, options) { if (typeof uri === "string") { uri = serialize(parse(uri, options), options); } else if (typeOf(uri) === "object") { uri = parse(serialize(uri, options), options); } return uri; } function equal(uriA, uriB, options) { if (typeof uriA === "string") { uriA = serialize(parse(uriA, options), options); } else if (typeOf(uriA) === "object") { uriA = serialize(uriA, options); } if (typeof uriB === "string") { uriB = serialize(parse(uriB, options), options); } else if (typeOf(uriB) === "object") { uriB = serialize(uriB, options); } return uriA === uriB; } function escapeComponent(str, options) { return str && str.toString().replace(!options || !options.iri ? URI_PROTOCOL.ESCAPE : IRI_PROTOCOL.ESCAPE, pctEncChar); } function unescapeComponent(str, options) { return str && str.toString().replace(!options || !options.iri ? URI_PROTOCOL.PCT_ENCODED : IRI_PROTOCOL.PCT_ENCODED, pctDecChars); } var handler = { scheme: "http", domainHost: true, parse: function parse(components, options) { //report missing host if (!components.host) { components.error = components.error || "HTTP URIs must have a host."; } return components; }, serialize: function serialize(components, options) { //normalize the default port if (components.port === (String(components.scheme).toLowerCase() !== "https" ? 80 : 443) || components.port === "") { components.port = undefined; } //normalize the empty path if (!components.path) { components.path = "/"; } //NOTE: We do not parse query strings for HTTP URIs //as WWW Form Url Encoded query strings are part of the HTML4+ spec, //and not the HTTP spec. return components; } }; var handler$1 = { scheme: "https", domainHost: handler.domainHost, parse: handler.parse, serialize: handler.serialize }; var O = {}; var isIRI = true; //RFC 3986 var UNRESERVED$$ = "[A-Za-z0-9\\-\\.\\_\\~" + (isIRI ? 
"\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF" : "") + "]"; var HEXDIG$$ = "[0-9A-Fa-f]"; //case-insensitive var PCT_ENCODED$ = subexp(subexp("%[EFef]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%[89A-Fa-f]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%" + HEXDIG$$ + HEXDIG$$)); //expanded //RFC 5322, except these symbols as per RFC 6068: @ : / ? # [ ] & ; = //const ATEXT$$ = "[A-Za-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\-\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~]"; //const WSP$$ = "[\\x20\\x09]"; //const OBS_QTEXT$$ = "[\\x01-\\x08\\x0B\\x0C\\x0E-\\x1F\\x7F]"; //(%d1-8 / %d11-12 / %d14-31 / %d127) //const QTEXT$$ = merge("[\\x21\\x23-\\x5B\\x5D-\\x7E]", OBS_QTEXT$$); //%d33 / %d35-91 / %d93-126 / obs-qtext //const VCHAR$$ = "[\\x21-\\x7E]"; //const WSP$$ = "[\\x20\\x09]"; //const OBS_QP$ = subexp("\\\\" + merge("[\\x00\\x0D\\x0A]", OBS_QTEXT$$)); //%d0 / CR / LF / obs-qtext //const FWS$ = subexp(subexp(WSP$$ + "*" + "\\x0D\\x0A") + "?" + WSP$$ + "+"); //const QUOTED_PAIR$ = subexp(subexp("\\\\" + subexp(VCHAR$$ + "|" + WSP$$)) + "|" + OBS_QP$); //const QUOTED_STRING$ = subexp('\\"' + subexp(FWS$ + "?" + QCONTENT$) + "*" + FWS$ + "?" + '\\"'); var ATEXT$$ = "[A-Za-z0-9\\!\\$\\%\\'\\*\\+\\-\\^\\_\\`\\{\\|\\}\\~]"; var QTEXT$$ = "[\\!\\$\\%\\'\\(\\)\\*\\+\\,\\-\\.0-9\\<\\>A-Z\\x5E-\\x7E]"; var VCHAR$$ = merge(QTEXT$$, "[\\\"\\\\]"); var SOME_DELIMS$$ = "[\\!\\$\\'\\(\\)\\*\\+\\,\\;\\:\\@]"; var UNRESERVED = new RegExp(UNRESERVED$$, "g"); var PCT_ENCODED = new RegExp(PCT_ENCODED$, "g"); var NOT_LOCAL_PART = new RegExp(merge("[^]", ATEXT$$, "[\\.]", '[\\"]', VCHAR$$), "g"); var NOT_HFNAME = new RegExp(merge("[^]", UNRESERVED$$, SOME_DELIMS$$), "g"); var NOT_HFVALUE = NOT_HFNAME; function decodeUnreserved(str) { var decStr = pctDecChars(str); return !decStr.match(UNRESERVED) ? 
str : decStr; } var handler$2 = { scheme: "mailto", parse: function parse$$1(components, options) { var mailtoComponents = components; var to = mailtoComponents.to = mailtoComponents.path ? mailtoComponents.path.split(",") : []; mailtoComponents.path = undefined; if (mailtoComponents.query) { var unknownHeaders = false; var headers = {}; var hfields = mailtoComponents.query.split("&"); for (var x = 0, xl = hfields.length; x < xl; ++x) { var hfield = hfields[x].split("="); switch (hfield[0]) { case "to": var toAddrs = hfield[1].split(","); for (var _x = 0, _xl = toAddrs.length; _x < _xl; ++_x) { to.push(toAddrs[_x]); } break; case "subject": mailtoComponents.subject = unescapeComponent(hfield[1], options); break; case "body": mailtoComponents.body = unescapeComponent(hfield[1], options); break; default: unknownHeaders = true; headers[unescapeComponent(hfield[0], options)] = unescapeComponent(hfield[1], options); break; } } if (unknownHeaders) mailtoComponents.headers = headers; } mailtoComponents.query = undefined; for (var _x2 = 0, _xl2 = to.length; _x2 < _xl2; ++_x2) { var addr = to[_x2].split("@"); addr[0] = unescapeComponent(addr[0]); if (!options.unicodeSupport) { //convert Unicode IDN -> ASCII IDN try { addr[1] = punycode.toASCII(unescapeComponent(addr[1], options).toLowerCase()); } catch (e) { mailtoComponents.error = mailtoComponents.error || "Email address's domain name can not be converted to ASCII via punycode: " + e; } } else { addr[1] = unescapeComponent(addr[1], options).toLowerCase(); } to[_x2] = addr.join("@"); } return mailtoComponents; }, serialize: function serialize$$1(mailtoComponents, options) { var components = mailtoComponents; var to = toArray(mailtoComponents.to); if (to) { for (var x = 0, xl = to.length; x < xl; ++x) { var toAddr = String(to[x]); var atIdx = toAddr.lastIndexOf("@"); var localPart = toAddr.slice(0, atIdx).replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_LOCAL_PART, pctEncChar); var domain 
= toAddr.slice(atIdx + 1); //convert IDN via punycode try { domain = !options.iri ? punycode.toASCII(unescapeComponent(domain, options).toLowerCase()) : punycode.toUnicode(domain); } catch (e) { components.error = components.error || "Email address's domain name can not be converted to " + (!options.iri ? "ASCII" : "Unicode") + " via punycode: " + e; } to[x] = localPart + "@" + domain; } components.path = to.join(","); } var headers = mailtoComponents.headers = mailtoComponents.headers || {}; if (mailtoComponents.subject) headers["subject"] = mailtoComponents.subject; if (mailtoComponents.body) headers["body"] = mailtoComponents.body; var fields = []; for (var name in headers) { if (headers[name] !== O[name]) { fields.push(name.replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_HFNAME, pctEncChar) + "=" + headers[name].replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_HFVALUE, pctEncChar)); } } if (fields.length) { components.query = fields.join("&"); } return components; } }; var URN_PARSE = /^([^\:]+)\:(.*)/; //RFC 2141 var handler$3 = { scheme: "urn", parse: function parse$$1(components, options) { var matches = components.path && components.path.match(URN_PARSE); var urnComponents = components; if (matches) { var scheme = options.scheme || urnComponents.scheme || "urn"; var nid = matches[1].toLowerCase(); var nss = matches[2]; var urnScheme = scheme + ":" + (options.nid || nid); var schemeHandler = SCHEMES[urnScheme]; urnComponents.nid = nid; urnComponents.nss = nss; urnComponents.path = undefined; if (schemeHandler) { urnComponents = schemeHandler.parse(urnComponents, options); } } else { urnComponents.error = urnComponents.error || "URN can not be parsed."; } return urnComponents; }, serialize: function serialize$$1(urnComponents, options) { var scheme = options.scheme || urnComponents.scheme || "urn"; var nid = urnComponents.nid; var urnScheme = scheme + ":" + (options.nid || nid); var 
schemeHandler = SCHEMES[urnScheme]; if (schemeHandler) { urnComponents = schemeHandler.serialize(urnComponents, options); } var uriComponents = urnComponents; var nss = urnComponents.nss; uriComponents.path = (nid || options.nid) + ":" + nss; return uriComponents; } }; var UUID = /^[0-9A-Fa-f]{8}(?:\-[0-9A-Fa-f]{4}){3}\-[0-9A-Fa-f]{12}$/; //RFC 4122 var handler$4 = { scheme: "urn:uuid", parse: function parse(urnComponents, options) { var uuidComponents = urnComponents; uuidComponents.uuid = uuidComponents.nss; uuidComponents.nss = undefined; if (!options.tolerant && (!uuidComponents.uuid || !uuidComponents.uuid.match(UUID))) { uuidComponents.error = uuidComponents.error || "UUID is not valid."; } return uuidComponents; }, serialize: function serialize(uuidComponents, options) { var urnComponents = uuidComponents; //normalize UUID urnComponents.nss = (uuidComponents.uuid || "").toLowerCase(); return urnComponents; } }; SCHEMES[handler.scheme] = handler; SCHEMES[handler$1.scheme] = handler$1; SCHEMES[handler$2.scheme] = handler$2; SCHEMES[handler$3.scheme] = handler$3; SCHEMES[handler$4.scheme] = handler$4; exports.SCHEMES = SCHEMES; exports.pctEncChar = pctEncChar; exports.pctDecChars = pctDecChars; exports.parse = parse; exports.removeDotSegments = removeDotSegments; exports.serialize = serialize; exports.resolveComponents = resolveComponents; exports.resolve = resolve; exports.normalize = normalize; exports.equal = equal; exports.escapeComponent = escapeComponent; exports.unescapeComponent = unescapeComponent; Object.defineProperty(exports, '__esModule', { value: true }); }))); //# sourceMappingURL=uri.all.js.map
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/es5/uri.all.js
uri.all.js
export interface URIComponents { scheme?: string; userinfo?: string; host?: string; port?: number | string; path?: string; query?: string; fragment?: string; reference?: string; error?: string; } export interface URIOptions { scheme?: string; reference?: string; tolerant?: boolean; absolutePath?: boolean; iri?: boolean; unicodeSupport?: boolean; domainHost?: boolean; } export interface URISchemeHandler<Components extends URIComponents = URIComponents, Options extends URIOptions = URIOptions, ParentComponents extends URIComponents = URIComponents> { scheme: string; parse(components: ParentComponents, options: Options): Components; serialize(components: Components, options: Options): ParentComponents; unicodeSupport?: boolean; domainHost?: boolean; absolutePath?: boolean; } export interface URIRegExps { NOT_SCHEME: RegExp; NOT_USERINFO: RegExp; NOT_HOST: RegExp; NOT_PATH: RegExp; NOT_PATH_NOSCHEME: RegExp; NOT_QUERY: RegExp; NOT_FRAGMENT: RegExp; ESCAPE: RegExp; UNRESERVED: RegExp; OTHER_CHARS: RegExp; PCT_ENCODED: RegExp; IPV4ADDRESS: RegExp; IPV6ADDRESS: RegExp; } export declare const SCHEMES: { [scheme: string]: URISchemeHandler; }; export declare function pctEncChar(chr: string): string; export declare function pctDecChars(str: string): string; export declare function parse(uriString: string, options?: URIOptions): URIComponents; export declare function removeDotSegments(input: string): string; export declare function serialize(components: URIComponents, options?: URIOptions): string; export declare function resolveComponents(base: URIComponents, relative: URIComponents, options?: URIOptions, skipNormalization?: boolean): URIComponents; export declare function resolve(baseURI: string, relativeURI: string, options?: URIOptions): string; export declare function normalize(uri: string, options?: URIOptions): string; export declare function normalize(uri: URIComponents, options?: URIOptions): URIComponents; export declare function equal(uriA: string, uriB: string, 
options?: URIOptions): boolean; export declare function equal(uriA: URIComponents, uriB: URIComponents, options?: URIOptions): boolean; export declare function escapeComponent(str: string, options?: URIOptions): string; export declare function unescapeComponent(str: string, options?: URIOptions): string;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/es5/uri.all.min.d.ts
uri.all.min.d.ts
* Copyright 2011 Gary Court. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY GARY COURT ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GARY COURT OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those of the * authors and should not be interpreted as representing official policies, either expressed * or implied, of Gary Court. 
*/ import URI_PROTOCOL from "uri-js/dist/esnext/regexps-uri"; import IRI_PROTOCOL from "uri-js/dist/esnext/regexps-iri"; import punycode from "punycode"; import { toUpperCase, typeOf, assign } from "uri-js/dist/esnext/util"; export const SCHEMES = {}; export function pctEncChar(chr) { const c = chr.charCodeAt(0); let e; if (c < 16) e = "%0" + c.toString(16).toUpperCase(); else if (c < 128) e = "%" + c.toString(16).toUpperCase(); else if (c < 2048) e = "%" + ((c >> 6) | 192).toString(16).toUpperCase() + "%" + ((c & 63) | 128).toString(16).toUpperCase(); else e = "%" + ((c >> 12) | 224).toString(16).toUpperCase() + "%" + (((c >> 6) & 63) | 128).toString(16).toUpperCase() + "%" + ((c & 63) | 128).toString(16).toUpperCase(); return e; } export function pctDecChars(str) { let newStr = ""; let i = 0; const il = str.length; while (i < il) { const c = parseInt(str.substr(i + 1, 2), 16); if (c < 128) { newStr += String.fromCharCode(c); i += 3; } else if (c >= 194 && c < 224) { if ((il - i) >= 6) { const c2 = parseInt(str.substr(i + 4, 2), 16); newStr += String.fromCharCode(((c & 31) << 6) | (c2 & 63)); } else { newStr += str.substr(i, 6); } i += 6; } else if (c >= 224) { if ((il - i) >= 9) { const c2 = parseInt(str.substr(i + 4, 2), 16); const c3 = parseInt(str.substr(i + 7, 2), 16); newStr += String.fromCharCode(((c & 15) << 12) | ((c2 & 63) << 6) | (c3 & 63)); } else { newStr += str.substr(i, 9); } i += 9; } else { newStr += str.substr(i, 3); i += 3; } } return newStr; } function _normalizeComponentEncoding(components, protocol) { function decodeUnreserved(str) { const decStr = pctDecChars(str); return (!decStr.match(protocol.UNRESERVED) ? 
str : decStr); } if (components.scheme) components.scheme = String(components.scheme).replace(protocol.PCT_ENCODED, decodeUnreserved).toLowerCase().replace(protocol.NOT_SCHEME, ""); if (components.userinfo !== undefined) components.userinfo = String(components.userinfo).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_USERINFO, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.host !== undefined) components.host = String(components.host).replace(protocol.PCT_ENCODED, decodeUnreserved).toLowerCase().replace(protocol.NOT_HOST, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.path !== undefined) components.path = String(components.path).replace(protocol.PCT_ENCODED, decodeUnreserved).replace((components.scheme ? protocol.NOT_PATH : protocol.NOT_PATH_NOSCHEME), pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.query !== undefined) components.query = String(components.query).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_QUERY, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); if (components.fragment !== undefined) components.fragment = String(components.fragment).replace(protocol.PCT_ENCODED, decodeUnreserved).replace(protocol.NOT_FRAGMENT, pctEncChar).replace(protocol.PCT_ENCODED, toUpperCase); return components; } ; function _stripLeadingZeros(str) { return str.replace(/^0*(.*)/, "$1") || "0"; } function _normalizeIPv4(host, protocol) { const matches = host.match(protocol.IPV4ADDRESS) || []; const [, address] = matches; if (address) { return address.split(".").map(_stripLeadingZeros).join("."); } else { return host; } } function _normalizeIPv6(host, protocol) { const matches = host.match(protocol.IPV6ADDRESS) || []; const [, address, zone] = matches; if (address) { const [last, first] = address.toLowerCase().split('::').reverse(); const firstFields = first ? 
first.split(":").map(_stripLeadingZeros) : []; const lastFields = last.split(":").map(_stripLeadingZeros); const isLastFieldIPv4Address = protocol.IPV4ADDRESS.test(lastFields[lastFields.length - 1]); const fieldCount = isLastFieldIPv4Address ? 7 : 8; const lastFieldsStart = lastFields.length - fieldCount; const fields = Array(fieldCount); for (let x = 0; x < fieldCount; ++x) { fields[x] = firstFields[x] || lastFields[lastFieldsStart + x] || ''; } if (isLastFieldIPv4Address) { fields[fieldCount - 1] = _normalizeIPv4(fields[fieldCount - 1], protocol); } const allZeroFields = fields.reduce((acc, field, index) => { if (!field || field === "0") { const lastLongest = acc[acc.length - 1]; if (lastLongest && lastLongest.index + lastLongest.length === index) { lastLongest.length++; } else { acc.push({ index, length: 1 }); } } return acc; }, []); const longestZeroFields = allZeroFields.sort((a, b) => b.length - a.length)[0]; let newHost; if (longestZeroFields && longestZeroFields.length > 1) { const newFirst = fields.slice(0, longestZeroFields.index); const newLast = fields.slice(longestZeroFields.index + longestZeroFields.length); newHost = newFirst.join(":") + "::" + newLast.join(":"); } else { newHost = fields.join(":"); } if (zone) { newHost += "%" + zone; } return newHost; } else { return host; } } const URI_PARSE = /^(?:([^:\/?#]+):)?(?:\/\/((?:([^\/?#@]*)@)?(\[[^\/?#\]]+\]|[^\/?#:]*)(?:\:(\d*))?))?([^?#]*)(?:\?([^#]*))?(?:#((?:.|\n|\r)*))?/i; const NO_MATCH_IS_UNDEFINED = ("").match(/(){0}/)[1] === undefined; export function parse(uriString, options = {}) { const components = {}; const protocol = (options.iri !== false ? IRI_PROTOCOL : URI_PROTOCOL); if (options.reference === "suffix") uriString = (options.scheme ? 
options.scheme + ":" : "") + "//" + uriString; const matches = uriString.match(URI_PARSE); if (matches) { if (NO_MATCH_IS_UNDEFINED) { //store each component components.scheme = matches[1]; components.userinfo = matches[3]; components.host = matches[4]; components.port = parseInt(matches[5], 10); components.path = matches[6] || ""; components.query = matches[7]; components.fragment = matches[8]; //fix port number if (isNaN(components.port)) { components.port = matches[5]; } } else { //IE FIX for improper RegExp matching //store each component components.scheme = matches[1] || undefined; components.userinfo = (uriString.indexOf("@") !== -1 ? matches[3] : undefined); components.host = (uriString.indexOf("//") !== -1 ? matches[4] : undefined); components.port = parseInt(matches[5], 10); components.path = matches[6] || ""; components.query = (uriString.indexOf("?") !== -1 ? matches[7] : undefined); components.fragment = (uriString.indexOf("#") !== -1 ? matches[8] : undefined); //fix port number if (isNaN(components.port)) { components.port = (uriString.match(/\/\/(?:.|\n)*\:(?:\/|\?|\#|$)/) ? 
matches[4] : undefined); } } if (components.host) { //normalize IP hosts components.host = _normalizeIPv6(_normalizeIPv4(components.host, protocol), protocol); } //determine reference type if (components.scheme === undefined && components.userinfo === undefined && components.host === undefined && components.port === undefined && !components.path && components.query === undefined) { components.reference = "same-document"; } else if (components.scheme === undefined) { components.reference = "relative"; } else if (components.fragment === undefined) { components.reference = "absolute"; } else { components.reference = "uri"; } //check for reference errors if (options.reference && options.reference !== "suffix" && options.reference !== components.reference) { components.error = components.error || "URI is not a " + options.reference + " reference."; } //find scheme handler const schemeHandler = SCHEMES[(options.scheme || components.scheme || "").toLowerCase()]; //check if scheme can't handle IRIs if (!options.unicodeSupport && (!schemeHandler || !schemeHandler.unicodeSupport)) { //if host component is a domain name if (components.host && (options.domainHost || (schemeHandler && schemeHandler.domainHost))) { //convert Unicode IDN -> ASCII IDN try { components.host = punycode.toASCII(components.host.replace(protocol.PCT_ENCODED, pctDecChars).toLowerCase()); } catch (e) { components.error = components.error || "Host's domain name can not be converted to ASCII via punycode: " + e; } } //convert IRI -> URI _normalizeComponentEncoding(components, URI_PROTOCOL); } else { //normalize encodings _normalizeComponentEncoding(components, protocol); } //perform scheme specific parsing if (schemeHandler && schemeHandler.parse) { schemeHandler.parse(components, options); } } else { components.error = components.error || "URI can not be parsed."; } return components; } ; function _recomposeAuthority(components, options) { const protocol = (options.iri !== false ? 
IRI_PROTOCOL : URI_PROTOCOL); const uriTokens = []; if (components.userinfo !== undefined) { uriTokens.push(components.userinfo); uriTokens.push("@"); } if (components.host !== undefined) { //normalize IP hosts, add brackets and escape zone separator for IPv6 uriTokens.push(_normalizeIPv6(_normalizeIPv4(String(components.host), protocol), protocol).replace(protocol.IPV6ADDRESS, (_, $1, $2) => "[" + $1 + ($2 ? "%25" + $2 : "") + "]")); } if (typeof components.port === "number") { uriTokens.push(":"); uriTokens.push(components.port.toString(10)); } return uriTokens.length ? uriTokens.join("") : undefined; } ; const RDS1 = /^\.\.?\//; const RDS2 = /^\/\.(\/|$)/; const RDS3 = /^\/\.\.(\/|$)/; const RDS4 = /^\.\.?$/; const RDS5 = /^\/?(?:.|\n)*?(?=\/|$)/; export function removeDotSegments(input) { const output = []; while (input.length) { if (input.match(RDS1)) { input = input.replace(RDS1, ""); } else if (input.match(RDS2)) { input = input.replace(RDS2, "/"); } else if (input.match(RDS3)) { input = input.replace(RDS3, "/"); output.pop(); } else if (input === "." || input === "..") { input = ""; } else { const im = input.match(RDS5); if (im) { const s = im[0]; input = input.slice(s.length); output.push(s); } else { throw new Error("Unexpected dot segment condition"); } } } return output.join(""); } ; export function serialize(components, options = {}) { const protocol = (options.iri ? 
IRI_PROTOCOL : URI_PROTOCOL); const uriTokens = []; //find scheme handler const schemeHandler = SCHEMES[(options.scheme || components.scheme || "").toLowerCase()]; //perform scheme specific serialization if (schemeHandler && schemeHandler.serialize) schemeHandler.serialize(components, options); if (components.host) { //if host component is an IPv6 address if (protocol.IPV6ADDRESS.test(components.host)) { //TODO: normalize IPv6 address as per RFC 5952 } //if host component is a domain name else if (options.domainHost || (schemeHandler && schemeHandler.domainHost)) { //convert IDN via punycode try { components.host = (!options.iri ? punycode.toASCII(components.host.replace(protocol.PCT_ENCODED, pctDecChars).toLowerCase()) : punycode.toUnicode(components.host)); } catch (e) { components.error = components.error || "Host's domain name can not be converted to " + (!options.iri ? "ASCII" : "Unicode") + " via punycode: " + e; } } } //normalize encoding _normalizeComponentEncoding(components, protocol); if (options.reference !== "suffix" && components.scheme) { uriTokens.push(components.scheme); uriTokens.push(":"); } const authority = _recomposeAuthority(components, options); if (authority !== undefined) { if (options.reference !== "suffix") { uriTokens.push("//"); } uriTokens.push(authority); if (components.path && components.path.charAt(0) !== "/") { uriTokens.push("/"); } } if (components.path !== undefined) { let s = components.path; if (!options.absolutePath && (!schemeHandler || !schemeHandler.absolutePath)) { s = removeDotSegments(s); } if (authority === undefined) { s = s.replace(/^\/\//, "/%2F"); //don't allow the path to start with "//" } uriTokens.push(s); } if (components.query !== undefined) { uriTokens.push("?"); uriTokens.push(components.query); } if (components.fragment !== undefined) { uriTokens.push("#"); uriTokens.push(components.fragment); } return uriTokens.join(""); //merge tokens into a string } ; export function resolveComponents(base, relative, 
options = {}, skipNormalization) { const target = {}; if (!skipNormalization) { base = parse(serialize(base, options), options); //normalize base components relative = parse(serialize(relative, options), options); //normalize relative components } options = options || {}; if (!options.tolerant && relative.scheme) { target.scheme = relative.scheme; //target.authority = relative.authority; target.userinfo = relative.userinfo; target.host = relative.host; target.port = relative.port; target.path = removeDotSegments(relative.path || ""); target.query = relative.query; } else { if (relative.userinfo !== undefined || relative.host !== undefined || relative.port !== undefined) { //target.authority = relative.authority; target.userinfo = relative.userinfo; target.host = relative.host; target.port = relative.port; target.path = removeDotSegments(relative.path || ""); target.query = relative.query; } else { if (!relative.path) { target.path = base.path; if (relative.query !== undefined) { target.query = relative.query; } else { target.query = base.query; } } else { if (relative.path.charAt(0) === "/") { target.path = removeDotSegments(relative.path); } else { if ((base.userinfo !== undefined || base.host !== undefined || base.port !== undefined) && !base.path) { target.path = "/" + relative.path; } else if (!base.path) { target.path = relative.path; } else { target.path = base.path.slice(0, base.path.lastIndexOf("/") + 1) + relative.path; } target.path = removeDotSegments(target.path); } target.query = relative.query; } //target.authority = base.authority; target.userinfo = base.userinfo; target.host = base.host; target.port = base.port; } target.scheme = base.scheme; } target.fragment = relative.fragment; return target; } ; export function resolve(baseURI, relativeURI, options) { const schemelessOptions = assign({ scheme: 'null' }, options); return serialize(resolveComponents(parse(baseURI, schemelessOptions), parse(relativeURI, schemelessOptions), schemelessOptions, true), 
schemelessOptions); } ; export function normalize(uri, options) { if (typeof uri === "string") { uri = serialize(parse(uri, options), options); } else if (typeOf(uri) === "object") { uri = parse(serialize(uri, options), options); } return uri; } ; export function equal(uriA, uriB, options) { if (typeof uriA === "string") { uriA = serialize(parse(uriA, options), options); } else if (typeOf(uriA) === "object") { uriA = serialize(uriA, options); } if (typeof uriB === "string") { uriB = serialize(parse(uriB, options), options); } else if (typeOf(uriB) === "object") { uriB = serialize(uriB, options); } return uriA === uriB; } ; export function escapeComponent(str, options) { return str && str.toString().replace((!options || !options.iri ? URI_PROTOCOL.ESCAPE : IRI_PROTOCOL.ESCAPE), pctEncChar); } ; export function unescapeComponent(str, options) { return str && str.toString().replace((!options || !options.iri ? URI_PROTOCOL.PCT_ENCODED : IRI_PROTOCOL.PCT_ENCODED), pctDecChars); } ; //# sourceMappingURL=uri.js.map
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/esnext/uri.js
uri.js
export interface URIComponents { scheme?: string; userinfo?: string; host?: string; port?: number | string; path?: string; query?: string; fragment?: string; reference?: string; error?: string; } export interface URIOptions { scheme?: string; reference?: string; tolerant?: boolean; absolutePath?: boolean; iri?: boolean; unicodeSupport?: boolean; domainHost?: boolean; } export interface URISchemeHandler<Components extends URIComponents = URIComponents, Options extends URIOptions = URIOptions, ParentComponents extends URIComponents = URIComponents> { scheme: string; parse(components: ParentComponents, options: Options): Components; serialize(components: Components, options: Options): ParentComponents; unicodeSupport?: boolean; domainHost?: boolean; absolutePath?: boolean; } export interface URIRegExps { NOT_SCHEME: RegExp; NOT_USERINFO: RegExp; NOT_HOST: RegExp; NOT_PATH: RegExp; NOT_PATH_NOSCHEME: RegExp; NOT_QUERY: RegExp; NOT_FRAGMENT: RegExp; ESCAPE: RegExp; UNRESERVED: RegExp; OTHER_CHARS: RegExp; PCT_ENCODED: RegExp; IPV4ADDRESS: RegExp; IPV6ADDRESS: RegExp; } export declare const SCHEMES: { [scheme: string]: URISchemeHandler; }; export declare function pctEncChar(chr: string): string; export declare function pctDecChars(str: string): string; export declare function parse(uriString: string, options?: URIOptions): URIComponents; export declare function removeDotSegments(input: string): string; export declare function serialize(components: URIComponents, options?: URIOptions): string; export declare function resolveComponents(base: URIComponents, relative: URIComponents, options?: URIOptions, skipNormalization?: boolean): URIComponents; export declare function resolve(baseURI: string, relativeURI: string, options?: URIOptions): string; export declare function normalize(uri: string, options?: URIOptions): string; export declare function normalize(uri: URIComponents, options?: URIOptions): URIComponents; export declare function equal(uriA: string, uriB: string, 
options?: URIOptions): boolean; export declare function equal(uriA: URIComponents, uriB: URIComponents, options?: URIOptions): boolean; export declare function escapeComponent(str: string, options?: URIOptions): string; export declare function unescapeComponent(str: string, options?: URIOptions): string;
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/esnext/uri.d.ts
uri.d.ts
import { merge, subexp } from "uri-js/dist/esnext/util"; export function buildExps(isIRI) { const ALPHA$$ = "[A-Za-z]", CR$ = "[\\x0D]", DIGIT$$ = "[0-9]", DQUOTE$$ = "[\\x22]", HEXDIG$$ = merge(DIGIT$$, "[A-Fa-f]"), //case-insensitive LF$$ = "[\\x0A]", SP$$ = "[\\x20]", PCT_ENCODED$ = subexp(subexp("%[EFef]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%[89A-Fa-f]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%" + HEXDIG$$ + HEXDIG$$)), //expanded GEN_DELIMS$$ = "[\\:\\/\\?\\#\\[\\]\\@]", SUB_DELIMS$$ = "[\\!\\$\\&\\'\\(\\)\\*\\+\\,\\;\\=]", RESERVED$$ = merge(GEN_DELIMS$$, SUB_DELIMS$$), UCSCHAR$$ = isIRI ? "[\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]" : "[]", //subset, excludes bidi control characters IPRIVATE$$ = isIRI ? "[\\uE000-\\uF8FF]" : "[]", //subset UNRESERVED$$ = merge(ALPHA$$, DIGIT$$, "[\\-\\.\\_\\~]", UCSCHAR$$), SCHEME$ = subexp(ALPHA$$ + merge(ALPHA$$, DIGIT$$, "[\\+\\-\\.]") + "*"), USERINFO$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:]")) + "*"), DEC_OCTET$ = subexp(subexp("25[0-5]") + "|" + subexp("2[0-4]" + DIGIT$$) + "|" + subexp("1" + DIGIT$$ + DIGIT$$) + "|" + subexp("[1-9]" + DIGIT$$) + "|" + DIGIT$$), DEC_OCTET_RELAXED$ = subexp(subexp("25[0-5]") + "|" + subexp("2[0-4]" + DIGIT$$) + "|" + subexp("1" + DIGIT$$ + DIGIT$$) + "|" + subexp("0?[1-9]" + DIGIT$$) + "|0?0?" + DIGIT$$), //relaxed parsing rules IPV4ADDRESS$ = subexp(DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$ + "\\." + DEC_OCTET_RELAXED$ + "\\." 
+ DEC_OCTET_RELAXED$), H16$ = subexp(HEXDIG$$ + "{1,4}"), LS32$ = subexp(subexp(H16$ + "\\:" + H16$) + "|" + IPV4ADDRESS$), IPV6ADDRESS1$ = subexp(subexp(H16$ + "\\:") + "{6}" + LS32$), // 6( h16 ":" ) ls32 IPV6ADDRESS2$ = subexp("\\:\\:" + subexp(H16$ + "\\:") + "{5}" + LS32$), // "::" 5( h16 ":" ) ls32 IPV6ADDRESS3$ = subexp(subexp(H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{4}" + LS32$), //[ h16 ] "::" 4( h16 ":" ) ls32 IPV6ADDRESS4$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,1}" + H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{3}" + LS32$), //[ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 IPV6ADDRESS5$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,2}" + H16$) + "?\\:\\:" + subexp(H16$ + "\\:") + "{2}" + LS32$), //[ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 IPV6ADDRESS6$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,3}" + H16$) + "?\\:\\:" + H16$ + "\\:" + LS32$), //[ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 IPV6ADDRESS7$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,4}" + H16$) + "?\\:\\:" + LS32$), //[ *4( h16 ":" ) h16 ] "::" ls32 IPV6ADDRESS8$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,5}" + H16$) + "?\\:\\:" + H16$), //[ *5( h16 ":" ) h16 ] "::" h16 IPV6ADDRESS9$ = subexp(subexp(subexp(H16$ + "\\:") + "{0,6}" + H16$) + "?\\:\\:"), //[ *6( h16 ":" ) h16 ] "::" IPV6ADDRESS$ = subexp([IPV6ADDRESS1$, IPV6ADDRESS2$, IPV6ADDRESS3$, IPV6ADDRESS4$, IPV6ADDRESS5$, IPV6ADDRESS6$, IPV6ADDRESS7$, IPV6ADDRESS8$, IPV6ADDRESS9$].join("|")), ZONEID$ = subexp(subexp(UNRESERVED$$ + "|" + PCT_ENCODED$) + "+"), //RFC 6874 IPV6ADDRZ$ = subexp(IPV6ADDRESS$ + "\\%25" + ZONEID$), //RFC 6874 IPV6ADDRZ_RELAXED$ = subexp(IPV6ADDRESS$ + subexp("\\%25|\\%(?!" + HEXDIG$$ + "{2})") + ZONEID$), //RFC 6874, with relaxed parsing rules IPVFUTURE$ = subexp("[vV]" + HEXDIG$$ + "+\\." 
+ merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:]") + "+"), IP_LITERAL$ = subexp("\\[" + subexp(IPV6ADDRZ_RELAXED$ + "|" + IPV6ADDRESS$ + "|" + IPVFUTURE$) + "\\]"), //RFC 6874 REG_NAME$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$)) + "*"), HOST$ = subexp(IP_LITERAL$ + "|" + IPV4ADDRESS$ + "(?!" + REG_NAME$ + ")" + "|" + REG_NAME$), PORT$ = subexp(DIGIT$$ + "*"), AUTHORITY$ = subexp(subexp(USERINFO$ + "@") + "?" + HOST$ + subexp("\\:" + PORT$) + "?"), PCHAR$ = subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@]")), SEGMENT$ = subexp(PCHAR$ + "*"), SEGMENT_NZ$ = subexp(PCHAR$ + "+"), SEGMENT_NZ_NC$ = subexp(subexp(PCT_ENCODED$ + "|" + merge(UNRESERVED$$, SUB_DELIMS$$, "[\\@]")) + "+"), PATH_ABEMPTY$ = subexp(subexp("\\/" + SEGMENT$) + "*"), PATH_ABSOLUTE$ = subexp("\\/" + subexp(SEGMENT_NZ$ + PATH_ABEMPTY$) + "?"), //simplified PATH_NOSCHEME$ = subexp(SEGMENT_NZ_NC$ + PATH_ABEMPTY$), //simplified PATH_ROOTLESS$ = subexp(SEGMENT_NZ$ + PATH_ABEMPTY$), //simplified PATH_EMPTY$ = "(?!" + PCHAR$ + ")", PATH$ = subexp(PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$), QUERY$ = subexp(subexp(PCHAR$ + "|" + merge("[\\/\\?]", IPRIVATE$$)) + "*"), FRAGMENT$ = subexp(subexp(PCHAR$ + "|[\\/\\?]") + "*"), HIER_PART$ = subexp(subexp("\\/\\/" + AUTHORITY$ + PATH_ABEMPTY$) + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$), URI$ = subexp(SCHEME$ + "\\:" + HIER_PART$ + subexp("\\?" + QUERY$) + "?" + subexp("\\#" + FRAGMENT$) + "?"), RELATIVE_PART$ = subexp(subexp("\\/\\/" + AUTHORITY$ + PATH_ABEMPTY$) + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_EMPTY$), RELATIVE$ = subexp(RELATIVE_PART$ + subexp("\\?" + QUERY$) + "?" + subexp("\\#" + FRAGMENT$) + "?"), URI_REFERENCE$ = subexp(URI$ + "|" + RELATIVE$), ABSOLUTE_URI$ = subexp(SCHEME$ + "\\:" + HIER_PART$ + subexp("\\?" 
+ QUERY$) + "?"), GENERIC_REF$ = "^(" + SCHEME$ + ")\\:" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", RELATIVE_REF$ = "^(){0}" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_NOSCHEME$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", ABSOLUTE_REF$ = "^(" + SCHEME$ + ")\\:" + subexp(subexp("\\/\\/(" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?)") + "?(" + PATH_ABEMPTY$ + "|" + PATH_ABSOLUTE$ + "|" + PATH_ROOTLESS$ + "|" + PATH_EMPTY$ + ")") + subexp("\\?(" + QUERY$ + ")") + "?$", SAMEDOC_REF$ = "^" + subexp("\\#(" + FRAGMENT$ + ")") + "?$", AUTHORITY_REF$ = "^" + subexp("(" + USERINFO$ + ")@") + "?(" + HOST$ + ")" + subexp("\\:(" + PORT$ + ")") + "?$"; return { NOT_SCHEME: new RegExp(merge("[^]", ALPHA$$, DIGIT$$, "[\\+\\-\\.]"), "g"), NOT_USERINFO: new RegExp(merge("[^\\%\\:]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_HOST: new RegExp(merge("[^\\%\\[\\]\\:]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_PATH: new RegExp(merge("[^\\%\\/\\:\\@]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_PATH_NOSCHEME: new RegExp(merge("[^\\%\\/\\@]", UNRESERVED$$, SUB_DELIMS$$), "g"), NOT_QUERY: new RegExp(merge("[^\\%]", UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@\\/\\?]", IPRIVATE$$), "g"), NOT_FRAGMENT: new RegExp(merge("[^\\%]", UNRESERVED$$, SUB_DELIMS$$, "[\\:\\@\\/\\?]"), "g"), ESCAPE: new RegExp(merge("[^]", UNRESERVED$$, SUB_DELIMS$$), "g"), UNRESERVED: new RegExp(UNRESERVED$$, "g"), OTHER_CHARS: new RegExp(merge("[^\\%]", UNRESERVED$$, RESERVED$$), "g"), PCT_ENCODED: new RegExp(PCT_ENCODED$, "g"), IPV4ADDRESS: new 
RegExp("^(" + IPV4ADDRESS$ + ")$"), IPV6ADDRESS: new RegExp("^\\[?(" + IPV6ADDRESS$ + ")" + subexp(subexp("\\%25|\\%(?!" + HEXDIG$$ + "{2})") + "(" + ZONEID$ + ")") + "?\\]?$") //RFC 6874, with relaxed parsing rules }; } export default buildExps(false); //# sourceMappingURL=regexps-uri.js.map
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/esnext/regexps-uri.js
regexps-uri.js
/**
 * uri-js scheme handler for "mailto:" URIs (RFC 6068).
 *
 * Reformatted from the single-line dist output into conventional layout;
 * execution is unchanged. `parse` splits the URI path into addresses and the
 * query into hfields (subject/body/other headers), and `serialize` rebuilds
 * path and query from those components. Address domains are run through
 * punycode (IDN <-> ASCII) depending on `options`.
 */
import { pctEncChar, pctDecChars, unescapeComponent } from "uri-js/dist/esnext/uri";
import punycode from "punycode";
import { merge, subexp, toUpperCase, toArray } from "uri-js/dist/esnext/util";

// Empty object used to skip Object.prototype members when walking headers.
const O = {};
const isIRI = true;

// RFC 3986 character classes (IRI mode widens the unreserved set).
const UNRESERVED$$ = "[A-Za-z0-9\\-\\.\\_\\~" + (isIRI ? "\\xA0-\\u200D\\u2010-\\u2029\\u202F-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF" : "") + "]";
const HEXDIG$$ = "[0-9A-Fa-f]"; // case-insensitive
const PCT_ENCODED$ = subexp(subexp("%[EFef]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%[89A-Fa-f]" + HEXDIG$$ + "%" + HEXDIG$$ + HEXDIG$$) + "|" + subexp("%" + HEXDIG$$ + HEXDIG$$)); // expanded

// RFC 5322 address productions, restricted per RFC 6068
// (these exclude the symbols @ : / ? # [ ] & ; =).
const ATEXT$$ = "[A-Za-z0-9\\!\\$\\%\\'\\*\\+\\-\\^\\_\\`\\{\\|\\}\\~]";
const QTEXT$$ = "[\\!\\$\\%\\'\\(\\)\\*\\+\\,\\-\\.0-9\\<\\>A-Z\\x5E-\\x7E]";
const VCHAR$$ = merge(QTEXT$$, "[\\\"\\\\]");
const DOT_ATOM_TEXT$ = subexp(ATEXT$$ + "+" + subexp("\\." + ATEXT$$ + "+") + "*");
const QUOTED_PAIR$ = subexp("\\\\" + VCHAR$$);
const QCONTENT$ = subexp(QTEXT$$ + "|" + QUOTED_PAIR$);
const QUOTED_STRING$ = subexp('\\"' + QCONTENT$ + "*" + '\\"');

// RFC 6068 productions.
const DTEXT_NO_OBS$$ = "[\\x21-\\x5A\\x5E-\\x7E]"; // %d33-90 / %d94-126
const SOME_DELIMS$$ = "[\\!\\$\\'\\(\\)\\*\\+\\,\\;\\:\\@]";
const QCHAR$ = subexp(UNRESERVED$$ + "|" + PCT_ENCODED$ + "|" + SOME_DELIMS$$);
const DOMAIN$ = subexp(DOT_ATOM_TEXT$ + "|" + "\\[" + DTEXT_NO_OBS$$ + "*" + "\\]");
const LOCAL_PART$ = subexp(DOT_ATOM_TEXT$ + "|" + QUOTED_STRING$);
const ADDR_SPEC$ = subexp(LOCAL_PART$ + "\\@" + DOMAIN$);
const TO$ = subexp(ADDR_SPEC$ + subexp("\\," + ADDR_SPEC$) + "*");
const HFNAME$ = subexp(QCHAR$ + "*");
const HFVALUE$ = HFNAME$;
const HFIELD$ = subexp(HFNAME$ + "\\=" + HFVALUE$);
const HFIELDS2$ = subexp(HFIELD$ + subexp("\\&" + HFIELD$) + "*");
const HFIELDS$ = subexp("\\?" + HFIELDS2$);
const MAILTO_URI = new RegExp("^mailto\\:" + TO$ + "?" + HFIELDS$ + "?$");

// Global regexps used for validating/escaping the individual components.
const UNRESERVED = new RegExp(UNRESERVED$$, "g");
const PCT_ENCODED = new RegExp(PCT_ENCODED$, "g");
const NOT_LOCAL_PART = new RegExp(merge("[^]", ATEXT$$, "[\\.]", '[\\"]', VCHAR$$), "g");
const NOT_DOMAIN = new RegExp(merge("[^]", ATEXT$$, "[\\.]", "[\\[]", DTEXT_NO_OBS$$, "[\\]]"), "g");
const NOT_HFNAME = new RegExp(merge("[^]", UNRESERVED$$, SOME_DELIMS$$), "g");
const NOT_HFVALUE = NOT_HFNAME;
const TO = new RegExp("^" + TO$ + "$");
const HFIELDS = new RegExp("^" + HFIELDS2$ + "$");

// Decode a percent-encoded triplet only when the decoded character is
// unreserved; otherwise keep the encoded form.
function decodeUnreserved(str) {
    const decStr = pctDecChars(str);
    return (!decStr.match(UNRESERVED) ? str : decStr);
}

const handler = {
    scheme: "mailto",
    parse: function (components, options) {
        const mailtoComponents = components;
        // The URI path holds the comma-separated primary recipient list.
        const to = mailtoComponents.to = (mailtoComponents.path ? mailtoComponents.path.split(",") : []);
        mailtoComponents.path = undefined;
        if (mailtoComponents.query) {
            let unknownHeaders = false;
            const headers = {};
            const hfields = mailtoComponents.query.split("&");
            for (let x = 0, xl = hfields.length; x < xl; ++x) {
                const hfield = hfields[x].split("=");
                switch (hfield[0]) {
                    case "to":
                        // A "to" hfield appends additional recipients.
                        const toAddrs = hfield[1].split(",");
                        for (let x = 0, xl = toAddrs.length; x < xl; ++x) {
                            to.push(toAddrs[x]);
                        }
                        break;
                    case "subject":
                        mailtoComponents.subject = unescapeComponent(hfield[1], options);
                        break;
                    case "body":
                        mailtoComponents.body = unescapeComponent(hfield[1], options);
                        break;
                    default:
                        unknownHeaders = true;
                        headers[unescapeComponent(hfield[0], options)] = unescapeComponent(hfield[1], options);
                        break;
                }
            }
            if (unknownHeaders) mailtoComponents.headers = headers;
        }
        mailtoComponents.query = undefined;
        for (let x = 0, xl = to.length; x < xl; ++x) {
            const addr = to[x].split("@");
            addr[0] = unescapeComponent(addr[0]);
            if (!options.unicodeSupport) {
                // Convert Unicode IDN -> ASCII IDN.
                try {
                    addr[1] = punycode.toASCII(unescapeComponent(addr[1], options).toLowerCase());
                }
                catch (e) {
                    mailtoComponents.error = mailtoComponents.error || "Email address's domain name can not be converted to ASCII via punycode: " + e;
                }
            }
            else {
                addr[1] = unescapeComponent(addr[1], options).toLowerCase();
            }
            to[x] = addr.join("@");
        }
        return mailtoComponents;
    },
    serialize: function (mailtoComponents, options) {
        const components = mailtoComponents;
        const to = toArray(mailtoComponents.to);
        if (to) {
            for (let x = 0, xl = to.length; x < xl; ++x) {
                const toAddr = String(to[x]);
                const atIdx = toAddr.lastIndexOf("@");
                // Normalize percent-encoding of the local part, then escape
                // anything that is not valid local-part syntax.
                const localPart = (toAddr.slice(0, atIdx)).replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_LOCAL_PART, pctEncChar);
                let domain = toAddr.slice(atIdx + 1);
                // Convert IDN via punycode.
                try {
                    domain = (!options.iri ? punycode.toASCII(unescapeComponent(domain, options).toLowerCase()) : punycode.toUnicode(domain));
                }
                catch (e) {
                    components.error = components.error || "Email address's domain name can not be converted to " + (!options.iri ? "ASCII" : "Unicode") + " via punycode: " + e;
                }
                to[x] = localPart + "@" + domain;
            }
            components.path = to.join(",");
        }
        const headers = mailtoComponents.headers = mailtoComponents.headers || {};
        if (mailtoComponents.subject) headers["subject"] = mailtoComponents.subject;
        if (mailtoComponents.body) headers["body"] = mailtoComponents.body;
        const fields = [];
        for (const name in headers) {
            // Skip anything inherited from Object.prototype.
            if (headers[name] !== O[name]) {
                fields.push(name.replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_HFNAME, pctEncChar) + "=" + headers[name].replace(PCT_ENCODED, decodeUnreserved).replace(PCT_ENCODED, toUpperCase).replace(NOT_HFVALUE, pctEncChar));
            }
        }
        if (fields.length) {
            components.query = fields.join("&");
        }
        return components;
    }
};
export default handler;
//# sourceMappingURL=mailto.js.map
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/esnext/schemes/mailto.js
mailto.js
/**
 * uri-js scheme handler for "urn:" URIs (RFC 2141).
 *
 * Reformatted from the single-line dist output; execution is unchanged.
 * Splits the URI path into <nid>:<nss> on parse and reassembles it on
 * serialize, delegating to a registered "urn:<nid>" sub-handler when one
 * exists in the SCHEMES table.
 */
import { SCHEMES } from "uri-js/dist/esnext/uri";

const NID$ = "(?:[0-9A-Za-z][0-9A-Za-z\\-]{1,31})";
const PCT_ENCODED$ = "(?:\\%[0-9A-Fa-f]{2})";
const TRANS$$ = "[0-9A-Za-z\\(\\)\\+\\,\\-\\.\\:\\=\\@\\;\\$\\_\\!\\*\\'\\/\\?\\#]";
const NSS$ = "(?:(?:" + PCT_ENCODED$ + "|" + TRANS$$ + ")+)";
const URN_SCHEME = new RegExp("^urn\\:(" + NID$ + ")$");
const URN_PATH = new RegExp("^(" + NID$ + ")\\:(" + NSS$ + ")$");
const URN_PARSE = /^([^\:]+)\:(.*)/;
const URN_EXCLUDED = /[\x00-\x20\\\"\&\<\>\[\]\^\`\{\|\}\~\x7F-\xFF]/g;

// RFC 2141
const handler = {
    scheme: "urn",
    parse: function (components, options) {
        const matches = components.path && components.path.match(URN_PARSE);
        let urnComponents = components;
        if (matches) {
            const scheme = options.scheme || urnComponents.scheme || "urn";
            const nid = matches[1].toLowerCase();
            const nss = matches[2];
            // e.g. "urn:uuid" — key into the scheme-handler registry.
            const urnScheme = `${scheme}:${options.nid || nid}`;
            const schemeHandler = SCHEMES[urnScheme];
            urnComponents.nid = nid;
            urnComponents.nss = nss;
            urnComponents.path = undefined;
            if (schemeHandler) {
                urnComponents = schemeHandler.parse(urnComponents, options);
            }
        }
        else {
            urnComponents.error = urnComponents.error || "URN can not be parsed.";
        }
        return urnComponents;
    },
    serialize: function (urnComponents, options) {
        const scheme = options.scheme || urnComponents.scheme || "urn";
        const nid = urnComponents.nid;
        const urnScheme = `${scheme}:${options.nid || nid}`;
        // Let a more specific "urn:<nid>" handler serialize first.
        const schemeHandler = SCHEMES[urnScheme];
        if (schemeHandler) {
            urnComponents = schemeHandler.serialize(urnComponents, options);
        }
        const uriComponents = urnComponents;
        const nss = urnComponents.nss;
        uriComponents.path = `${nid || options.nid}:${nss}`;
        return uriComponents;
    },
};
export default handler;
//# sourceMappingURL=urn.js.map
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/uri-js/dist/esnext/schemes/urn.js
urn.js
# [NWSAPI](http://dperini.github.io/nwsapi/) Fast CSS Selectors API Engine ![](https://img.shields.io/npm/v/nwsapi.svg?colorB=orange&style=flat) ![](https://img.shields.io/github/tag/dperini/nwsapi.svg?style=flat) ![](https://img.shields.io/npm/dw/nwsapi.svg?style=flat) ![](https://img.shields.io/github/issues/dperini/nwsapi.svg?style=flat) NWSAPI is the development progress of [NWMATCHER](https://github.com/dperini/nwmatcher) aiming at [Selectors Level 4](https://www.w3.org/TR/selectors-4/) conformance. It has been completely reworked to be easily extended and maintained. It is a right-to-left selector parser and compiler written in pure Javascript with no external dependencies. It was initially conceived as a cross browser library to improve event delegation and web page scraping in various frameworks but it has become a popular replacement of the native CSS selection and matching functionality in newer browsers and headless environments. It uses [regular expressions](https://en.wikipedia.org/wiki/Regular_expression) to parse CSS selector strings and [metaprogramming](https://en.wikipedia.org/wiki/Metaprogramming) to transform these selector strings into Javascript function resolvers. This process is executed only once for each selector string, allowing memoization of the function resolvers and achieving unmatched performance. ## Installation To include NWSAPI in a standard web page: ```html <script type="text/javascript" src="nwsapi.js"></script> ``` To include NWSAPI in a standard web page and automatically replace the native QSA: ```html <script type="text/javascript" src="nwsapi.js" onload="NW.Dom.install()"></script> ``` To use NWSAPI with Node.js: ``` $ npm install nwsapi ``` NWSAPI currently supports browsers (as a global, `NW.Dom`) and headless environments (as a CommonJS module). ## Supported Selectors Here is a list of all the CSS2/CSS3/CSS4 [Supported selectors](https://github.com/dperini/nwsapi/wiki/CSS-supported-selectors). 
## Features and Compliance You can read more about NWSAPI [features and compliance](https://github.com/dperini/nwsapi/wiki/Features-and-compliance) on the wiki. ## API ### DOM Selection #### `ancestor( selector, context, callback )` Returns a reference to the nearest ancestor element matching `selector`, starting at `context`. Returns `null` if no element is found. If `callback` is provided, it is invoked for the matched element. #### `first( selector, context, callback )` Returns a reference to the first element matching `selector`, starting at `context`. Returns `null` if no element matches. If `callback` is provided, it is invoked for the matched element. #### `match( selector, element, callback )` Returns `true` if `element` matches `selector`; returns `false` otherwise. If `callback` is provided, it is invoked for the matched element. #### `select( selector, context, callback )` Returns an array of all the elements matching `selector`, starting at `context`; returns an empty `Array` otherwise. If `callback` is provided, it is invoked for each matching element. ### DOM Helpers #### `byId( id, from )` Returns a reference to the first element with ID `id`, optionally filtered to descendants of the element `from`. #### `byTag( tag, from )` Returns an array of elements having the specified tag name `tag`, optionally filtered to descendants of the element `from`. #### `byClass( class, from )` Returns an array of elements having the specified class name `class`, optionally filtered to descendants of the element `from`. 
### Engine Configuration #### `configure( options )` The following is the list of currently available configuration options, their default values and descriptions; they are boolean flags that can be set to `true` or `false`: * `IDS_DUPES`: true - true to allow using multiple elements having the same id, false to disallow * `LIVECACHE`: true - true for caching both results and resolvers, false for caching only resolvers * `MIXEDCASE`: true - true to match tag names case insensitively, false to match case sensitively * `LOGERRORS`: true - true to print errors and warnings to the console, false to mute both of them ### Examples on extending the basic functionalities #### `configure( { <configuration-flag>: [ true | false ] } )` Disable logging errors/warnings to console, disallow duplicate ids. Example: ```js NW.Dom.configure( { LOGERRORS: false, IDS_DUPES: false } ); ``` NOTE: NW.Dom.configure() without parameters returns the current configuration. #### `registerCombinator( symbol, resolver )` Registers a new symbol and its matching resolver in the combinators table. Example: ```js NW.Dom.registerCombinator( '^', 'e.parentElement' ); ``` #### `registerOperator( symbol, resolver )` Registers a new symbol and its matching resolver in the attribute operators table. Example: ```js NW.Dom.registerOperator( '!=', { p1: '^', p2: '$', p3: 'false' } ); ``` #### `registerSelector( name, rexp, func )` Registers a new selector, the matching RE and the resolver function, in the selectors table. Example: ```js NW.Dom.registerSelector('Controls', /^\:(control)(.*)/i, (function(global) { return function(match, source, mode, callback) { var status = true; source = 'if(/^(button|input|select|textarea)/i.test(e.nodeName)){' + source + '}'; return { 'source': source, 'status': status }; }; })(this)); ```
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/nwsapi/README.md
README.md
(function Export(global, factory) { 'use strict'; if (typeof module == 'object' && typeof exports == 'object') { module.exports = factory; } else if (typeof define == 'function' && define['amd']) { define(factory); } else { global.NW || (global.NW = { }); global.NW.Dom = factory(global, Export); } })(this, function Factory(global, Export) { var version = 'nwsapi-2.2.0', doc = global.document, root = doc.documentElement, slice = Array.prototype.slice, WSP = '[\\x20\\t\\r\\n\\f]', CFG = { // extensions operators: '[~*^$|]=|=', combinators: '[\\x20\\t>+~](?=[^>+~])' }, NOT = { // not enclosed in double/single/parens/square double_enc: '(?=(?:[^"]*["][^"]*["])*[^"]*$)', single_enc: "(?=(?:[^']*['][^']*['])*[^']*$)", parens_enc: '(?![^\\x28]*\\x29)', square_enc: '(?![^\\x5b]*\\x5d)' }, REX = { // regular expressions HasEscapes: RegExp('\\\\'), HexNumbers: RegExp('^[0-9a-fA-F]'), EscOrQuote: RegExp('^\\\\|[\\x22\\x27]'), RegExpChar: RegExp('(?:(?!\\\\)[\\\\^$.*+?()[\\]{}|\\/])', 'g'), TrimSpaces: RegExp('[\\r\\n\\f]|^' + WSP + '+|' + WSP + '+$', 'g'), CommaGroup: RegExp('(\\s*,\\s*)' + NOT.square_enc + NOT.parens_enc, 'g'), SplitGroup: RegExp('((?:\\x28[^\\x29]*\\x29|\\[[^\\]]*\\]|\\\\.|[^,])+)', 'g'), FixEscapes: RegExp('\\\\([0-9a-fA-F]{1,6}' + WSP + '?|.)|([\\x22\\x27])', 'g'), CombineWSP: RegExp('[\\n\\r\\f\\x20]+' + NOT.single_enc + NOT.double_enc, 'g'), TabCharWSP: RegExp('(\\x20?\\t+\\x20?)' + NOT.single_enc + NOT.double_enc, 'g'), PseudosWSP: RegExp('\\s+([-+])\\s+' + NOT.square_enc, 'g') }, STD = { combinator: RegExp('\\s?([>+~])\\s?', 'g'), apimethods: RegExp('^(?:[a-z]+|\\*)\\|', 'i'), namespaces: RegExp('(\\*|[a-z]+)\\|[-a-z]+', 'i') }, GROUPS = { // pseudo-classes requiring parameters linguistic: '(dir|lang)\\x28\\s?([-\\w]{2,})\\s?(?:\\x29|$)', logicalsel: '(matches|not)\\x28\\s?([^()]*|[^\\x28]*\\x28[^\\x29]*\\x29)\\s?(?:\\x29|$)', treestruct: '(nth(?:-last)?(?:-child|-of-type))(?:\\x28\\s?(even|odd|(?:[-+]?\\d*)(?:n\\s?[-+]?\\s?\\d*)?)\\s?(?:\\x29|$))', 
// pseudo-classes not requiring parameters locationpc: '(link|visited|target)\\b', useraction: '(hover|active|focus|focus-within)\\b', structural: '(root|empty|(?:(?:first|last|only)(?:-child|-of-type)))\\b', inputstate: '(enabled|disabled|read-only|read-write|placeholder-shown|default)\\b', inputvalue: '(checked|indeterminate|required|optional|valid|invalid|in-range|out-of-range)\\b', // pseudo-elements starting with single colon (:) pseudo_sng: '(after|before|first-letter|first-line)\\b', // pseudo-elements starting with double colon (::) pseudo_dbl: ':(after|before|first-letter|first-line|selection|placeholder|-webkit-[-a-zA-Z0-9]{2,})\\b' }, Patterns = { // pseudo-classes treestruct: RegExp('^:(?:' + GROUPS.treestruct + ')(.*)', 'i'), structural: RegExp('^:(?:' + GROUPS.structural + ')(.*)', 'i'), linguistic: RegExp('^:(?:' + GROUPS.linguistic + ')(.*)', 'i'), useraction: RegExp('^:(?:' + GROUPS.useraction + ')(.*)', 'i'), inputstate: RegExp('^:(?:' + GROUPS.inputstate + ')(.*)', 'i'), inputvalue: RegExp('^:(?:' + GROUPS.inputvalue + ')(.*)', 'i'), locationpc: RegExp('^:(?:' + GROUPS.locationpc + ')(.*)', 'i'), logicalsel: RegExp('^:(?:' + GROUPS.logicalsel + ')(.*)', 'i'), pseudo_dbl: RegExp('^:(?:' + GROUPS.pseudo_dbl + ')(.*)', 'i'), pseudo_sng: RegExp('^:(?:' + GROUPS.pseudo_sng + ')(.*)', 'i'), // combinator symbols children: RegExp('^' + WSP + '?\\>' + WSP + '?(.*)'), adjacent: RegExp('^' + WSP + '?\\+' + WSP + '?(.*)'), relative: RegExp('^' + WSP + '?\\~' + WSP + '?(.*)'), ancestor: RegExp('^' + WSP + '+(.*)'), // universal & namespace universal: RegExp('^\\*(.*)'), namespace: RegExp('^(\\w+|\\*)?\\|(.*)') }, // regexp to aproximate detection of RTL languages (Arabic) RTL = RegExp('^[\\u0591-\\u08ff\\ufb1d-\\ufdfd\\ufe70-\\ufefc ]+$'), // emulate firefox error strings qsNotArgs = 'Not enough arguments', qsInvalid = ' is not a valid selector', // detect structural pseudo-classes in selectors reNthElem = RegExp('(:nth(?:-last)?-child)', 'i'), reNthType = 
RegExp('(:nth(?:-last)?-of-type)', 'i'), // placeholder for global regexp reOptimizer, reValidator, // special handling configuration flags Config = { IDS_DUPES: true, MIXEDCASE: true, LOGERRORS: true, VERBOSITY: true }, NAMESPACE, QUIRKS_MODE, HTML_DOCUMENT, ATTR_STD_OPS = { '=': 1, '^=': 1, '$=': 1, '|=': 1, '*=': 1, '~=': 1 }, HTML_TABLE = { 'accept': 1, 'accept-charset': 1, 'align': 1, 'alink': 1, 'axis': 1, 'bgcolor': 1, 'charset': 1, 'checked': 1, 'clear': 1, 'codetype': 1, 'color': 1, 'compact': 1, 'declare': 1, 'defer': 1, 'dir': 1, 'direction': 1, 'disabled': 1, 'enctype': 1, 'face': 1, 'frame': 1, 'hreflang': 1, 'http-equiv': 1, 'lang': 1, 'language': 1, 'link': 1, 'media': 1, 'method': 1, 'multiple': 1, 'nohref': 1, 'noresize': 1, 'noshade': 1, 'nowrap': 1, 'readonly': 1, 'rel': 1, 'rev': 1, 'rules': 1, 'scope': 1, 'scrolling': 1, 'selected': 1, 'shape': 1, 'target': 1, 'text': 1, 'type': 1, 'valign': 1, 'valuetype': 1, 'vlink': 1 }, Combinators = { }, Selectors = { }, Operators = { '=': { p1: '^', p2: '$', p3: 'true' }, '^=': { p1: '^', p2: '', p3: 'true' }, '$=': { p1: '', p2: '$', p3: 'true' }, '*=': { p1: '', p2: '', p3: 'true' }, '|=': { p1: '^', p2: '(-|$)', p3: 'true' }, '~=': { p1: '(^|\\s)', p2: '(\\s|$)', p3: 'true' } }, concatCall = function(nodes, callback) { var i = 0, l = nodes.length, list = Array(l); while (l > i) { if (false === callback(list[i] = nodes[i])) break; ++i; } return list; }, concatList = function(list, nodes) { var i = -1, l = nodes.length; while (l--) { list[list.length] = nodes[++i]; } return list; }, documentOrder = function(a, b) { if (!hasDupes && a === b) { hasDupes = true; return 0; } return a.compareDocumentPosition(b) & 4 ? 
-1 : 1; }, hasDupes = false, unique = function(nodes) { var i = 0, j = -1, l = nodes.length + 1, list = [ ]; while (--l) { if (nodes[i++] === nodes[i]) continue; list[++j] = nodes[i - 1]; } hasDupes = false; return list; }, // check context for mixed content hasMixedCaseTagNames = function(context) { var ns, api = 'getElementsByTagNameNS'; // current host context (ownerDocument) context = context.ownerDocument || context; // documentElement (root) element namespace or default html/xhtml namespace ns = context.documentElement.namespaceURI || 'http://www.w3.org/1999/xhtml'; // checking the number of non HTML nodes in the document return (context[api]('*', '*').length - context[api](ns, '*').length) > 0; }, switchContext = function(context, force) { var oldDoc = doc; doc = context.ownerDocument || context; if (force || oldDoc !== doc) { // force a new check for each document change // performed before the next select operation root = doc.documentElement; HTML_DOCUMENT = isHTML(doc); QUIRKS_MODE = HTML_DOCUMENT && doc.compatMode.indexOf('CSS') < 0; NAMESPACE = root && root.namespaceURI; Snapshot.doc = doc; Snapshot.root = root; } return (Snapshot.from = context); }, // convert single codepoint to UTF-16 encoding codePointToUTF16 = function(codePoint) { // out of range, use replacement character if (codePoint < 1 || codePoint > 0x10ffff || (codePoint > 0xd7ff && codePoint < 0xe000)) { return '\\ufffd'; } // javascript strings are UTF-16 encoded if (codePoint < 0x10000) { var lowHex = '000' + codePoint.toString(16); return '\\u' + lowHex.substr(lowHex.length - 4); } // supplementary high + low surrogates return '\\u' + (((codePoint - 0x10000) >> 0x0a) + 0xd800).toString(16) + '\\u' + (((codePoint - 0x10000) % 0x400) + 0xdc00).toString(16); }, // convert single codepoint to string stringFromCodePoint = function(codePoint) { // out of range, use replacement character if (codePoint < 1 || codePoint > 0x10ffff || (codePoint > 0xd7ff && codePoint < 0xe000)) { return '\ufffd'; 
} if (codePoint < 0x10000) { return String.fromCharCode(codePoint); } return String.fromCodePoint ? String.fromCodePoint(codePoint) : String.fromCharCode( ((codePoint - 0x10000) >> 0x0a) + 0xd800, ((codePoint - 0x10000) % 0x400) + 0xdc00); }, // convert escape sequence in a CSS string or identifier // to javascript string with javascript escape sequences convertEscapes = function(str) { return REX.HasEscapes.test(str) ? str.replace(REX.FixEscapes, function(substring, p1, p2) { // unescaped " or ' return p2 ? '\\' + p2 : // javascript strings are UTF-16 encoded REX.HexNumbers.test(p1) ? codePointToUTF16(parseInt(p1, 16)) : // \' \" REX.EscOrQuote.test(p1) ? substring : // \g \h \. \# etc p1; } ) : str; }, // convert escape sequence in a CSS string or identifier // to javascript string with characters representations unescapeIdentifier = function(str) { return REX.HasEscapes.test(str) ? str.replace(REX.FixEscapes, function(substring, p1, p2) { // unescaped " or ' return p2 ? p2 : // javascript strings are UTF-16 encoded REX.HexNumbers.test(p1) ? stringFromCodePoint(parseInt(p1, 16)) : // \' \" REX.EscOrQuote.test(p1) ? substring : // \g \h \. 
\# etc p1; } ) : str; }, method = { '#': 'getElementById', '*': 'getElementsByTagName', '.': 'getElementsByClassName' }, compat = { '#': function(c, n) { REX.HasEscapes.test(n) && (n = unescapeIdentifier(n)); return function(e, f) { return byId(n, c); }; }, '*': function(c, n) { REX.HasEscapes.test(n) && (n = unescapeIdentifier(n)); return function(e, f) { return byTag(n, c); }; }, '.': function(c, n) { REX.HasEscapes.test(n) && (n = unescapeIdentifier(n)); return function(e, f) { return byClass(n, c); }; } }, // find duplicate ids using iterative walk byIdRaw = function(id, context) { var node = context, nodes = [ ], next = node.firstElementChild; while ((node = next)) { node.id == id && (nodes[nodes.length] = node); if ((next = node.firstElementChild || node.nextElementSibling)) continue; while (!next && (node = node.parentElement) && node !== context) { next = node.nextElementSibling; } } return nodes; }, // context agnostic getElementById byId = function(id, context) { var e, nodes, api = method['#']; // duplicates id allowed if (Config.IDS_DUPES === false) { if (api in context) { return (e = context[api](id)) ? [ e ] : none; } } else { if ('all' in context) { if ((e = context.all[id])) { if (e.nodeType == 1) return e.getAttribute('id') != id ? [ ] : [ e ]; else if (id == 'length') return (e = context[api](id)) ? [ e ] : none; for (i = 0, l = e.length, nodes = [ ]; l > i; ++i) { if (e[i].id == id) nodes[nodes.length] = e[i]; } return nodes && nodes.length ? 
nodes : [ nodes ]; } else return none; } } return byIdRaw(id, context); }, // context agnostic getElementsByTagName byTag = function(tag, context) { var e, nodes, api = method['*']; // DOCUMENT_NODE (9) & ELEMENT_NODE (1) if (api in context) { return slice.call(context[api](tag)); } else { // DOCUMENT_FRAGMENT_NODE (11) if ((e = context.firstElementChild)) { tag = tag.toLowerCase(); if (!(e.nextElementSibling || tag == '*' || e.nodeName.toLowerCase() == tag)) { return slice.call(e[api](tag)); } else { nodes = [ ]; do { if (tag == '*' || e.nodeName.toLowerCase() == tag) nodes[nodes.length] = e; concatList(nodes, e[api](tag)); } while ((e = e.nextElementSibling)); } } else nodes = none; } return nodes; }, // context agnostic getElementsByClassName byClass = function(cls, context) { var e, nodes, api = method['.'], reCls; // DOCUMENT_NODE (9) & ELEMENT_NODE (1) if (api in context) { return slice.call(context[api](cls)); } else { // DOCUMENT_FRAGMENT_NODE (11) if ((e = context.firstElementChild)) { reCls = RegExp('(^|\\s)' + cls + '(\\s|$)', QUIRKS_MODE ? 'i' : ''); if (!(e.nextElementSibling || reCls.test(e.className))) { return slice.call(e[api](cls)); } else { nodes = [ ]; do { if (reCls.test(e.className)) nodes[nodes.length] = e; concatList(nodes, e[api](cls)); } while ((e = e.nextElementSibling)); } } else nodes = none; } return nodes; }, // namespace aware hasAttribute // helper for XML/XHTML documents hasAttributeNS = function(e, name) { var i, l, attr = e.getAttributeNames(); name = RegExp(':?' + name + '$', HTML_DOCUMENT ? 
'i' : ''); for (i = 0, l = attr.length; l > i; ++i) { if (name.test(attr[i])) return true; } return false; }, // fast resolver for the :nth-child() and :nth-last-child() pseudo-classes nthElement = (function() { var idx = 0, len = 0, set = 0, parent = undefined, parents = Array(), nodes = Array(); return function(element, dir) { // ensure caches are emptied after each run, invoking with dir = 2 if (dir == 2) { idx = 0; len = 0; set = 0; nodes.length = 0; parents.length = 0; parent = undefined; return -1; } var e, i, j, k, l; if (parent === element.parentElement) { i = set; j = idx; l = len; } else { l = parents.length; parent = element.parentElement; for (i = -1, j = 0, k = l - 1; l > j; ++j, --k) { if (parents[j] === parent) { i = j; break; } if (parents[k] === parent) { i = k; break; } } if (i < 0) { parents[i = l] = parent; l = 0; nodes[i] = Array(); e = parent && parent.firstElementChild || element; while (e) { nodes[i][l] = e; if (e === element) j = l; e = e.nextElementSibling; ++l; } set = i; idx = 0; len = l; if (l < 2) return l; } else { l = nodes[i].length; set = i; } } if (element !== nodes[i][j] && element !== nodes[i][j = 0]) { for (j = 0, e = nodes[i], k = l - 1; l > j; ++j, --k) { if (e[j] === element) { break; } if (e[k] === element) { j = k; break; } } } idx = j + 1; len = l; return dir ? 
l - j : idx; }; })(), // fast resolver for the :nth-of-type() and :nth-last-of-type() pseudo-classes nthOfType = (function() { var idx = 0, len = 0, set = 0, parent = undefined, parents = Array(), nodes = Array(); return function(element, dir) { // ensure caches are emptied after each run, invoking with dir = 2 if (dir == 2) { idx = 0; len = 0; set = 0; nodes.length = 0; parents.length = 0; parent = undefined; return -1; } var e, i, j, k, l, name = element.nodeName; if (nodes[set] && nodes[set][name] && parent === element.parentElement) { i = set; j = idx; l = len; } else { l = parents.length; parent = element.parentElement; for (i = -1, j = 0, k = l - 1; l > j; ++j, --k) { if (parents[j] === parent) { i = j; break; } if (parents[k] === parent) { i = k; break; } } if (i < 0 || !nodes[i][name]) { parents[i = l] = parent; nodes[i] || (nodes[i] = Object()); l = 0; nodes[i][name] = Array(); e = parent && parent.firstElementChild || element; while (e) { if (e === element) j = l; if (e.nodeName == name) { nodes[i][name][l] = e; ++l; } e = e.nextElementSibling; } set = i; idx = j; len = l; if (l < 2) return l; } else { l = nodes[i][name].length; set = i; } } if (element !== nodes[i][name][j] && element !== nodes[i][name][j = 0]) { for (j = 0, e = nodes[i][name], k = l - 1; l > j; ++j, --k) { if (e[j] === element) { break; } if (e[k] === element) { j = k; break; } } } idx = j + 1; len = l; return dir ? l - j : idx; }; })(), // check if the document type is HTML isHTML = function(node) { var doc = node.ownerDocument || node; return doc.nodeType == 9 && // contentType not in IE <= 11 'contentType' in doc ? 
doc.contentType.indexOf('/html') > 0 : doc.createElement('DiV').nodeName == 'DIV'; }, // configure the engine to use special handling configure = function(option, clear) { if (typeof option == 'string') { return !!Config[option]; } if (typeof option != 'object') { return Config; } for (var i in option) { Config[i] = !!option[i]; } // clear lambda cache if (clear) { matchResolvers = { }; selectResolvers = { }; } setIdentifierSyntax(); return true; }, // centralized error and exceptions handling emit = function(message, proto) { var err; if (Config.VERBOSITY) { if (proto) { err = new proto(message); } else { err = new global.DOMException(message, 'SyntaxError'); } throw err; } if (Config.LOGERRORS && console && console.log) { console.log(message); } }, // execute the engine initialization code initialize = function(doc) { setIdentifierSyntax(); lastContext = switchContext(doc, true); }, // build validation regexps used by the engine setIdentifierSyntax = function() { // // NOTE: SPECIAL CASES IN CSS SYNTAX PARSING RULES // // The <EOF-token> https://drafts.csswg.org/css-syntax/#typedef-eof-token // allow mangled|unclosed selector syntax at the end of selectors strings // // Literal equivalent hex representations of the characters: " ' ` ] ) // // \\x22 = " - double quotes \\x5b = [ - open square bracket // \\x27 = ' - single quote \\x5d = ] - closed square bracket // \\x60 = ` - back tick \\x28 = ( - open round parens // \\x5c = \ - back slash \\x29 = ) - closed round parens // // using hex format prevents false matches of opened/closed instances // pairs, coloring breakage and other editors highlightning problems. // var identifier = // doesn't start with a digit '(?=[^0-9])' + // can start with double dash '(?:-{2}' + // may include ascii chars '|[a-zA-Z0-9-_]' + // non-ascii chars '|[^\\x00-\\x9f]' + // escaped chars '|\\\\[^\\r\\n\\f0-9a-fA-F]' + // unicode chars '|\\\\[0-9a-fA-F]{1,6}(?:\\r\\n|\\s)?' + // any escaped chars '|\\\\.' 
+ ')+', pseudonames = '[-\\w]+', pseudoparms = '(?:[-+]?\\d*)(?:n\\s?[-+]?\\s?\\d*)', doublequote = '"[^"\\\\]*(?:\\\\.[^"\\\\]*)*(?:"|$)', singlequote = "'[^'\\\\]*(?:\\\\.[^'\\\\]*)*(?:'|$)", attrparser = identifier + '|' + doublequote + '|' + singlequote, attrvalues = '([\\x22\\x27]?)((?!\\3)*|(?:\\\\?.)*?)(?:\\3|$)', attributes = '\\[' + // attribute presence '(?:\\*\\|)?' + WSP + '?' + '(' + identifier + '(?::' + identifier + ')?)' + WSP + '?' + '(?:' + '(' + CFG.operators + ')' + WSP + '?' + '(?:' + attrparser + ')' + ')?' + // attribute case sensitivity WSP + '?' + '(i)?' + WSP + '?' + '(?:\\]|$)', attrmatcher = attributes.replace(attrparser, attrvalues), pseudoclass = '(?:\\x28' + WSP + '*' + '(?:' + pseudoparms + '?)?|' + // universal * & // namespace *|* '(?:\\*|\\|)|' + '(?:' + '(?::' + pseudonames + '(?:\\x28' + pseudoparms + '?(?:\\x29|$))?|' + ')|' + '(?:[.#]?' + identifier + ')|' + '(?:' + attributes + ')' + ')+|' + '(?:' + WSP + '?,' + WSP + '?)|' + '(?:' + WSP + '?)|' + '(?:\\x29|$))*', standardValidator = '(?=' + WSP + '?[^>+~(){}<>])' + '(?:' + // universal * & // namespace *|* '(?:\\*|\\|)|' + '(?:[.#]?' + identifier + ')+|' + '(?:' + attributes + ')+|' + '(?:::?' + pseudonames + pseudoclass + ')|' + '(?:' + WSP + '?' 
+ CFG.combinators + WSP + '?)|' + '(?:' + WSP + '?,' + WSP + '?)|' + '(?:' + WSP + '?)' + ')+'; // the following global RE is used to return the // deepest nodeName in selector strings and then // use it to retrieve all possible matching nodes // that will be filtered by compiled resolvers reOptimizer = RegExp( '(?:([.:#*]?)' + '(' + identifier + ')' + '(?:' + ':[-\\w]+|' + '\\[[^\\]]+(?:\\]|$)|' + '\\x28[^\\x29]+(?:\\x29|$)' + ')*)$'); // global reValidator = RegExp(standardValidator, 'g'); Patterns.id = RegExp('^#(' + identifier + ')(.*)'); Patterns.tagName = RegExp('^(' + identifier + ')(.*)'); Patterns.className = RegExp('^\\.(' + identifier + ')(.*)'); Patterns.attribute = RegExp('^(?:' + attrmatcher + ')(.*)'); }, F_INIT = '"use strict";return function Resolver(c,f,x,r)', S_HEAD = 'var e,n,o,j=r.length-1,k=-1', M_HEAD = 'var e,n,o', S_LOOP = 'main:while((e=c[++k]))', N_LOOP = 'main:while((e=c.item(++k)))', M_LOOP = 'e=c;', S_BODY = 'r[++j]=c[k];', N_BODY = 'r[++j]=c.item(k);', M_BODY = '', S_TAIL = 'continue main;', M_TAIL = 'r=true;', S_TEST = 'if(f(c[k])){break main;}', N_TEST = 'if(f(c.item(k))){break main;}', M_TEST = 'f(c);', S_VARS = [ ], M_VARS = [ ], // compile groups or single selector strings into // executable functions for matching or selecting compile = function(selector, mode, callback) { var factory, token, head = '', loop = '', macro = '', source = '', vars = ''; // 'mode' can be boolean or null // true = select / false = match // null to use collection.item() switch (mode) { case true: if (selectLambdas[selector]) { return selectLambdas[selector]; } macro = S_BODY + (callback ? S_TEST : '') + S_TAIL; head = S_HEAD; loop = S_LOOP; break; case false: if (matchLambdas[selector]) { return matchLambdas[selector]; } macro = M_BODY + (callback ? M_TEST : '') + M_TAIL; head = M_HEAD; loop = M_LOOP; break; case null: if (selectLambdas[selector]) { return selectLambdas[selector]; } macro = N_BODY + (callback ? 
N_TEST : '') + S_TAIL; head = S_HEAD; loop = N_LOOP; break; default: break; } source = compileSelector(selector, macro, mode, callback, false); loop += mode || mode === null ? '{' + source + '}' : source; if (mode || mode === null && selector.includes(':nth')) { loop += reNthElem.test(selector) ? 's.nthElement(null, 2);' : ''; loop += reNthType.test(selector) ? 's.nthOfType(null, 2);' : ''; } if (S_VARS[0] || M_VARS[0]) { vars = ',' + (S_VARS.join(',') || M_VARS.join(',')); S_VARS.length = 0; M_VARS.length = 0; } factory = Function('s', F_INIT + '{' + head + vars + ';' + loop + 'return r;}')(Snapshot); return mode || mode === null ? (selectLambdas[selector] = factory) : (matchLambdas[selector] = factory); }, // build conditional code to check components of selector strings compileSelector = function(expression, source, mode, callback, not) { // N is the negation pseudo-class flag // D is the default inverted negation flag var a, b, n, f, i, l, name, nested, NS, N = not ? '!' : '', D = not ? '' : '!', compat, expr, match, result, status, symbol, test, type, selector = expression, selector_string, vars; // original 'select' or 'match' selector string before normalization selector_string = mode ? lastSelected : lastMatched; // isolate selector combinators/components and normalize whitespace selector = selector.replace(STD.combinator, '$1');//.replace(STD.whitespace, ' '); while (selector) { // get namespace prefix if present or get first char of selector symbol = STD.apimethods.test(selector) ? '|' : selector[0]; switch (symbol) { // universal resolver case '*': match = selector.match(Patterns.universal); if (N == '!') { source = 'if(' + N + 'true' + '){' + source + '}'; } break; // id resolver case '#': match = selector.match(Patterns.id); source = 'if(' + N + '(/^' + match[1] + '$/.test(e.getAttribute("id"))' + ')){' + source + '}'; break; // class name resolver case '.': match = selector.match(Patterns.className); compat = (QUIRKS_MODE ? 
'i' : '') + '.test(e.getAttribute("class"))'; source = 'if(' + N + '(/(^|\\s)' + match[1] + '(\\s|$)/' + compat + ')){' + source + '}'; break; // tag name resolver case (/[a-z]/i.test(symbol) ? symbol : undefined): match = selector.match(Patterns.tagName); source = 'if(' + N + '(e.nodeName' + (Config.MIXEDCASE || hasMixedCaseTagNames(doc) ? '.toLowerCase()=="' + match[1].toLowerCase() + '"' : '=="' + match[1].toUpperCase() + '"') + ')){' + source + '}'; break; // namespace resolver case '|': match = selector.match(Patterns.namespace); if (match[1] == '*') { source = 'if(' + N + 'true){' + source + '}'; } else if (!match[1]) { source = 'if(' + N + '(!e.namespaceURI)){' + source + '}'; } else if (typeof match[1] == 'string' && root.prefix == match[1]) { source = 'if(' + N + '(e.namespaceURI=="' + NAMESPACE + '")){' + source + '}'; } else { emit('\'' + selector_string + '\'' + qsInvalid); } break; // attributes resolver case '[': match = selector.match(Patterns.attribute); NS = match[0].match(STD.namespaces); name = match[1]; expr = name.split(':'); expr = expr.length == 2 ? expr[1] : expr[0]; if (match[2] && !(test = Operators[match[2]])) { emit('\'' + selector_string + '\'' + qsInvalid); return ''; } if (match[4] === '') { test = match[2] == '~=' ? { p1: '^\\s', p2: '+$', p3: 'true' } : match[2] in ATTR_STD_OPS && match[2] != '~=' ? { p1: '^', p2: '$', p3: 'true' } : test; } else if (match[2] == '~=' && match[4].includes(' ')) { // whitespace separated list but value contains space source = 'if(' + N + 'false){' + source + '}'; break; } else if (match[4]) { match[4] = convertEscapes(match[4]).replace(REX.RegExpChar, '\\$&'); } type = match[5] == 'i' || (HTML_DOCUMENT && HTML_TABLE[expr.toLowerCase()]) ? 'i' : ''; source = 'if(' + N + '(' + (!match[2] ? (NS ? 's.hasAttributeNS(e,"' + name + '")' : 'e.hasAttribute("' + name + '")') : !match[4] && ATTR_STD_OPS[match[2]] && match[2] != '~=' ? 
'e.getAttribute("' + name + '")==""' : '(/' + test.p1 + match[4] + test.p2 + '/' + type + ').test(e.getAttribute("' + name + '"))==' + test.p3) + ')){' + source + '}'; break; // *** General sibling combinator // E ~ F (F relative sibling of E) case '~': match = selector.match(Patterns.relative); source = 'n=e;while((e=e.previousElementSibling)){' + source + '}e=n;'; break; // *** Adjacent sibling combinator // E + F (F adiacent sibling of E) case '+': match = selector.match(Patterns.adjacent); source = 'n=e;if((e=e.previousElementSibling)){' + source + '}e=n;'; break; // *** Descendant combinator // E F (E ancestor of F) case '\x09': case '\x20': match = selector.match(Patterns.ancestor); source = 'n=e;while((e=e.parentElement)){' + source + '}e=n;'; break; // *** Child combinator // E > F (F children of E) case '>': match = selector.match(Patterns.children); source = 'n=e;if((e=e.parentElement)){' + source + '}e=n;'; break; // *** user supplied combinators extensions case (symbol in Combinators ? symbol : undefined): // for other registered combinators extensions match[match.length - 1] = '*'; source = Combinators[symbol](match) + source; break; // *** tree-structural pseudo-classes // :root, :empty, :first-child, :last-child, :only-child, :first-of-type, :last-of-type, :only-of-type case ':': if ((match = selector.match(Patterns.structural))) { match[1] = match[1].toLowerCase(); switch (match[1]) { case 'root': // there can only be one :root element, so exit the loop once found source = 'if(' + N + '(e===s.root)){' + source + (mode ? 
'break main;' : '') + '}'; break; case 'empty': // matches elements that don't contain elements or text nodes source = 'n=e.firstChild;while(n&&!(/1|3/).test(n.nodeType)){n=n.nextSibling}if(' + D + 'n){' + source + '}'; break; // *** child-indexed pseudo-classes // :first-child, :last-child, :only-child case 'only-child': source = 'if(' + N + '(!e.nextElementSibling&&!e.previousElementSibling)){' + source + '}'; break; case 'last-child': source = 'if(' + N + '(!e.nextElementSibling)){' + source + '}'; break; case 'first-child': source = 'if(' + N + '(!e.previousElementSibling)){' + source + '}'; break; // *** typed child-indexed pseudo-classes // :only-of-type, :last-of-type, :first-of-type case 'only-of-type': source = 'o=e.nodeName;' + 'n=e;while((n=n.nextElementSibling)&&n.nodeName!=o);if(!n){' + 'n=e;while((n=n.previousElementSibling)&&n.nodeName!=o);}if(' + D + 'n){' + source + '}'; break; case 'last-of-type': source = 'n=e;o=e.nodeName;while((n=n.nextElementSibling)&&n.nodeName!=o);if(' + D + 'n){' + source + '}'; break; case 'first-of-type': source = 'n=e;o=e.nodeName;while((n=n.previousElementSibling)&&n.nodeName!=o);if(' + D + 'n){' + source + '}'; break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } } // *** child-indexed & typed child-indexed pseudo-classes // :nth-child, :nth-of-type, :nth-last-child, :nth-last-of-type else if ((match = selector.match(Patterns.treestruct))) { match[1] = match[1].toLowerCase(); switch (match[1]) { case 'nth-child': case 'nth-of-type': case 'nth-last-child': case 'nth-last-of-type': expr = /-of-type/i.test(match[1]); if (match[1] && match[2]) { type = /last/i.test(match[1]); if (match[2] == 'n') { source = 'if(' + N + 'true){' + source + '}'; break; } else if (match[2] == '1') { test = type ? 'next' : 'previous'; source = expr ? 'n=e;o=e.nodeName;' + 'while((n=n.' + test + 'ElementSibling)&&n.nodeName!=o);if(' + D + 'n){' + source + '}' : 'if(' + N + '!e.' 
+ test + 'ElementSibling){' + source + '}'; break; } else if (match[2] == 'even' || match[2] == '2n0' || match[2] == '2n+0' || match[2] == '2n') { test = 'n%2==0'; } else if (match[2] == 'odd' || match[2] == '2n1' || match[2] == '2n+1') { test = 'n%2==1'; } else { f = /n/i.test(match[2]); n = match[2].split('n'); a = parseInt(n[0], 10) || 0; b = parseInt(n[1], 10) || 0; if (n[0] == '-') { a = -1; } if (n[0] == '+') { a = +1; } test = (b ? '(n' + (b > 0 ? '-' : '+') + Math.abs(b) + ')' : 'n') + '%' + a + '==0' ; test = a >= +1 ? (f ? 'n>' + (b - 1) + (Math.abs(a) != 1 ? '&&' + test : '') : 'n==' + a) : a <= -1 ? (f ? 'n<' + (b + 1) + (Math.abs(a) != 1 ? '&&' + test : '') : 'n==' + a) : a === 0 ? (n[0] ? 'n==' + b : 'n>' + (b - 1)) : 'false'; } expr = expr ? 'OfType' : 'Element'; type = type ? 'true' : 'false'; source = 'n=s.nth' + expr + '(e,' + type + ');if(' + N + '(' + test + ')){' + source + '}'; } else { emit('\'' + selector_string + '\'' + qsInvalid); } break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } } // *** logical combination pseudo-classes // :matches( s1, [ s2, ... ]), :not( s1, [ s2, ... 
]) else if ((match = selector.match(Patterns.logicalsel))) { match[1] = match[1].toLowerCase(); switch (match[1]) { case 'matches': if (not === true || nested === true) { emit(':matches() pseudo-class cannot be nested'); } nested = true; expr = match[2].replace(REX.CommaGroup, ',').replace(REX.TrimSpaces, ''); // check nested compound selectors s1, s2 expr = match[2].match(REX.SplitGroup); for (i = 0, l = expr.length; l > i; ++i) { expr[i] = expr[i].replace(REX.TrimSpaces, ''); source = 'if(s.match("' + expr[i].replace(/\x22/g, '\\"') + '",e)){' + source + '}'; } break; case 'not': if (not === true || nested === true) { emit(':not() pseudo-class cannot be nested'); } expr = match[2].replace(REX.CommaGroup, ',').replace(REX.TrimSpaces, ''); // check nested compound selectors s1, s2 expr = match[2].match(REX.SplitGroup); for (i = 0, l = expr.length; l > i; ++i) { expr[i] = expr[i].replace(REX.TrimSpaces, ''); source = compileSelector(expr[i], source, false, callback, true); } break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } } // *** linguistic pseudo-classes // :dir( ltr / rtl ), :lang( en ) else if ((match = selector.match(Patterns.linguistic))) { match[1] = match[1].toLowerCase(); switch (match[1]) { case 'dir': source = 'var p;if(' + N + '(' + '(/' + match[2] + '/i.test(e.dir))||(p=s.ancestor("[dir]", e))&&' + '(/' + match[2] + '/i.test(p.dir))||(e.dir==""||e.dir=="auto")&&' + '(' + (match[2] == 'ltr' ? 
'!':'')+ RTL +'.test(e.textContent)))' + '){' + source + '};'; break; case 'lang': expr = '(?:^|-)' + match[2] + '(?:-|$)'; source = 'var p;if(' + N + '(' + '(e.isConnected&&(e.lang==""&&(p=s.ancestor("[lang]",e)))&&' + '(p.lang=="' + match[2] + '")||/'+ expr +'/i.test(e.lang)))' + '){' + source + '};'; break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } } // *** location pseudo-classes // :link, :visited, :target else if ((match = selector.match(Patterns.locationpc))) { match[1] = match[1].toLowerCase(); switch (match[1]) { case 'link': source = 'if(' + N + '(/^a|area|link$/i.test(e.nodeName)&&e.hasAttribute("href"))){' + source + '}'; break; case 'visited': source = 'if(' + N + '(/^a|area|link$/i.test(e.nodeName)&&e.hasAttribute("href")&&e.visited)){' + source + '}'; break; case 'target': source = 'if(' + N + '((s.doc.compareDocumentPosition(e)&16)&&s.doc.location.hash&&e.id==s.doc.location.hash.slice(1))){' + source + '}'; break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } } // *** user actions pseudo-classes // :hover, :active, :focus else if ((match = selector.match(Patterns.useraction))) { match[1] = match[1].toLowerCase(); switch (match[1]) { case 'hover': source = 'hasFocus' in doc && doc.hasFocus() ? 'if(' + N + '(e===s.doc.hoverElement)){' + source + '}' : 'if(' + D + 'true){' + source + '}'; break; case 'active': source = 'hasFocus' in doc && doc.hasFocus() ? 'if(' + N + '(e===s.doc.activeElement)){' + source + '}' : 'if(' + D + 'true){' + source + '}'; break; case 'focus': source = 'hasFocus' in doc ? 'if(' + N + '(e===s.doc.activeElement&&s.doc.hasFocus()&&(e.type||e.href||typeof e.tabIndex=="number"))){' + source + '}' : 'if(' + N + '(e===s.doc.activeElement&&(e.type||e.href))){' + source + '}'; break; case 'focus-within': source = 'hasFocus' in doc ? 
'n=s.doc.activeElement;while(e){if(e===n||e.parentNode===n)break;}' + 'if(' + N + '(e===n&&s.doc.hasFocus()&&(e.type||e.href||typeof e.tabIndex=="number"))){' + source + '}' : source; break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } } // *** user interface and form pseudo-classes // :enabled, :disabled, :read-only, :read-write, :placeholder-shown, :default else if ((match = selector.match(Patterns.inputstate))) { match[1] = match[1].toLowerCase(); switch (match[1]) { case 'enabled': source = 'if(' + N + '(("form" in e||/^optgroup$/i.test(e.nodeName))&&"disabled" in e &&e.disabled===false' + ')){' + source + '}'; break; case 'disabled': // https://www.w3.org/TR/html5/forms.html#enabling-and-disabling-form-controls:-the-disabled-attribute source = 'if(' + N + '(("form" in e||/^optgroup$/i.test(e.nodeName))&&"disabled" in e&&' + '(e.disabled===true||(n=s.ancestor("fieldset",e))&&(n=s.first("legend",n))&&!n.contains(e))' + ')){' + source + '}'; break; case 'read-only': source = 'if(' + N + '(' + '(/^textarea$/i.test(e.nodeName)&&(e.readOnly||e.disabled))||' + '("|password|text|".includes("|"+e.type+"|")&&e.readOnly)' + ')){' + source + '}'; break; case 'read-write': source = 'if(' + N + '(' + '((/^textarea$/i.test(e.nodeName)&&!e.readOnly&&!e.disabled)||' + '("|password|text|".includes("|"+e.type+"|")&&!e.readOnly&&!e.disabled))||' + '(e.hasAttribute("contenteditable")||(s.doc.designMode=="on"))' + ')){' + source + '}'; break; case 'placeholder-shown': source = 'if(' + N + '(' + '(/^input|textarea$/i.test(e.nodeName))&&e.hasAttribute("placeholder")&&' + '("|textarea|password|number|search|email|text|tel|url|".includes("|"+e.type+"|"))&&' + '(!s.match(":focus",e))' + ')){' + source + '}'; break; case 'default': source = 'if(' + N + '("form" in e && e.form)){' + 'var x=0;n=[];' + 'if(e.type=="image")n=e.form.getElementsByTagName("input");' + 'if(e.type=="submit")n=e.form.elements;' + 'while(n[x]&&e!==n[x]){' + 'if(n[x].type=="image")break;' + 
'if(n[x].type=="submit")break;' + 'x++;' + '}' + '}' + 'if(' + N + '(e.form&&(e===n[x]&&"|image|submit|".includes("|"+e.type+"|"))||' + '((/^option$/i.test(e.nodeName))&&e.defaultSelected)||' + '(("|radio|checkbox|".includes("|"+e.type+"|"))&&e.defaultChecked)' + ')){' + source + '}'; break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } } // *** input pseudo-classes (for form validation) // :checked, :indeterminate, :valid, :invalid, :in-range, :out-of-range, :required, :optional else if ((match = selector.match(Patterns.inputvalue))) { match[1] = match[1].toLowerCase(); switch (match[1]) { case 'checked': source = 'if(' + N + '(/^input$/i.test(e.nodeName)&&' + '("|radio|checkbox|".includes("|"+e.type+"|")&&e.checked)||' + '(/^option$/i.test(e.nodeName)&&(e.selected||e.checked))' + ')){' + source + '}'; break; case 'indeterminate': source = 'if(' + N + '(/^progress$/i.test(e.nodeName)&&!e.hasAttribute("value"))||' + '(/^input$/i.test(e.nodeName)&&("checkbox"==e.type&&e.indeterminate)||' + '("radio"==e.type&&e.name&&!s.first("input[name="+e.name+"]:checked",e.form))' + ')){' + source + '}'; break; case 'required': source = 'if(' + N + '(/^input|select|textarea$/i.test(e.nodeName)&&e.required)' + '){' + source + '}'; break; case 'optional': source = 'if(' + N + '(/^input|select|textarea$/i.test(e.nodeName)&&!e.required)' + '){' + source + '}'; break; case 'invalid': source = 'if(' + N + '((' + '(/^form$/i.test(e.nodeName)&&!e.noValidate)||' + '(e.willValidate&&!e.formNoValidate))&&!e.checkValidity())||' + '(/^fieldset$/i.test(e.nodeName)&&s.first(":invalid",e))' + '){' + source + '}'; break; case 'valid': source = 'if(' + N + '((' + '(/^form$/i.test(e.nodeName)&&!e.noValidate)||' + '(e.willValidate&&!e.formNoValidate))&&e.checkValidity())||' + '(/^fieldset$/i.test(e.nodeName)&&s.first(":valid",e))' + '){' + source + '}'; break; case 'in-range': source = 'if(' + N + '(/^input$/i.test(e.nodeName))&&' + '(e.willValidate&&!e.formNoValidate)&&' + 
'(!e.validity.rangeUnderflow&&!e.validity.rangeOverflow)&&' + '("|date|datetime-local|month|number|range|time|week|".includes("|"+e.type+"|"))&&' + '("range"==e.type||e.getAttribute("min")||e.getAttribute("max"))' + '){' + source + '}'; break; case 'out-of-range': source = 'if(' + N + '(/^input$/i.test(e.nodeName))&&' + '(e.willValidate&&!e.formNoValidate)&&' + '(e.validity.rangeUnderflow||e.validity.rangeOverflow)&&' + '("|date|datetime-local|month|number|range|time|week|".includes("|"+e.type+"|"))&&' + '("range"==e.type||e.getAttribute("min")||e.getAttribute("max"))' + '){' + source + '}'; break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } } // allow pseudo-elements starting with single colon (:) // :after, :before, :first-letter, :first-line else if ((match = selector.match(Patterns.pseudo_sng))) { source = 'if(' + D + '(e.nodeType==1)){' + source + '}'; } // allow pseudo-elements starting with double colon (::) // ::after, ::before, ::marker, ::placeholder, ::inactive-selection, ::selection, ::-webkit-<foo-bar> else if ((match = selector.match(Patterns.pseudo_dbl))) { source = 'if(' + D + '(e.nodeType==1)){' + source + '}'; } else { // reset expr = false; status = false; // process registered selector extensions for (expr in Selectors) { if ((match = selector.match(Selectors[expr].Expression))) { result = Selectors[expr].Callback(match, source, mode, callback); if ('match' in result) { match = result.match; } vars = result.modvar; if (mode) { // add extra select() vars vars && S_VARS.indexOf(vars) < 0 && (S_VARS[S_VARS.length] = vars); } else { // add extra match() vars vars && M_VARS.indexOf(vars) < 0 && (M_VARS[M_VARS.length] = vars); } // extension source code source = result.source; // extension status code status = result.status; // break on status error if (status) { break; } } } if (!status) { emit('unknown pseudo-class selector \'' + selector + '\''); return ''; } if (!expr) { emit('unknown token in selector \'' + selector + 
'\''); return ''; } } break; default: emit('\'' + selector_string + '\'' + qsInvalid); break; } // end of switch symbol if (!match) { emit('\'' + selector_string + '\'' + qsInvalid); return ''; } // pop last component selector = match.pop(); } // end of while selector return source; }, // replace ':scope' pseudo-class with element references makeref = function(selectors, element) { return selectors.replace(/:scope/i, element.nodeName.toLowerCase() + (element.id ? '#' + element.id : '') + (element.className ? '.' + element.classList[0] : '')); }, // equivalent of w3c 'closest' method ancestor = function _closest(selectors, element, callback) { if ((/:scope/i).test(selectors)) { selectors = makeref(selectors, element); } while (element) { if (match(selectors, element, callback)) break; element = element.parentElement; } return element; }, match_assert = function(f, element, callback) { for (var i = 0, l = f.length, r = false; l > i; ++i) f[i](element, callback, null, false) && (r = true); return r; }, match_collect = function(selectors, callback) { for (var i = 0, l = selectors.length, f = [ ]; l > i; ++i) f[i] = compile(selectors[i], false, callback); return { factory: f }; }, // equivalent of w3c 'matches' method match = function _matches(selectors, element, callback) { var expressions, parsed; if (element && matchResolvers[selectors]) { return match_assert(matchResolvers[selectors].factory, element, callback); } lastMatched = selectors; // arguments validation if (arguments.length === 0) { emit(qsNotArgs, TypeError); return Config.VERBOSITY ? undefined : false; } else if (arguments[0] === '') { emit('\'\'' + qsInvalid); return Config.VERBOSITY ? undefined : false; } // input NULL or UNDEFINED if (typeof selectors != 'string') { selectors = '' + selectors; } if ((/:scope/i).test(selectors)) { selectors = makeref(selectors, element); } // normalize input string parsed = selectors. replace(/\x00|\\$/g, '\ufffd'). replace(REX.CombineWSP, '\x20'). 
replace(REX.PseudosWSP, '$1'). replace(REX.TabCharWSP, '\t'). replace(REX.CommaGroup, ','). replace(REX.TrimSpaces, ''); // parse, validate and split possible compound selectors if ((expressions = parsed.match(reValidator)) && expressions.join('') == parsed) { expressions = parsed.match(REX.SplitGroup); if (parsed[parsed.length - 1] == ',') { emit(qsInvalid); return Config.VERBOSITY ? undefined : false; } } else { emit('\'' + selectors + '\'' + qsInvalid); return Config.VERBOSITY ? undefined : false; } matchResolvers[selectors] = match_collect(expressions, callback); return match_assert(matchResolvers[selectors].factory, element, callback); }, // equivalent of w3c 'querySelector' method first = function _querySelector(selectors, context, callback) { if (arguments.length === 0) { emit(qsNotArgs, TypeError); } return select(selectors, context, typeof callback == 'function' ? function firstMatch(element) { callback(element); return false; } : function firstMatch() { return false; } )[0] || null; }, // equivalent of w3c 'querySelectorAll' method select = function _querySelectorAll(selectors, context, callback) { var expressions, nodes, parsed, resolver; context || (context = doc); if (selectors) { if ((resolver = selectResolvers[selectors])) { if (resolver.context === context && resolver.callback === callback) { var f = resolver.factory, h = resolver.htmlset, n = resolver.nodeset, nodes = [ ]; if (n.length > 1) { for (var i = 0, l = n.length, list; l > i; ++i) { list = compat[n[i][0]](context, n[i].slice(1))(); if (f[i] !== null) { f[i](list, callback, context, nodes); } else { nodes = nodes.concat(list); } } if (l > 1 && nodes.length > 1) { nodes.sort(documentOrder); hasDupes && (nodes = unique(nodes)); } } else { if (f[0]) { nodes = f[0](h[0](), callback, context, nodes); } else { nodes = h[0](); } } return typeof callback == 'function' ? 
concatCall(nodes, callback) : nodes; } } } lastSelected = selectors; // arguments validation if (arguments.length === 0) { emit(qsNotArgs, TypeError); return Config.VERBOSITY ? undefined : none; } else if (arguments[0] === '') { emit('\'\'' + qsInvalid); return Config.VERBOSITY ? undefined : none; } else if (lastContext !== context) { lastContext = switchContext(context); } // input NULL or UNDEFINED if (typeof selectors != 'string') { selectors = '' + selectors; } if ((/:scope/i).test(selectors)) { selectors = makeref(selectors, context); } // normalize input string parsed = selectors. replace(/\x00|\\$/g, '\ufffd'). replace(REX.CombineWSP, '\x20'). replace(REX.PseudosWSP, '$1'). replace(REX.TabCharWSP, '\t'). replace(REX.CommaGroup, ','). replace(REX.TrimSpaces, ''); // parse, validate and split possible compound selectors if ((expressions = parsed.match(reValidator)) && expressions.join('') == parsed) { expressions = parsed.match(REX.SplitGroup); if (parsed[parsed.length - 1] == ',') { emit(qsInvalid); return Config.VERBOSITY ? undefined : false; } } else { emit('\'' + selectors + '\'' + qsInvalid); return Config.VERBOSITY ? undefined : false; } // save/reuse factory and closure collection selectResolvers[selectors] = collect(expressions, context, callback); nodes = selectResolvers[selectors].results; return typeof callback == 'function' ? concatCall(nodes, callback) : nodes; }, // optimize selectors avoiding duplicated checks optimize = function(selector, token) { var index = token.index, length = token[1].length + token[2].length; return selector.slice(0, index) + (' >+~'.indexOf(selector.charAt(index - 1)) > -1 ? (':['.indexOf(selector.charAt(index + length + 1)) > -1 ? '*' : '') : '') + selector.slice(index + length - (token[1] == '*' ? 
1 : 0)); }, // prepare factory resolvers and closure collections collect = function(selectors, context, callback) { var i, l, token, seen = { }, factory = [ ], htmlset = [ ], nodeset = [ ], results = [ ]; for (i = 0, l = selectors.length; l > i; ++i) { if (!seen[selectors[i]] && (seen[selectors[i]] = true)) { if ((token = selectors[i].match(reOptimizer)) && token[1] != ':') { token[1] || (token[1] = '*'); selectors[i] = optimize(selectors[i], token); } else { token = ['', '*', '*']; } nodeset[i] = token[1] + token[2]; htmlset[i] = compat[token[1]](context, token[2]); factory[i] = compile(selectors[i], true, null); if (factory[i]) { factory[i](htmlset[i](), callback, context, results); } else { results = results.concat(htmlset[i]()); } } } if (l > 1) { results.sort(documentOrder); hasDupes && (results = unique(results)); } return { callback: callback, context: context, factory: factory, htmlset: htmlset, nodeset: nodeset, results: results }; }, // QSA placeholders to native references _closest, _matches, _querySelector, _querySelectorAll, // overrides QSA methods (only for browsers) install = function(all) { // save native QSA references _closest = Element.prototype.closest; _matches = Element.prototype.matches; _querySelector = Document.prototype.querySelector; _querySelectorAll = Document.prototype.querySelectorAll; Element.prototype.closest = function closest() { var ctor = Object.getPrototypeOf(this).__proto__.__proto__.constructor.name; if (!('nodeType' in this)) { emit('\'closest\' called on an object that does not implement interface ' + ctor + '.', TypeError); } return arguments.length < 1 ? ancestor.apply(this, [ ]) : arguments.length < 2 ? ancestor.apply(this, [ arguments[0], this ]) : ancestor.apply(this, [ arguments[0], this, typeof arguments[1] == 'function' ? 
arguments[1] : undefined ]); }; Element.prototype.matches = function matches() { var ctor = Object.getPrototypeOf(this).__proto__.__proto__.constructor.name; if (!('nodeType' in this)) { emit('\'matches\' called on an object that does not implement interface ' + ctor + '.', TypeError); } return arguments.length < 1 ? match.apply(this, [ ]) : arguments.length < 2 ? match.apply(this, [ arguments[0], this ]) : match.apply(this, [ arguments[0], this, typeof arguments[1] == 'function' ? arguments[1] : undefined ]); }; Element.prototype.querySelector = Document.prototype.querySelector = DocumentFragment.prototype.querySelector = function querySelector() { var ctor = Object.getPrototypeOf(this).__proto__.__proto__.constructor.name; if (!('nodeType' in this)) { emit('\'querySelector\' called on an object that does not implement interface ' + ctor + '.', TypeError); } return arguments.length < 1 ? first.apply(this, [ ]) : arguments.length < 2 ? first.apply(this, [ arguments[0], this ]) : first.apply(this, [ arguments[0], this, typeof arguments[1] == 'function' ? arguments[1] : undefined ]); }; Element.prototype.querySelectorAll = Document.prototype.querySelectorAll = DocumentFragment.prototype.querySelectorAll = function querySelectorAll() { var ctor = Object.getPrototypeOf(this).__proto__.__proto__.constructor.name; if (!('nodeType' in this)) { emit('\'querySelectorAll\' called on an object that does not implement interface ' + ctor + '.', TypeError); } return arguments.length < 1 ? select.apply(this, [ ]) : arguments.length < 2 ? select.apply(this, [ arguments[0], this ]) : select.apply(this, [ arguments[0], this, typeof arguments[1] == 'function' ? 
arguments[1] : undefined ]); }; if (all) { document.addEventListener('load', function(e) { var c, d, r, s, t = e.target; if (/iframe/i.test(t.nodeName)) { c = '(' + Export + ')(this, ' + Factory + ');'; d = t.contentDocument; s = d.createElement('script'); s.textContent = c + 'NW.Dom.install()'; r = d.documentElement; r.removeChild(r.insertBefore(s, r.firstChild)); } }, true); } }, // restore QSA methods (only for browsers) uninstall = function() { // reinstates QSA native references Element.prototype.closest = _closest; Element.prototype.matches = _matches; Element.prototype.querySelector = Document.prototype.querySelector = DocumentFragment.prototype.querySelector = _querySelector; Element.prototype.querySelectorAll = Document.prototype.querySelectorAll = DocumentFragment.prototype.querySelectorAll = _querySelectorAll; }, // empty set none = Array(), // context lastContext, // selector lastMatched, lastSelected, // cached lambdas matchLambdas = { }, selectLambdas = { }, // cached resolvers matchResolvers = { }, selectResolvers = { }, // passed to resolvers Snapshot = { doc: doc, from: doc, root: root, byTag: byTag, first: first, match: match, ancestor: ancestor, nthOfType: nthOfType, nthElement: nthElement, hasAttributeNS: hasAttributeNS }, // public exported methods/objects Dom = { // exported cache objects lastMatched: lastMatched, lastSelected: lastSelected, matchLambdas: matchLambdas, selectLambdas: selectLambdas, matchResolvers: matchResolvers, selectResolvers: selectResolvers, // exported compiler macros CFG: CFG, M_BODY: M_BODY, S_BODY: S_BODY, M_TEST: M_TEST, S_TEST: S_TEST, // exported engine methods byId: byId, byTag: byTag, byClass: byClass, match: match, first: first, select: select, closest: ancestor, compile: compile, configure: configure, emit: emit, Config: Config, Snapshot: Snapshot, Version: version, install: install, uninstall: uninstall, Operators: Operators, Selectors: Selectors, // register a new selector combinator symbol and its related 
function resolver registerCombinator: function(combinator, resolver) { var i = 0, l = combinator.length, symbol; for (; l > i; ++i) { if (combinator[i] != '=') { symbol = combinator[i]; break; } } if (CFG.combinators.indexOf(symbol) < 0) { CFG.combinators = CFG.combinators.replace('](', symbol + ']('); CFG.combinators = CFG.combinators.replace('])', symbol + '])'); Combinators[combinator] = resolver; setIdentifierSyntax(); } else { console.warn('Warning: the \'' + combinator + '\' combinator is already registered.'); } }, // register a new attribute operator symbol and its related function resolver registerOperator: function(operator, resolver) { var i = 0, l = operator.length, symbol; for (; l > i; ++i) { if (operator[i] != '=') { symbol = operator[i]; break; } } if (CFG.operators.indexOf(symbol) < 0 && !Operators[operator]) { CFG.operators = CFG.operators.replace(']=', symbol + ']='); Operators[operator] = resolver; setIdentifierSyntax(); } else { console.warn('Warning: the \'' + operator + '\' operator is already registered.'); } }, // register a new selector symbol and its related function resolver registerSelector: function(name, rexp, func) { Selectors[name] || (Selectors[name] = { Expression: rexp, Callback: func }); } }; initialize(doc); return Dom; });
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/nwsapi/src/nwsapi.js
nwsapi.js
// jQuery-compatible positional pseudo-class extensions for the NW.Dom
// selector compiler: :nth(n), :eq(n), :lt(n), :gt(n), :even, :odd,
// :first, :last. Each resolver receives the regexp `match`, the compiled
// `source` code string, the compilation `mode` (truthy = select, falsy =
// match) and the optional user `callback`, and returns the augmented
// source plus a status flag telling the compiler whether it handled the
// token. The spliced strings are generated code: `e` is the candidate
// element, `s` the Snapshot, `n`/`x` scratch variables.
NW.Dom.registerSelector(
  'jquery:child',
  /^\:((?:(nth|eq|lt|gt)\(([^()]*)\))|(?:even|odd|first|last))(.*)/i,
  (function(global) {
    return function(match, source, mode, callback) {
      var status = true,
          // pick the body macro for the current compilation mode
          macro = mode ? NW.Dom.S_BODY : NW.Dom.M_BODY;
      // inject the callback-test macro only when a callback was supplied
      macro = macro.replace('@', typeof callback == 'function' ? (mode ? NW.Dom.S_TEST : NW.Dom.M_TEST) : '');
      switch (match[1].toLowerCase()) {
        case 'odd':
          // toggle n each hit; keep every second element (odd positions)
          source = source.replace(macro, 'if((n=n^1)==0){' + macro + '}');
          break;
        case 'even':
          source = source.replace(macro, 'if((n=n^1)==1){' + macro + '}');
          break;
        case 'first':
          // first element of this tag name in the whole root
          source = 'n=s.root.getElementsByTagName(e.nodeName);if(n.length&&n[0]===e){' + source + '}';
          break;
        case 'last':
          source = 'n=s.root.getElementsByTagName(e.nodeName);if(n.length&&n[n.length-1]===e){' + source + '}';
          break;
        default:
          // parenthesized forms: :nth(n), :eq(n), :lt(n), :gt(n)
          switch (match[2].toLowerCase()) {
            case 'nth':
              source = 'n=s.root.getElementsByTagName(e.nodeName);if(n.length&&n[' + match[3] + ']===e){' + source + '}';
              break;
            case 'eq':
              // x counts matches seen so far (declared via modvar below)
              source = source.replace(macro, 'if(x++==' + match[3] + '){' + macro + '}');
              break;
            case 'lt':
              source = source.replace(macro, 'if(x++<' + match[3] + '){' + macro + '}');
              break;
            case 'gt':
              source = source.replace(macro, 'if(x++>' + match[3] + '){' + macro + '}');
              break;
            default:
              // unrecognized token: let other resolvers try
              status = false;
              break;
          }
          break;
      }
      // compiler will add this to "source"
      return { 'source': source, 'status': status, 'modvar': 'x=0' };
    };
  })(this));

// for element pseudo-classes extensions
// jQuery form/visibility pseudo-classes: :has(tag), :checkbox, :file,
// :image, :password, :radio, :reset, :submit, :text, :button, :input,
// :header, :hidden, :visible, :parent.
NW.Dom.registerSelector(
  'jquery:pseudo',
  /^\:(has|checkbox|file|image|password|radio|reset|submit|text|button|input|header|hidden|visible|parent)(?:\(\s*(["']*)?([^'"()]*)\2\s*\))?(.*)/i,
  (function(global) {
    return function(match, source, mode, callback) {
      var status = true,
          macro = mode ? NW.Dom.S_BODY : NW.Dom.M_BODY;
      macro = macro.replace('@', typeof callback == 'function' ? (mode ? NW.Dom.S_TEST : NW.Dom.M_TEST) : '');
      switch(match[1].toLowerCase()) {
        case 'has':
          // :has(tag) — element must contain at least one descendant with
          // the (whitespace-trimmed) tag name captured in match[3]
          source = source.replace(macro, 'if(e.getElementsByTagName("' + match[3].replace(/^\s|\s$/g, '') + '")[0]){' + macro + '}');
          break;
        case 'checkbox':
        case 'file':
        case 'image':
        case 'password':
        case 'radio':
        case 'reset':
        case 'submit':
        case 'text':
          // :checkbox, :file, :image, :password, :radio, :reset, :submit, :text
          // all test the element's `type` attribute against the keyword
          source = 'if(/^' + match[1] + '$/i.test(e.type)){' + source + '}';
          break;
        case 'button':
          source = 'if(/^button$/i.test(e.nodeName)){' + source + '}';
          break;
        case 'input':
          source = 'if(/^(?:button|input|select|textarea)$/i.test(e.nodeName)){' + source + '}';
          break;
        case 'header':
          source = 'if(/^h[1-6]$/i.test(e.nodeName)){' + source + '}';
          break;
        case 'hidden':
          // zero rendered size — NOTE(review): this is jQuery's heuristic,
          // not CSS visibility; elements with visibility:hidden still have size
          source = 'if(!e.offsetWidth&&!e.offsetHeight){' + source + '}';
          break;
        case 'visible':
          source = 'if(e.offsetWidth||e.offsetHeight){' + source + '}';
          break;
        case 'parent':
          // :parent — has at least one child node (of any node type)
          source = 'if(e.firstChild){' + source + '}';
          break;
        default:
          status = false;
          break;
      }
      // compiler will add this to "source"
      return { 'source': source, 'status': status };
    };
  })(this));
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/nwsapi/src/modules/nwsapi-jquery.js
nwsapi-jquery.js
(function(D) {
  // Directional DOM traversal helpers (up/down/next/previous) layered on
  // top of NW.Dom's match()/select().
  // TODO: all of this needs tests
  var match = D.match,
      select = D.select,
      root = document.documentElement,
      // Property names from the Element Traversal API...
      nextElement = 'nextElementSibling',
      previousElement = 'previousElementSibling',
      parentElement = 'parentElement';

  // ...with DOM Level 1 fallbacks for engines that lack them.
  if (!(nextElement in root)) { nextElement = 'nextSibling'; }
  if (!(previousElement in root)) { previousElement = 'previousSibling'; }
  if (!(parentElement in root)) { parentElement = 'parentNode'; }

  /**
   * Follow the `property` chain from `element` and return the first
   * element node satisfying `expr` — either a CSS selector string or a
   * numeric index (an omitted `expr` is treated as index 0).
   * Returns null when nothing matches.
   */
  function walkElements(property, element, expr) {
    var seen = 0,
        byIndex = typeof expr == 'number';
    if (typeof expr == 'undefined') {
      byIndex = true;
      expr = 0;
    }
    while ((element = element[property])) {
      // only element nodes participate; skip text/comment nodes
      if (element.nodeType != 1) { continue; }
      if (!byIndex) {
        if (match(element, expr)) { return element; }
      } else if (++seen == expr) {
        return element;
      }
    }
    return null;
  }

  /**
   * @method up
   * Nearest ancestor of `element` satisfying `expr`.
   * @param {HTMLElement} element element to walk from
   * @param {String | Number} expr CSS expression or an index
   * @return {HTMLElement | null}
   */
  function up(element, expr) {
    return walkElements(parentElement, element, expr);
  }

  /**
   * @method next
   * Following sibling of `element` satisfying `expr`.
   * @param {HTMLElement} element element to walk from
   * @param {String | Number} expr CSS expression or an index
   * @return {HTMLElement | null}
   */
  function next(element, expr) {
    return walkElements(nextElement, element, expr);
  }

  /**
   * @method previous
   * Preceding sibling of `element` satisfying `expr`.
   * @param {HTMLElement} element element to walk from
   * @param {String | Number} expr CSS expression or an index
   * @return {HTMLElement | null}
   */
  function previous(element, expr) {
    return walkElements(previousElement, element, expr);
  }

  /**
   * @method down
   * Descendant lookup. `expr === null` returns the first element child;
   * a numeric `expr` is a 0-based index into the descendant list (index 0
   * returning `element` itself); otherwise `expr` is a CSS selector, also
   * tested against `element` itself first.
   * @param {HTMLElement} element element to walk from
   * @param {String | Number} expr CSS expression or an index
   * @return {HTMLElement | null}
   */
  function down(element, expr) {
    var byIndex = typeof expr == 'number',
        pool, cursor, candidate;
    if (expr === null) {
      element = element.firstChild;
      while (element && element.nodeType != 1) {
        element = element[nextElement];
      }
      return element;
    }
    if (byIndex ? expr === 0 : match(element, expr)) { return element; }
    pool = select('*', element);
    if (byIndex) { return pool[expr] || null; }
    cursor = 0;
    while ((candidate = pool[cursor]) && !match(candidate, expr)) {
      ++cursor;
    }
    return candidate || null;
  }

  D.up = up;
  D.down = down;
  D.next = next;
  D.previous = previous;
})(NW.Dom);
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/nwsapi/src/modules/nwsapi-traversal.js
nwsapi-traversal.js
# json-schema-traverse Traverse JSON Schema passing each schema object to callback [![Build Status](https://travis-ci.org/epoberezkin/json-schema-traverse.svg?branch=master)](https://travis-ci.org/epoberezkin/json-schema-traverse) [![npm version](https://badge.fury.io/js/json-schema-traverse.svg)](https://www.npmjs.com/package/json-schema-traverse) [![Coverage Status](https://coveralls.io/repos/github/epoberezkin/json-schema-traverse/badge.svg?branch=master)](https://coveralls.io/github/epoberezkin/json-schema-traverse?branch=master) ## Install ``` npm install json-schema-traverse ``` ## Usage ```javascript const traverse = require('json-schema-traverse'); const schema = { properties: { foo: {type: 'string'}, bar: {type: 'integer'} } }; traverse(schema, {cb}); // cb is called 3 times with: // 1. root schema // 2. {type: 'string'} // 3. {type: 'integer'} // Or: traverse(schema, {cb: {pre, post}}); // pre is called 3 times with: // 1. root schema // 2. {type: 'string'} // 3. {type: 'integer'} // // post is called 3 times with: // 1. {type: 'string'} // 2. {type: 'integer'} // 3. root schema ``` Callback function `cb` is called for each schema object (not including draft-06 boolean schemas), including the root schema, in pre-order traversal. Schema references ($ref) are not resolved; they are passed as is. Alternatively, you can pass a `{pre, post}` object as `cb`, and then `pre` will be called before traversing child elements, and `post` will be called after all child elements have been traversed. Callback is passed these parameters: - _schema_: the current schema object - _JSON pointer_: from the root schema to the current schema object - _root schema_: the schema object passed to `traverse` - _parent JSON pointer_: from the root schema to the parent schema object (see below) - _parent keyword_: the keyword inside which this schema appears (e.g. `properties`, `anyOf`, etc.)
- _parent schema_: not necessarily parent object/array; in the example above the parent schema for `{type: 'string'}` is the root schema - _index/property_: index or property name in the array/object containing multiple schemas; in the example above for `{type: 'string'}` the property name is `'foo'` ## Traverse objects in all unknown keywords ```javascript const traverse = require('json-schema-traverse'); const schema = { mySchema: { minimum: 1, maximum: 2 } }; traverse(schema, {allKeys: true, cb}); // cb is called 2 times with: // 1. root schema // 2. mySchema ``` Without option `allKeys: true` callback will be called only with root schema. ## License [MIT](https://github.com/epoberezkin/json-schema-traverse/blob/master/LICENSE)
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/json-schema-traverse/README.md
README.md
'use strict'; var traverse = module.exports = function (schema, opts, cb) { // Legacy support for v0.3.1 and earlier. if (typeof opts == 'function') { cb = opts; opts = {}; } cb = opts.cb || cb; var pre = (typeof cb == 'function') ? cb : cb.pre || function() {}; var post = cb.post || function() {}; _traverse(opts, pre, post, schema, '', schema); }; traverse.keywords = { additionalItems: true, items: true, contains: true, additionalProperties: true, propertyNames: true, not: true }; traverse.arrayKeywords = { items: true, allOf: true, anyOf: true, oneOf: true }; traverse.propsKeywords = { definitions: true, properties: true, patternProperties: true, dependencies: true }; traverse.skipKeywords = { default: true, enum: true, const: true, required: true, maximum: true, minimum: true, exclusiveMaximum: true, exclusiveMinimum: true, multipleOf: true, maxLength: true, minLength: true, pattern: true, format: true, maxItems: true, minItems: true, uniqueItems: true, maxProperties: true, minProperties: true }; function _traverse(opts, pre, post, schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex) { if (schema && typeof schema == 'object' && !Array.isArray(schema)) { pre(schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex); for (var key in schema) { var sch = schema[key]; if (Array.isArray(sch)) { if (key in traverse.arrayKeywords) { for (var i=0; i<sch.length; i++) _traverse(opts, pre, post, sch[i], jsonPtr + '/' + key + '/' + i, rootSchema, jsonPtr, key, schema, i); } } else if (key in traverse.propsKeywords) { if (sch && typeof sch == 'object') { for (var prop in sch) _traverse(opts, pre, post, sch[prop], jsonPtr + '/' + key + '/' + escapeJsonPtr(prop), rootSchema, jsonPtr, key, schema, prop); } } else if (key in traverse.keywords || (opts.allKeys && !(key in traverse.skipKeywords))) { _traverse(opts, pre, post, sch, jsonPtr + '/' + key, rootSchema, jsonPtr, key, schema); } } post(schema, jsonPtr, rootSchema, 
parentJsonPtr, parentKeyword, parentSchema, keyIndex); } } function escapeJsonPtr(str) { return str.replace(/~/g, '~0').replace(/\//g, '~1'); }
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/json-schema-traverse/index.js
index.js
'use strict';

// Spec for json-schema-traverse: covers the modern {cb} API, the legacy
// v0.3.1 positional-callback API, the allKeys option, and pre/post
// ordering. Fixes a typo in one test description ("travers" -> "traverse").
var traverse = require('json-schema-traverse');
var assert = require('assert');

describe('json-schema-traverse', function() {
  // call log shared by all tests; each entry is the full argument list
  var calls;

  beforeEach(function() {
    calls = [];
  });

  it('should traverse all keywords containing schemas recursively', function() {
    var schema = require('json-schema-traverse/spec/fixtures/schema').schema;
    var expectedCalls = require('json-schema-traverse/spec/fixtures/schema').expectedCalls;

    traverse(schema, {cb: callback});
    assert.deepStrictEqual(calls, expectedCalls);
  });

  describe('Legacy v0.3.1 API', function() {
    it('should traverse all keywords containing schemas recursively', function() {
      var schema = require('json-schema-traverse/spec/fixtures/schema').schema;
      var expectedCalls = require('json-schema-traverse/spec/fixtures/schema').expectedCalls;

      traverse(schema, callback);
      assert.deepStrictEqual(calls, expectedCalls);
    });

    it('should work when an options object is provided', function() {
      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
      var schema = require('json-schema-traverse/spec/fixtures/schema').schema;
      var expectedCalls = require('json-schema-traverse/spec/fixtures/schema').expectedCalls;

      traverse(schema, {}, callback);
      assert.deepStrictEqual(calls, expectedCalls);
    });
  });

  describe('allKeys option', function() {
    var schema = {
      someObject: {
        minimum: 1,
        maximum: 2
      }
    };

    it('should traverse objects with allKeys: true option', function() {
      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
      var expectedCalls = [
        [schema, '', schema, undefined, undefined, undefined, undefined],
        [schema.someObject, '/someObject', schema, '', 'someObject', schema, undefined]
      ];

      traverse(schema, {allKeys: true, cb: callback});
      assert.deepStrictEqual(calls, expectedCalls);
    });

    it('should NOT traverse objects with allKeys: false option', function() {
      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
      var expectedCalls = [
        [schema, '', schema, undefined, undefined, undefined, undefined]
      ];

      traverse(schema, {allKeys: false, cb: callback});
      assert.deepStrictEqual(calls, expectedCalls);
    });

    it('should NOT traverse objects without allKeys option', function() {
      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
      var expectedCalls = [
        [schema, '', schema, undefined, undefined, undefined, undefined]
      ];

      traverse(schema, {cb: callback});
      assert.deepStrictEqual(calls, expectedCalls);
    });

    it('should NOT traverse objects in standard keywords which value is not a schema', function() {
      var schema2 = {
        const: {foo: 'bar'},
        enum: ['a', 'b'],
        required: ['foo'],
        another: {

        },
        patternProperties: {}, // will not traverse - no properties
        dependencies: true, // will not traverse - invalid
        properties: {
          smaller: {
            type: 'number'
          },
          larger: {
            type: 'number',
            minimum: {$data: '1/smaller'}
          }
        }
      };

      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
      var expectedCalls = [
        [schema2, '', schema2, undefined, undefined, undefined, undefined],
        [schema2.another, '/another', schema2, '', 'another', schema2, undefined],
        [schema2.properties.smaller, '/properties/smaller', schema2, '', 'properties', schema2, 'smaller'],
        [schema2.properties.larger, '/properties/larger', schema2, '', 'properties', schema2, 'larger'],
      ];

      traverse(schema2, {allKeys: true, cb: callback});
      assert.deepStrictEqual(calls, expectedCalls);
    });
  });

  describe('pre and post', function() {
    var schema = {
      type: 'object',
      properties: {
        name: {type: 'string'},
        age: {type: 'number'}
      }
    };

    it('should traverse schema in pre-order', function() {
      traverse(schema, {cb: {pre}});
      var expectedCalls = [
        ['pre', schema, '', schema, undefined, undefined, undefined, undefined],
        ['pre', schema.properties.name, '/properties/name', schema, '', 'properties', schema, 'name'],
        ['pre', schema.properties.age, '/properties/age', schema, '', 'properties', schema, 'age'],
      ];
      assert.deepStrictEqual(calls, expectedCalls);
    });

    it('should traverse schema in post-order', function() {
      traverse(schema, {cb: {post}});
      var expectedCalls = [
        ['post', schema.properties.name, '/properties/name', schema, '', 'properties', schema, 'name'],
        ['post', schema.properties.age, '/properties/age', schema, '', 'properties', schema, 'age'],
        ['post', schema, '', schema, undefined, undefined, undefined, undefined],
      ];
      assert.deepStrictEqual(calls, expectedCalls);
    });

    it('should traverse schema in pre- and post-order at the same time', function() {
      traverse(schema, {cb: {pre, post}});
      var expectedCalls = [
        ['pre', schema, '', schema, undefined, undefined, undefined, undefined],
        ['pre', schema.properties.name, '/properties/name', schema, '', 'properties', schema, 'name'],
        ['post', schema.properties.name, '/properties/name', schema, '', 'properties', schema, 'name'],
        ['pre', schema.properties.age, '/properties/age', schema, '', 'properties', schema, 'age'],
        ['post', schema.properties.age, '/properties/age', schema, '', 'properties', schema, 'age'],
        ['post', schema, '', schema, undefined, undefined, undefined, undefined],
      ];
      assert.deepStrictEqual(calls, expectedCalls);
    });
  });

  // records the raw callback arguments
  function callback() {
    calls.push(Array.prototype.slice.call(arguments));
  }

  // records callback arguments tagged with the phase name
  function pre() {
    calls.push(['pre'].concat(Array.prototype.slice.call(arguments)));
  }

  function post() {
    calls.push(['post'].concat(Array.prototype.slice.call(arguments)));
  }
});
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/json-schema-traverse/spec/index.spec.js
index.spec.js
'use strict';

// Fixture for the traverse spec: a schema exercising every traversal
// keyword category (single-subschema keywords, array keywords, props
// keywords, plus a non-schema keyword `required`), together with the
// exact ordered list of callback invocations traverse() must produce.
var schema = {
  additionalItems: subschema('additionalItems'),
  items: subschema('items'),
  contains: subschema('contains'),
  additionalProperties: subschema('additionalProperties'),
  propertyNames: subschema('propertyNames'),
  not: subschema('not'),
  allOf: [
    subschema('allOf_0'),
    subschema('allOf_1'),
    {
      items: [
        subschema('items_0'),
        subschema('items_1'),
      ]
    }
  ],
  anyOf: [
    subschema('anyOf_0'),
    subschema('anyOf_1'),
  ],
  oneOf: [
    subschema('oneOf_0'),
    subschema('oneOf_1'),
  ],
  definitions: {
    foo: subschema('definitions_foo'),
    bar: subschema('definitions_bar'),
  },
  properties: {
    foo: subschema('properties_foo'),
    bar: subschema('properties_bar'),
  },
  patternProperties: {
    foo: subschema('patternProperties_foo'),
    bar: subschema('patternProperties_bar'),
  },
  dependencies: {
    foo: subschema('dependencies_foo'),
    bar: subschema('dependencies_bar'),
  },
  required: ['foo', 'bar']
};

// Build a small subschema whose property names embed `keyword`, so every
// expected callback can be traced back to the keyword it was reached through.
function subschema(keyword) {
  var sch = {
    properties: {},
    additionalProperties: false,
    additionalItems: false,
    anyOf: [
      {format: 'email'},
      {format: 'hostname'}
    ]
  };
  sch.properties['foo_' + keyword] = {title: 'foo'};
  sch.properties['bar_' + keyword] = {title: 'bar'};
  return sch;
}

module.exports = {
  schema: schema,
  // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
  // Order matters: it mirrors traverse()'s for-in iteration over `schema`
  // above, with the anonymous third allOf member expanded inline.
  expectedCalls: [[schema, '', schema, undefined, undefined, undefined, undefined]]
  .concat(expectedCalls('additionalItems'))
  .concat(expectedCalls('items'))
  .concat(expectedCalls('contains'))
  .concat(expectedCalls('additionalProperties'))
  .concat(expectedCalls('propertyNames'))
  .concat(expectedCalls('not'))
  .concat(expectedCallsChild('allOf', 0))
  .concat(expectedCallsChild('allOf', 1))
  .concat([
    [schema.allOf[2], '/allOf/2', schema, '', 'allOf', schema, 2],
    [schema.allOf[2].items[0], '/allOf/2/items/0', schema, '/allOf/2', 'items', schema.allOf[2], 0],
    [schema.allOf[2].items[0].properties.foo_items_0, '/allOf/2/items/0/properties/foo_items_0', schema, '/allOf/2/items/0', 'properties', schema.allOf[2].items[0], 'foo_items_0'],
    [schema.allOf[2].items[0].properties.bar_items_0, '/allOf/2/items/0/properties/bar_items_0', schema, '/allOf/2/items/0', 'properties', schema.allOf[2].items[0], 'bar_items_0'],
    [schema.allOf[2].items[0].anyOf[0], '/allOf/2/items/0/anyOf/0', schema, '/allOf/2/items/0', 'anyOf', schema.allOf[2].items[0], 0],
    [schema.allOf[2].items[0].anyOf[1], '/allOf/2/items/0/anyOf/1', schema, '/allOf/2/items/0', 'anyOf', schema.allOf[2].items[0], 1],
    [schema.allOf[2].items[1], '/allOf/2/items/1', schema, '/allOf/2', 'items', schema.allOf[2], 1],
    [schema.allOf[2].items[1].properties.foo_items_1, '/allOf/2/items/1/properties/foo_items_1', schema, '/allOf/2/items/1', 'properties', schema.allOf[2].items[1], 'foo_items_1'],
    [schema.allOf[2].items[1].properties.bar_items_1, '/allOf/2/items/1/properties/bar_items_1', schema, '/allOf/2/items/1', 'properties', schema.allOf[2].items[1], 'bar_items_1'],
    [schema.allOf[2].items[1].anyOf[0], '/allOf/2/items/1/anyOf/0', schema, '/allOf/2/items/1', 'anyOf', schema.allOf[2].items[1], 0],
    [schema.allOf[2].items[1].anyOf[1], '/allOf/2/items/1/anyOf/1', schema, '/allOf/2/items/1', 'anyOf', schema.allOf[2].items[1], 1]
  ])
  .concat(expectedCallsChild('anyOf', 0))
  .concat(expectedCallsChild('anyOf', 1))
  .concat(expectedCallsChild('oneOf', 0))
  .concat(expectedCallsChild('oneOf', 1))
  .concat(expectedCallsChild('definitions', 'foo'))
  .concat(expectedCallsChild('definitions', 'bar'))
  .concat(expectedCallsChild('properties', 'foo'))
  .concat(expectedCallsChild('properties', 'bar'))
  .concat(expectedCallsChild('patternProperties', 'foo'))
  .concat(expectedCallsChild('patternProperties', 'bar'))
  .concat(expectedCallsChild('dependencies', 'foo'))
  .concat(expectedCallsChild('dependencies', 'bar'))
};

// Expected calls for a subschema stored directly under `keyword`
// (the subschema itself, its two properties, then its two anyOf members).
function expectedCalls(keyword) {
  return [
    [schema[keyword], `/${keyword}`, schema, '', keyword, schema, undefined],
    [schema[keyword].properties[`foo_${keyword}`], `/${keyword}/properties/foo_${keyword}`, schema, `/${keyword}`, 'properties', schema[keyword], `foo_${keyword}`],
    [schema[keyword].properties[`bar_${keyword}`], `/${keyword}/properties/bar_${keyword}`, schema, `/${keyword}`, 'properties', schema[keyword], `bar_${keyword}`],
    [schema[keyword].anyOf[0], `/${keyword}/anyOf/0`, schema, `/${keyword}`, 'anyOf', schema[keyword], 0],
    [schema[keyword].anyOf[1], `/${keyword}/anyOf/1`, schema, `/${keyword}`, 'anyOf', schema[keyword], 1]
  ];
}

// Same as expectedCalls, but for a subschema stored under `keyword` at
// array index or property name `i`.
function expectedCallsChild(keyword, i) {
  return [
    [schema[keyword][i], `/${keyword}/${i}`, schema, '', keyword, schema, i],
    [schema[keyword][i].properties[`foo_${keyword}_${i}`], `/${keyword}/${i}/properties/foo_${keyword}_${i}`, schema, `/${keyword}/${i}`, 'properties', schema[keyword][i], `foo_${keyword}_${i}`],
    [schema[keyword][i].properties[`bar_${keyword}_${i}`], `/${keyword}/${i}/properties/bar_${keyword}_${i}`, schema, `/${keyword}/${i}`, 'properties', schema[keyword][i], `bar_${keyword}_${i}`],
    [schema[keyword][i].anyOf[0], `/${keyword}/${i}/anyOf/0`, schema, `/${keyword}/${i}`, 'anyOf', schema[keyword][i], 0],
    [schema[keyword][i].anyOf[1], `/${keyword}/${i}/anyOf/1`, schema, `/${keyword}/${i}`, 'anyOf', schema[keyword][i], 1]
  ];
}
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/json-schema-traverse/spec/fixtures/schema.js
schema.js
# Determine the Encoding of an HTML Byte Stream This package implements the HTML Standard's [encoding sniffing algorithm](https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm) in all its glory. The most interesting part of this is how it pre-scans the first 1024 bytes in order to search for certain `<meta charset>`-related patterns. ```js const htmlEncodingSniffer = require("html-encoding-sniffer"); const fs = require("fs"); const htmlBuffer = fs.readFileSync("./html-page.html"); const sniffedEncoding = htmlEncodingSniffer(htmlBuffer); ``` The returned value will be a canonical [encoding name](https://encoding.spec.whatwg.org/#names-and-labels) (not a label). You might then combine this with the [whatwg-encoding](https://github.com/jsdom/whatwg-encoding) package to decode the result: ```js const whatwgEncoding = require("whatwg-encoding"); const htmlString = whatwgEncoding.decode(htmlBuffer, sniffedEncoding); ``` ## Options You can pass two potential options to `htmlEncodingSniffer`: ```js const sniffedEncoding = htmlEncodingSniffer(htmlBuffer, { transportLayerEncodingLabel, defaultEncoding }); ``` These represent two possible inputs into the [encoding sniffing algorithm](https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm): - `transportLayerEncodingLabel` is an encoding label that is obtained from the "transport layer" (probably an HTTP `Content-Type` header), which overrides everything but a BOM. - `defaultEncoding` is the ultimate fallback encoding used if no valid encoding is supplied by the transport layer, and no encoding is sniffed from the bytes. It defaults to `"windows-1252"`, as recommended by the algorithm's table of suggested defaults for "All other locales" (including the `en` locale).
## Credits This package was originally based on the excellent work of [@nicolashenry](https://github.com/nicolashenry), [in jsdom](https://github.com/tmpvar/jsdom/blob/16fd85618f2705d181232f6552125872a37164bc/lib/jsdom/living/helpers/encoding.js). It has since been pulled out into this separate package.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/html-encoding-sniffer/README.md
README.md
"use strict"; const whatwgEncoding = require("whatwg-encoding"); // https://html.spec.whatwg.org/#encoding-sniffing-algorithm module.exports = (buffer, { transportLayerEncodingLabel, defaultEncoding = "windows-1252" } = {}) => { let encoding = whatwgEncoding.getBOMEncoding(buffer); // see https://github.com/whatwg/html/issues/1910 if (encoding === null && transportLayerEncodingLabel !== undefined) { encoding = whatwgEncoding.labelToName(transportLayerEncodingLabel); } if (encoding === null) { encoding = prescanMetaCharset(buffer); } if (encoding === null) { encoding = defaultEncoding; } return encoding; }; // https://html.spec.whatwg.org/multipage/syntax.html#prescan-a-byte-stream-to-determine-its-encoding function prescanMetaCharset(buffer) { const l = Math.min(buffer.length, 1024); for (let i = 0; i < l; i++) { let c = buffer[i]; if (c === 0x3C) { // "<" const c1 = buffer[i + 1]; const c2 = buffer[i + 2]; const c3 = buffer[i + 3]; const c4 = buffer[i + 4]; const c5 = buffer[i + 5]; // !-- (comment start) if (c1 === 0x21 && c2 === 0x2D && c3 === 0x2D) { i += 4; for (; i < l; i++) { c = buffer[i]; const cMinus1 = buffer[i - 1]; const cMinus2 = buffer[i - 2]; // --> (comment end) if (c === 0x3E && cMinus1 === 0x2D && cMinus2 === 0x2D) { break; } } } else if ((c1 === 0x4D || c1 === 0x6D) && (c2 === 0x45 || c2 === 0x65) && (c3 === 0x54 || c3 === 0x74) && (c4 === 0x41 || c4 === 0x61) && (isSpaceCharacter(c5) || c5 === 0x2F)) { // "meta" + space or / i += 6; const attributeList = new Set(); let gotPragma = false; let needPragma = null; let charset = null; let attrRes; do { attrRes = getAttribute(buffer, i, l); if (attrRes.attr && !attributeList.has(attrRes.attr.name)) { attributeList.add(attrRes.attr.name); if (attrRes.attr.name === "http-equiv") { gotPragma = attrRes.attr.value === "content-type"; } else if (attrRes.attr.name === "content" && !charset) { charset = extractCharacterEncodingFromMeta(attrRes.attr.value); if (charset !== null) { needPragma = true; } } else 
if (attrRes.attr.name === "charset") { charset = whatwgEncoding.labelToName(attrRes.attr.value); needPragma = false; } } i = attrRes.i; } while (attrRes.attr); if (needPragma === null) { continue; } if (needPragma === true && gotPragma === false) { continue; } if (charset === null) { continue; } if (charset === "UTF-16LE" || charset === "UTF-16BE") { charset = "UTF-8"; } if (charset === "x-user-defined") { charset = "windows-1252"; } return charset; } else if ((c1 >= 0x41 && c1 <= 0x5A) || (c1 >= 0x61 && c1 <= 0x7A)) { // a-z or A-Z for (i += 2; i < l; i++) { c = buffer[i]; // space or > if (isSpaceCharacter(c) || c === 0x3E) { break; } } let attrRes; do { attrRes = getAttribute(buffer, i, l); i = attrRes.i; } while (attrRes.attr); } else if (c1 === 0x21 || c1 === 0x2F || c1 === 0x3F) { // ! or / or ? for (i += 2; i < l; i++) { c = buffer[i]; // > if (c === 0x3E) { break; } } } } } return null; } // https://html.spec.whatwg.org/multipage/syntax.html#concept-get-attributes-when-sniffing function getAttribute(buffer, i, l) { for (; i < l; i++) { let c = buffer[i]; // space or / if (isSpaceCharacter(c) || c === 0x2F) { continue; } // ">" if (c === 0x3E) { break; } let name = ""; let value = ""; nameLoop:for (; i < l; i++) { c = buffer[i]; // "=" if (c === 0x3D && name !== "") { i++; break; } // space if (isSpaceCharacter(c)) { for (i++; i < l; i++) { c = buffer[i]; // space if (isSpaceCharacter(c)) { continue; } // not "=" if (c !== 0x3D) { return { attr: { name, value }, i }; } i++; break nameLoop; } break; } // / or > if (c === 0x2F || c === 0x3E) { return { attr: { name, value }, i }; } // A-Z if (c >= 0x41 && c <= 0x5A) { name += String.fromCharCode(c + 0x20); // lowercase } else { name += String.fromCharCode(c); } } c = buffer[i]; // space if (isSpaceCharacter(c)) { for (i++; i < l; i++) { c = buffer[i]; // space if (isSpaceCharacter(c)) { continue; } else { break; } } } // " or ' if (c === 0x22 || c === 0x27) { const quote = c; for (i++; i < l; i++) { c = 
buffer[i]; if (c === quote) { i++; return { attr: { name, value }, i }; } // A-Z if (c >= 0x41 && c <= 0x5A) { value += String.fromCharCode(c + 0x20); // lowercase } else { value += String.fromCharCode(c); } } } // > if (c === 0x3E) { return { attr: { name, value }, i }; } // A-Z if (c >= 0x41 && c <= 0x5A) { value += String.fromCharCode(c + 0x20); // lowercase } else { value += String.fromCharCode(c); } for (i++; i < l; i++) { c = buffer[i]; // space or > if (isSpaceCharacter(c) || c === 0x3E) { return { attr: { name, value }, i }; } // A-Z if (c >= 0x41 && c <= 0x5A) { value += String.fromCharCode(c + 0x20); // lowercase } else { value += String.fromCharCode(c); } } } return { i }; } function extractCharacterEncodingFromMeta(string) { let position = 0; while (true) { const indexOfCharset = string.substring(position).search(/charset/i); if (indexOfCharset === -1) { return null; } let subPosition = position + indexOfCharset + "charset".length; while (isSpaceCharacter(string[subPosition].charCodeAt(0))) { ++subPosition; } if (string[subPosition] !== "=") { position = subPosition - 1; continue; } ++subPosition; while (isSpaceCharacter(string[subPosition].charCodeAt(0))) { ++subPosition; } position = subPosition; break; } if (string[position] === "\"" || string[position] === "'") { const nextIndex = string.indexOf(string[position], position + 1); if (nextIndex !== -1) { return whatwgEncoding.labelToName(string.substring(position + 1, nextIndex)); } // It is an unmatched quotation mark return null; } if (string.length === position + 1) { return null; } const indexOfASCIIWhitespaceOrSemicolon = string.substring(position + 1).search(/\x09|\x0A|\x0C|\x0D|\x20|;/); const end = indexOfASCIIWhitespaceOrSemicolon === -1 ? string.length : position + indexOfASCIIWhitespaceOrSemicolon + 1; return whatwgEncoding.labelToName(string.substring(position, end)); } function isSpaceCharacter(c) { return c === 0x09 || c === 0x0A || c === 0x0C || c === 0x0D || c === 0x20; }
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/html-encoding-sniffer/lib/html-encoding-sniffer.js
html-encoding-sniffer.js
The MIT License (MIT) ===================== Copyright (c) 2015 Rod Vagg --------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/isstream/LICENSE.md
LICENSE.md
# isStream [![Build Status](https://secure.travis-ci.org/rvagg/isstream.png)](http://travis-ci.org/rvagg/isstream) **Test if an object is a `Stream`** [![NPM](https://nodei.co/npm/isstream.svg)](https://nodei.co/npm/isstream/) The missing `Stream.isStream(obj)`: determine if an object is standard Node.js `Stream`. Works for Node-core `Stream` objects (for 0.8, 0.10, 0.11, and in theory, older and newer versions) and all versions of **[readable-stream](https://github.com/isaacs/readable-stream)**. ## Usage: ```js var isStream = require('isstream') var Stream = require('stream') isStream(new Stream()) // true isStream({}) // false isStream(new Stream.Readable()) // true isStream(new Stream.Writable()) // true isStream(new Stream.Duplex()) // true isStream(new Stream.Transform()) // true isStream(new Stream.PassThrough()) // true ``` ## But wait! There's more! You can also test for `isReadable(obj)`, `isWritable(obj)` and `isDuplex(obj)` to test for implementations of Streams2 (and Streams3) base classes. 
```js var isReadable = require('isstream').isReadable var isWritable = require('isstream').isWritable var isDuplex = require('isstream').isDuplex var Stream = require('stream') isReadable(new Stream()) // false isWritable(new Stream()) // false isDuplex(new Stream()) // false isReadable(new Stream.Readable()) // true isReadable(new Stream.Writable()) // false isReadable(new Stream.Duplex()) // true isReadable(new Stream.Transform()) // true isReadable(new Stream.PassThrough()) // true isWritable(new Stream.Readable()) // false isWritable(new Stream.Writable()) // true isWritable(new Stream.Duplex()) // true isWritable(new Stream.Transform()) // true isWritable(new Stream.PassThrough()) // true isDuplex(new Stream.Readable()) // false isDuplex(new Stream.Writable()) // false isDuplex(new Stream.Duplex()) // true isDuplex(new Stream.Transform()) // true isDuplex(new Stream.PassThrough()) // true ``` *Reminder: when implementing your own streams, please [use **readable-stream** rather than core streams](http://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html).* ## License **isStream** is Copyright (c) 2015 Rod Vagg [@rvagg](https://twitter.com/rvagg) and licenced under the MIT licence. All rights not explicitly granted in the MIT license are reserved. See the included LICENSE.md file for more details.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/isstream/README.md
README.md
# node-http-signature changelog ## 1.1.1 - Version of dependency `assert-plus` updated: old version was missing some license information - Corrected examples in `http_signing.md`, added auto-tests to automatically validate these examples ## 1.1.0 - Bump version of `sshpk` dependency, remove peerDependency on it since it now supports exchanging objects between multiple versions of itself where possible ## 1.0.2 - Bump min version of `jsprim` dependency, to include fixes for using http-signature with `browserify` ## 1.0.1 - Bump minimum version of `sshpk` dependency, to include fixes for whitespace tolerance in key parsing. ## 1.0.0 - First semver release. - #36: Ensure verifySignature does not leak useful timing information - #42: Bring the library up to the latest version of the spec (including the request-target changes) - Support for ECDSA keys and signatures. - Now uses `sshpk` for key parsing, validation and conversion. - Fixes for #21, #47, #39 and compatibility with node 0.8 ## 0.11.0 - Split up HMAC and Signature verification to avoid vulnerabilities where a key intended for use with one can be validated against the other method instead. ## 0.10.2 - Updated versions of most dependencies. - Utility functions exported for PEM => SSH-RSA conversion. - Improvements to tests and examples.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/http-signature/CHANGES.md
CHANGES.md
# Abstract This document describes a way to add origin authentication, message integrity, and replay resistance to HTTP REST requests. It is intended to be used over the HTTPS protocol. # Copyright Notice Copyright (c) 2011 Joyent, Inc. and the persons identified as document authors. All rights reserved. Code Components extracted from this document must include MIT License text. # Introduction This protocol is intended to provide a standard way for clients to sign HTTP requests. RFC2617 (HTTP Authentication) defines Basic and Digest authentication mechanisms, and RFC5246 (TLS 1.2) defines client-auth, both of which are widely employed on the Internet today. However, it is common place that the burdens of PKI prevent web service operators from deploying that methodology, and so many fall back to Basic authentication, which has poor security characteristics. Additionally, OAuth provides a fully-specified alternative for authorization of web service requests, but is not (always) ideal for machine to machine communication, as the key acquisition steps (generally) imply a fixed infrastructure that may not make sense to a service provider (e.g., symmetric keys). Several web service providers have invented their own schemes for signing HTTP requests, but to date, none have been placed in the public domain as a standard. This document serves that purpose. There are no techniques in this proposal that are novel beyond previous art, however, this aims to be a simple mechanism for signing these requests. # Signature Authentication Scheme The "signature" authentication scheme is based on the model that the client must authenticate itself with a digital signature produced by either a private asymmetric key (e.g., RSA) or a shared symmetric key (e.g., HMAC). The scheme is parameterized enough such that it is not bound to any particular key type or signing algorithm. However, it does explicitly assume that clients can send an HTTP `Date` header. 
## Authorization Header The client is expected to send an Authorization header (as defined in RFC 2617) with the following parameterization: credentials := "Signature" params params := 1#(keyId | algorithm | [headers] | [ext] | signature) digitalSignature := plain-string keyId := "keyId" "=" <"> plain-string <"> algorithm := "algorithm" "=" <"> plain-string <"> headers := "headers" "=" <"> 1#headers-value <"> ext := "ext" "=" <"> plain-string <"> signature := "signature" "=" <"> plain-string <"> headers-value := plain-string plain-string = 1*( %x20-21 / %x23-5B / %x5D-7E ) ### Signature Parameters #### keyId REQUIRED. The `keyId` field is an opaque string that the server can use to look up the component they need to validate the signature. It could be an SSH key fingerprint, an LDAP DN, etc. Management of keys and assignment of `keyId` is out of scope for this document. #### algorithm REQUIRED. The `algorithm` parameter is used if the client and server agree on a non-standard digital signature algorithm. The full list of supported signature mechanisms is listed below. #### headers OPTIONAL. The `headers` parameter is used to specify the list of HTTP headers used to sign the request. If specified, it should be a quoted list of HTTP header names, separated by a single space character. By default, only one HTTP header is signed, which is the `Date` header. Note that the list MUST be specified in the order the values are concatenated together during signing. To include the HTTP request line in the signature calculation, use the special `request-line` value. While this is overloading the definition of `headers` in HTTP linguism, the request-line is defined in RFC 2616, and as the outlier from headers in useful signature calculation, it is deemed simpler to simply use `request-line` than to add a separate parameter for it. #### extensions OPTIONAL. The `extensions` parameter is used to include additional information which is covered by the request. 
The content and format of the string is out of scope for this document, and expected to be specified by implementors. #### signature REQUIRED. The `signature` parameter is a `Base64` encoded digital signature generated by the client. The client uses the `algorithm` and `headers` request parameters to form a canonicalized `signing string`. This `signing string` is then signed with the key associated with `keyId` and the algorithm corresponding to `algorithm`. The `signature` parameter is then set to the `Base64` encoding of the signature. ### Signing String Composition In order to generate the string that is signed with a key, the client MUST take the values of each HTTP header specified by `headers` in the order they appear. 1. If the header name is not `request-line` then append the lowercased header name followed with an ASCII colon `:` and an ASCII space ` `. 2. If the header name is `request-line` then append the HTTP request line, otherwise append the header value. 3. If value is not the last value then append an ASCII newline `\n`. The string MUST NOT include a trailing ASCII newline. # Example Requests All requests refer to the following request (body omitted): POST /foo HTTP/1.1 Host: example.org Date: Tue, 07 Jun 2014 20:51:35 GMT Content-Type: application/json Digest: SHA-256=X48E9qOokqqrvdts8nOJRJN3OWDUoyWxBf7kbu9DBPE= Content-Length: 18 The "rsa-key-1" keyId refers to a private key known to the client and a public key known to the server. The "hmac-key-1" keyId refers to key known to the client and server. 
## Default parameterization The authorization header and signature would be generated as: Authorization: Signature keyId="rsa-key-1",algorithm="rsa-sha256",signature="Base64(RSA-SHA256(signing string))" The client would compose the signing string as: date: Tue, 07 Jun 2014 20:51:35 GMT ## Header List The authorization header and signature would be generated as: Authorization: Signature keyId="rsa-key-1",algorithm="rsa-sha256",headers="(request-target) date content-type digest",signature="Base64(RSA-SHA256(signing string))" The client would compose the signing string as (`+ "\n"` inserted for readability): (request-target): post /foo + "\n" date: Tue, 07 Jun 2014 20:51:35 GMT + "\n" content-type: application/json + "\n" digest: SHA-256=Base64(SHA256(Body)) ## Algorithm The authorization header and signature would be generated as: Authorization: Signature keyId="hmac-key-1",algorithm="hmac-sha1",signature="Base64(HMAC-SHA1(signing string))" The client would compose the signing string as: date: Tue, 07 Jun 2014 20:51:35 GMT # Signing Algorithms Currently supported algorithm names are: * rsa-sha1 * rsa-sha256 * rsa-sha512 * dsa-sha1 * hmac-sha1 * hmac-sha256 * hmac-sha512 # Security Considerations ## Default Parameters Note the default parameterization of the `Signature` scheme is only safe if all requests are carried over a secure transport (i.e., TLS). Sending the default scheme over a non-secure transport will leave the request vulnerable to spoofing, tampering, replay/repudiation, and integrity violations (if using the STRIDE threat-modeling methodology). ## Insecure Transports If sending the request over plain HTTP, service providers SHOULD require clients to sign ALL HTTP headers, and the `request-line`. Additionally, service providers SHOULD require `Content-MD5` calculations to be performed to ensure against any tampering from clients. 
## Nonces Nonces are out of scope for this document simply because many service providers fail to implement them correctly, or do not adopt security specifications because of the infrastructure complexity. Given the `header` parameterization, a service provider is fully enabled to add nonce semantics into this scheme by using something like an `x-request-nonce` header, and ensuring it is signed with the `Date` header. ## Clock Skew As the default scheme is to sign the `Date` header, service providers SHOULD protect against logged replay attacks by enforcing a clock skew. The server SHOULD be synchronized with NTP, and the recommendation in this specification is to allow 300s of clock skew (in either direction). ## Required Headers to Sign It is out of scope for this document to dictate what headers a service provider will want to enforce, but service providers SHOULD at minimum include the `Date` header. # References ## Normative References * [RFC2616] Hypertext Transfer Protocol -- HTTP/1.1 * [RFC2617] HTTP Authentication: Basic and Digest Access Authentication * [RFC5246] The Transport Layer Security (TLS) Protocol Version 1.2 ## Informative References Name: Mark Cavage (editor) Company: Joyent, Inc. 
Email: [email protected] URI: http://www.joyent.com # Appendix A - Test Values The following test data uses the RSA (1024b) keys, which we will refer to as `keyId=Test` in the following samples: -----BEGIN PUBLIC KEY----- MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDCFENGw33yGihy92pDjZQhl0C3 6rPJj+CvfSC8+q28hxA161QFNUd13wuCTUcq0Qd2qsBe/2hFyc2DCJJg0h1L78+6 Z4UMR7EOcpfdUE9Hf3m/hs+FUR45uBJeDK1HSFHD8bHKD6kv8FPGfJTotc+2xjJw oYi+1hqp1fIekaxsyQIDAQAB -----END PUBLIC KEY----- -----BEGIN RSA PRIVATE KEY----- MIICXgIBAAKBgQDCFENGw33yGihy92pDjZQhl0C36rPJj+CvfSC8+q28hxA161QF NUd13wuCTUcq0Qd2qsBe/2hFyc2DCJJg0h1L78+6Z4UMR7EOcpfdUE9Hf3m/hs+F UR45uBJeDK1HSFHD8bHKD6kv8FPGfJTotc+2xjJwoYi+1hqp1fIekaxsyQIDAQAB AoGBAJR8ZkCUvx5kzv+utdl7T5MnordT1TvoXXJGXK7ZZ+UuvMNUCdN2QPc4sBiA QWvLw1cSKt5DsKZ8UETpYPy8pPYnnDEz2dDYiaew9+xEpubyeW2oH4Zx71wqBtOK kqwrXa/pzdpiucRRjk6vE6YY7EBBs/g7uanVpGibOVAEsqH1AkEA7DkjVH28WDUg f1nqvfn2Kj6CT7nIcE3jGJsZZ7zlZmBmHFDONMLUrXR/Zm3pR5m0tCmBqa5RK95u 412jt1dPIwJBANJT3v8pnkth48bQo/fKel6uEYyboRtA5/uHuHkZ6FQF7OUkGogc mSJluOdc5t6hI1VsLn0QZEjQZMEOWr+wKSMCQQCC4kXJEsHAve77oP6HtG/IiEn7 kpyUXRNvFsDE0czpJJBvL/aRFUJxuRK91jhjC68sA7NsKMGg5OXb5I5Jj36xAkEA gIT7aFOYBFwGgQAQkWNKLvySgKbAZRTeLBacpHMuQdl1DfdntvAyqpAZ0lY0RKmW G6aFKaqQfOXKCyWoUiVknQJAXrlgySFci/2ueKlIE1QqIiLSZ8V8OlpFLRnb1pzI 7U1yQXnTAEFYM560yJlzUpOb1V4cScGd365tiSMvxLOvTA== -----END RSA PRIVATE KEY----- And all examples use this request: <!-- httpreq --> POST /foo?param=value&pet=dog HTTP/1.1 Host: example.com Date: Thu, 05 Jan 2014 21:31:40 GMT Content-Type: application/json Digest: SHA-256=X48E9qOokqqrvdts8nOJRJN3OWDUoyWxBf7kbu9DBPE= Content-Length: 18 {"hello": "world"} <!-- /httpreq --> ### Default The string to sign would be: <!-- sign {"name": "Default", "options": {"keyId":"Test", "algorithm": "rsa-sha256"}} --> <!-- signstring --> date: Thu, 05 Jan 2014 21:31:40 GMT <!-- /signstring --> The Authorization header would be: <!-- authz --> Authorization: Signature 
keyId="Test",algorithm="rsa-sha256",headers="date",signature="jKyvPcxB4JbmYY4mByyBY7cZfNl4OW9HpFQlG7N4YcJPteKTu4MWCLyk+gIr0wDgqtLWf9NLpMAMimdfsH7FSWGfbMFSrsVTHNTk0rK3usrfFnti1dxsM4jl0kYJCKTGI/UWkqiaxwNiKqGcdlEDrTcUhhsFsOIo8VhddmZTZ8w=" <!-- /authz --> ### All Headers Parameterized to include all headers, the string to sign would be (`+ "\n"` inserted for readability): <!-- sign {"name": "All Headers", "options": {"keyId":"Test", "algorithm": "rsa-sha256", "headers": ["(request-target)", "host", "date", "content-type", "digest", "content-length"]}} --> <!-- signstring --> (request-target): post /foo?param=value&pet=dog host: example.com date: Thu, 05 Jan 2014 21:31:40 GMT content-type: application/json digest: SHA-256=X48E9qOokqqrvdts8nOJRJN3OWDUoyWxBf7kbu9DBPE= content-length: 18 <!-- /signstring --> The Authorization header would be: <!-- authz --> Authorization: Signature keyId="Test",algorithm="rsa-sha256",headers="(request-target) host date content-type digest content-length",signature="Ef7MlxLXoBovhil3AlyjtBwAL9g4TN3tibLj7uuNB3CROat/9KaeQ4hW2NiJ+pZ6HQEOx9vYZAyi+7cmIkmJszJCut5kQLAwuX+Ms/mUFvpKlSo9StS2bMXDBNjOh4Auj774GFj4gwjS+3NhFeoqyr/MuN6HsEnkvn6zdgfE2i0=" <!-- /authz --> ## Generating and verifying signatures using `openssl` The `openssl` commandline tool can be used to generate or verify the signatures listed above. Compose the signing string as usual, and pipe it into the the `openssl dgst` command, then into `openssl enc -base64`, as follows: $ printf 'date: Thu, 05 Jan 2014 21:31:40 GMT' | \ openssl dgst -binary -sign /path/to/private.pem -sha256 | \ openssl enc -base64 jKyvPcxB4JbmYY4mByyBY7cZfNl4OW9Hp... $ The `-sha256` option is necessary to produce an `rsa-sha256` signature. You can select other hash algorithms such as `sha1` by changing this argument. To verify a signature, first save the signature data, Base64-decoded, into a file, then use `openssl dgst` again with the `-verify` option: $ echo 'jKyvPcxB4JbmYY4mByy...' 
| openssl enc -A -d -base64 > signature $ printf 'date: Thu, 05 Jan 2014 21:31:40 GMT' | \ openssl dgst -sha256 -verify /path/to/public.pem -signature ./signature Verified OK $ ## Generating and verifying signatures using `sshpk-sign` You can also generate and check signatures using the `sshpk-sign` tool which is included with the `sshpk` package in `npm`. Compose the signing string as above, and pipe it into `sshpk-sign` as follows: $ printf 'date: Thu, 05 Jan 2014 21:31:40 GMT' | \ sshpk-sign -i /path/to/private.pem jKyvPcxB4JbmYY4mByyBY7cZfNl4OW9Hp... $ This will produce an `rsa-sha256` signature by default, as you can see using the `-v` option: sshpk-sign: using rsa-sha256 with a 1024 bit key You can also use `sshpk-verify` in a similar manner: $ printf 'date: Thu, 05 Jan 2014 21:31:40 GMT' | \ sshpk-verify -i ./public.pem -s 'jKyvPcxB4JbmYY...' OK $
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/http-signature/http_signing.md
http_signing.md
# node-http-signature node-http-signature is a node.js library that has client and server components for Joyent's [HTTP Signature Scheme](http_signing.md). ## Usage Note the example below signs a request with the same key/cert used to start an HTTP server. This is almost certainly not what you actually want, but is just used to illustrate the API calls; you will need to provide your own key management in addition to this library. ### Client ```js var fs = require('fs'); var https = require('https'); var httpSignature = require('http-signature'); var key = fs.readFileSync('./key.pem', 'ascii'); var options = { host: 'localhost', port: 8443, path: '/', method: 'GET', headers: {} }; // Adds a 'Date' header in, signs it, and adds the // 'Authorization' header in. var req = https.request(options, function(res) { console.log(res.statusCode); }); httpSignature.sign(req, { key: key, keyId: './cert.pem' }); req.end(); ``` ### Server ```js var fs = require('fs'); var https = require('https'); var httpSignature = require('http-signature'); var options = { key: fs.readFileSync('./key.pem'), cert: fs.readFileSync('./cert.pem') }; https.createServer(options, function (req, res) { var rc = 200; var parsed = httpSignature.parseRequest(req); var pub = fs.readFileSync(parsed.keyId, 'ascii'); if (!httpSignature.verifySignature(parsed, pub)) rc = 401; res.writeHead(rc); res.end(); }).listen(8443); ``` ## Installation npm install http-signature ## License MIT. ## Bugs See <https://github.com/joyent/node-http-signature/issues>.
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/http-signature/README.md
README.md
var assert = require('assert-plus');
var crypto = require('crypto');
var sshpk = require('sshpk');
var utils = require('http-signature/lib/utils');

var HASH_ALGOS = utils.HASH_ALGOS;
var PK_ALGOS = utils.PK_ALGOS;
var InvalidAlgorithmError = utils.InvalidAlgorithmError;
var HttpSignatureError = utils.HttpSignatureError;
var validateAlgorithm = utils.validateAlgorithm;

///--- Exported API

module.exports = {

  /**
   * Verify RSA/DSA signature against public key. You are expected to pass in
   * an object that was returned from `parse()`.
   *
   * @param {Object} parsedSignature the object you got from `parse`.
   * @param {String} pubkey RSA/DSA public key PEM (or Buffer, or sshpk.Key).
   * @return {Boolean} true if valid, false otherwise.
   * @throws {TypeError} if you pass in bad arguments.
   * @throws {InvalidAlgorithmError}
   */
  verifySignature: function verifySignature(parsedSignature, pubkey) {
    assert.object(parsedSignature, 'parsedSignature');
    // Accept PEM text or a Buffer and normalize to an sshpk.Key object.
    if (typeof (pubkey) === 'string' || Buffer.isBuffer(pubkey))
      pubkey = sshpk.parseKey(pubkey);
    assert.ok(sshpk.Key.isKey(pubkey, [1, 1]), 'pubkey must be a sshpk.Key');

    var alg = validateAlgorithm(parsedSignature.algorithm);
    // HMAC signatures must go through verifyHMAC(); refusing them here keeps
    // a shared secret from being validated as if it were an asymmetric key.
    // The key type must also match the algorithm's key half.
    if (alg[0] === 'hmac' || alg[0] !== pubkey.type)
      return (false);

    var v = pubkey.createVerify(alg[1]);
    v.update(parsedSignature.signingString);
    return (v.verify(parsedSignature.params.signature, 'base64'));
  },

  /**
   * Verify HMAC against shared secret. You are expected to pass in an object
   * that was returned from `parse()`.
   *
   * @param {Object} parsedSignature the object you got from `parse`.
   * @param {String} secret HMAC shared secret.
   * @return {Boolean} true if valid, false otherwise.
   * @throws {TypeError} if you pass in bad arguments.
   * @throws {InvalidAlgorithmError}
   */
  verifyHMAC: function verifyHMAC(parsedSignature, secret) {
    assert.object(parsedSignature, 'parsedHMAC');
    assert.string(secret, 'secret');

    var alg = validateAlgorithm(parsedSignature.algorithm);
    // Mirror image of the check in verifySignature(): only HMAC is allowed.
    if (alg[0] !== 'hmac')
      return (false);

    var hashAlg = alg[1].toUpperCase();

    // Expected MAC over the canonical signing string with the shared secret.
    var hmac = crypto.createHmac(hashAlg, secret);
    hmac.update(parsedSignature.signingString);

    /*
     * Now double-hash to avoid leaking timing information - there's
     * no easy constant-time compare in JS, so we use this approach
     * instead. See for more info:
     * https://www.isecpartners.com/blog/2011/february/double-hmac-
     * verification.aspx
     */
    var h1 = crypto.createHmac(hashAlg, secret);
    h1.update(hmac.digest());
    h1 = h1.digest();
    var h2 = crypto.createHmac(hashAlg, secret);
    // NOTE(review): `new Buffer(...)` is deprecated on modern Node
    // (Buffer.from is the current spelling) — kept as-is here since this
    // code deliberately supports Node 0.8/0.10, per the branches below.
    h2.update(new Buffer(parsedSignature.params.signature, 'base64'));
    h2 = h2.digest();

    /* Node 0.8 returns strings from .digest(). */
    if (typeof (h1) === 'string')
      return (h1 === h2);
    /* And node 0.10 lacks the .equals() method on Buffers. */
    if (Buffer.isBuffer(h1) && !h1.equals)
      return (h1.toString('binary') === h2.toString('binary'));

    return (h1.equals(h2));
  }
};
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/http-signature/lib/verify.js
verify.js
var assert = require('assert-plus'); var sshpk = require('sshpk'); var util = require('util'); var HASH_ALGOS = { 'sha1': true, 'sha256': true, 'sha512': true }; var PK_ALGOS = { 'rsa': true, 'dsa': true, 'ecdsa': true }; function HttpSignatureError(message, caller) { if (Error.captureStackTrace) Error.captureStackTrace(this, caller || HttpSignatureError); this.message = message; this.name = caller.name; } util.inherits(HttpSignatureError, Error); function InvalidAlgorithmError(message) { HttpSignatureError.call(this, message, InvalidAlgorithmError); } util.inherits(InvalidAlgorithmError, HttpSignatureError); function validateAlgorithm(algorithm) { var alg = algorithm.toLowerCase().split('-'); if (alg.length !== 2) { throw (new InvalidAlgorithmError(alg[0].toUpperCase() + ' is not a ' + 'valid algorithm')); } if (alg[0] !== 'hmac' && !PK_ALGOS[alg[0]]) { throw (new InvalidAlgorithmError(alg[0].toUpperCase() + ' type keys ' + 'are not supported')); } if (!HASH_ALGOS[alg[1]]) { throw (new InvalidAlgorithmError(alg[1].toUpperCase() + ' is not a ' + 'supported hash algorithm')); } return (alg); } ///--- API module.exports = { HASH_ALGOS: HASH_ALGOS, PK_ALGOS: PK_ALGOS, HttpSignatureError: HttpSignatureError, InvalidAlgorithmError: InvalidAlgorithmError, validateAlgorithm: validateAlgorithm, /** * Converts an OpenSSH public key (rsa only) to a PKCS#8 PEM file. * * The intent of this module is to interoperate with OpenSSL only, * specifically the node crypto module's `verify` method. * * @param {String} key an OpenSSH public key. * @return {String} PEM encoded form of the RSA public key. * @throws {TypeError} on bad input. * @throws {Error} on invalid ssh key formatted data. */ sshKeyToPEM: function sshKeyToPEM(key) { assert.string(key, 'ssh_key'); var k = sshpk.parseKey(key, 'ssh'); return (k.toString('pem')); }, /** * Generates an OpenSSH fingerprint from an ssh public key. * * @param {String} key an OpenSSH public key. * @return {String} key fingerprint. 
* @throws {TypeError} on bad input. * @throws {Error} if what you passed doesn't look like an ssh public key. */ fingerprint: function fingerprint(key) { assert.string(key, 'ssh_key'); var k = sshpk.parseKey(key, 'ssh'); return (k.fingerprint('md5').toString('hex')); }, /** * Converts a PKGCS#8 PEM file to an OpenSSH public key (rsa) * * The reverse of the above function. */ pemToRsaSSHKey: function pemToRsaSSHKey(pem, comment) { assert.equal('string', typeof (pem), 'typeof pem'); var k = sshpk.parseKey(pem, 'pem'); k.comment = comment; return (k.toString('ssh')); } };
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/http-signature/lib/utils.js
utils.js
var assert = require('assert-plus');
var util = require('util');
var utils = require('http-signature/lib/utils');

///--- Globals

var HASH_ALGOS = utils.HASH_ALGOS;
var PK_ALGOS = utils.PK_ALGOS;
var HttpSignatureError = utils.HttpSignatureError;
var InvalidAlgorithmError = utils.InvalidAlgorithmError;
var validateAlgorithm = utils.validateAlgorithm;

// Top-level parser states: reading the scheme token, then the params list.
var State = {
  New: 0,
  Params: 1
};

// Sub-states while parsing one `name="value"` param of the header.
var ParamsState = {
  Name: 0,
  Quote: 1,
  Value: 2,
  Comma: 3
};

///--- Specific Errors

// Request timestamp fell outside the permitted clock-skew window.
function ExpiredRequestError(message) {
  HttpSignatureError.call(this, message, ExpiredRequestError);
}
util.inherits(ExpiredRequestError, HttpSignatureError);

// Authorization header was syntactically malformed.
function InvalidHeaderError(message) {
  HttpSignatureError.call(this, message, InvalidHeaderError);
}
util.inherits(InvalidHeaderError, HttpSignatureError);

// Header parsed, but its params violate the scheme (e.g. bad algorithm).
function InvalidParamsError(message) {
  HttpSignatureError.call(this, message, InvalidParamsError);
}
util.inherits(InvalidParamsError, HttpSignatureError);

// A required header was absent from the request or from the signed set.
function MissingHeaderError(message) {
  HttpSignatureError.call(this, message, MissingHeaderError);
}
util.inherits(MissingHeaderError, HttpSignatureError);

// An old-draft construct was used while options.strict was set.
function StrictParsingError(message) {
  HttpSignatureError.call(this, message, StrictParsingError);
}
util.inherits(StrictParsingError, HttpSignatureError);

///--- Exported API

module.exports = {

  /**
   * Parses the 'Authorization' header out of an http.ServerRequest object.
   *
   * Note that this API will fully validate the Authorization header, and throw
   * on any error. It will not however check the signature, or the keyId format
   * as those are specific to your environment. You can use the options object
   * to pass in extra constraints.
   *
   * As a response object you can expect this:
   *
   *     {
   *       "scheme": "Signature",
   *       "params": {
   *         "keyId": "foo",
   *         "algorithm": "rsa-sha256",
   *         "headers": [
   *           "date" or "x-date",
   *           "digest"
   *         ],
   *         "signature": "base64"
   *       },
   *       "signingString": "ready to be passed to crypto.verify()"
   *     }
   *
   * @param {Object} request an http.ServerRequest.
   * @param {Object} options an optional options object with:
   *                   - clockSkew: allowed clock skew in seconds (default 300).
   *                   - headers: required header names (def: date or x-date)
   *                   - algorithms: algorithms to support (default: all).
   *                   - strict: should enforce latest spec parsing
   *                             (default: false).
   * @return {Object} parsed out object (see above).
   * @throws {TypeError} on invalid input.
   * @throws {InvalidHeaderError} on an invalid Authorization header error.
   * @throws {InvalidParamsError} if the params in the scheme are invalid.
   * @throws {MissingHeaderError} if the params indicate a header not present,
   *                              either in the request headers from the params,
   *                              or not in the params from a required header
   *                              in options.
   * @throws {StrictParsingError} if old attributes are used in strict parsing
   *                              mode.
   * @throws {ExpiredRequestError} if the value of date or x-date exceeds skew.
   */
  parseRequest: function parseRequest(request, options) {
    assert.object(request, 'request');
    assert.object(request.headers, 'request.headers');
    if (options === undefined) {
      options = {};
    }
    // Default required header: x-date when present, else date.
    if (options.headers === undefined) {
      options.headers = [request.headers['x-date'] ? 'x-date' : 'date'];
    }
    assert.object(options, 'options');
    assert.arrayOfString(options.headers, 'options.headers');
    assert.optionalFinite(options.clockSkew, 'options.clockSkew');

    var authzHeaderName = options.authorizationHeaderName || 'authorization';

    if (!request.headers[authzHeaderName]) {
      throw new MissingHeaderError('no ' + authzHeaderName + ' header ' + 'present in the request');
    }

    options.clockSkew = options.clockSkew || 300;

    var i = 0;
    var state = State.New;
    var substate = ParamsState.Name;
    var tmpName = '';
    var tmpValue = '';

    var parsed = {
      scheme: '',
      params: {},
      signingString: ''
    };

    // Character-by-character state machine over the Authorization header:
    // scheme token first, then comma-separated name="value" params.
    var authz = request.headers[authzHeaderName];
    for (i = 0; i < authz.length; i++) {
      var c = authz.charAt(i);

      switch (Number(state)) {

        case State.New:
          // Scheme runs up to the first space.
          if (c !== ' ') parsed.scheme += c;
          else state = State.Params;
          break;

        case State.Params:
          switch (Number(substate)) {

            case ParamsState.Name:
              var code = c.charCodeAt(0);
              // restricted name of A-Z / a-z
              if ((code >= 0x41 && code <= 0x5a) || // A-Z
                  (code >= 0x61 && code <= 0x7a)) { // a-z
                tmpName += c;
              } else if (c === '=') {
                if (tmpName.length === 0)
                  throw new InvalidHeaderError('bad param format');
                substate = ParamsState.Quote;
              } else {
                throw new InvalidHeaderError('bad param format');
              }
              break;

            case ParamsState.Quote:
              // "=" must be followed immediately by an opening quote.
              if (c === '"') {
                tmpValue = '';
                substate = ParamsState.Value;
              } else {
                throw new InvalidHeaderError('bad param format');
              }
              break;

            case ParamsState.Value:
              // Value runs up to the closing quote (no escaping supported).
              if (c === '"') {
                parsed.params[tmpName] = tmpValue;
                substate = ParamsState.Comma;
              } else {
                tmpValue += c;
              }
              break;

            case ParamsState.Comma:
              if (c === ',') {
                tmpName = '';
                substate = ParamsState.Name;
              } else {
                throw new InvalidHeaderError('bad param format');
              }
              break;

            default:
              throw new Error('Invalid substate');
          }
          break;

        default:
          throw new Error('Invalid substate');
      }
    }

    // No headers param => default signed-header set (x-date or date),
    // otherwise split the space-separated list.
    if (!parsed.params.headers || parsed.params.headers === '') {
      if (request.headers['x-date']) {
        parsed.params.headers = ['x-date'];
      } else {
        parsed.params.headers = ['date'];
      }
    } else {
      parsed.params.headers = parsed.params.headers.split(' ');
    }

    // Minimally validate the parsed object
    if (!parsed.scheme || parsed.scheme !== 'Signature')
      throw new InvalidHeaderError('scheme was not "Signature"');

    if (!parsed.params.keyId)
      throw new InvalidHeaderError('keyId was not specified');

    if (!parsed.params.algorithm)
      throw new InvalidHeaderError('algorithm was not specified');

    if (!parsed.params.signature)
      throw new InvalidHeaderError('signature was not specified');

    // Check the algorithm against the official list
    parsed.params.algorithm = parsed.params.algorithm.toLowerCase();
    try {
      validateAlgorithm(parsed.params.algorithm);
    } catch (e) {
      if (e instanceof InvalidAlgorithmError)
        throw (new InvalidParamsError(parsed.params.algorithm + ' is not ' + 'supported'));
      else
        throw (e);
    }

    // Build the signingString
    for (i = 0; i < parsed.params.headers.length; i++) {
      var h = parsed.params.headers[i].toLowerCase();
      parsed.params.headers[i] = h;

      if (h === 'request-line') {
        if (!options.strict) {
          /*
           * We allow headers from the older spec drafts if strict parsing isn't
           * specified in options.
           */
          parsed.signingString += request.method + ' ' + request.url + ' HTTP/' + request.httpVersion;
        } else {
          /* Strict parsing doesn't allow older draft headers. */
          throw (new StrictParsingError('request-line is not a valid header ' + 'with strict parsing enabled.'));
        }
      } else if (h === '(request-target)') {
        parsed.signingString += '(request-target): ' + request.method.toLowerCase() + ' ' + request.url;
      } else {
        var value = request.headers[h];
        if (value === undefined)
          throw new MissingHeaderError(h + ' was not in the request');
        parsed.signingString += h + ': ' + value;
      }

      // Newline between lines, but never a trailing one.
      if ((i + 1) < parsed.params.headers.length)
        parsed.signingString += '\n';
    }

    // Check against the constraints
    var date;
    if (request.headers.date || request.headers['x-date']) {
      if (request.headers['x-date']) {
        date = new Date(request.headers['x-date']);
      } else {
        date = new Date(request.headers.date);
      }
      var now = new Date();
      var skew = Math.abs(now.getTime() - date.getTime());

      if (skew > options.clockSkew * 1000) {
        throw new ExpiredRequestError('clock skew of ' + (skew / 1000) + 's was greater than ' + options.clockSkew + 's');
      }
    }

    options.headers.forEach(function (hdr) {
      // Remember that we already checked any headers in the params
      // were in the request, so if this passes we're good.
      if (parsed.params.headers.indexOf(hdr.toLowerCase()) < 0)
        throw new MissingHeaderError(hdr + ' was not a signed header');
    });

    if (options.algorithms) {
      if (options.algorithms.indexOf(parsed.params.algorithm) === -1)
        throw new InvalidParamsError(parsed.params.algorithm + ' is not a supported algorithm');
    }

    // Convenience copies for callers (verify.js reads these).
    parsed.algorithm = parsed.params.algorithm.toUpperCase();
    parsed.keyId = parsed.params.keyId;
    return parsed;
  }

};
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/http-signature/lib/parser.js
parser.js
var assert = require('assert-plus');
var crypto = require('crypto');
var http = require('http');
var util = require('util');
var sshpk = require('sshpk');
var jsprim = require('jsprim');
var utils = require('http-signature/lib/utils');
var sprintf = require('util').format;

var HASH_ALGOS = utils.HASH_ALGOS;
var PK_ALGOS = utils.PK_ALGOS;
var InvalidAlgorithmError = utils.InvalidAlgorithmError;
var HttpSignatureError = utils.HttpSignatureError;
var validateAlgorithm = utils.validateAlgorithm;

///--- Globals

// sprintf template for the Authorization header value produced by both
// RequestSigner.sign() and signRequest().
var AUTHZ_FMT =
  'Signature keyId="%s",algorithm="%s",headers="%s",signature="%s"';

///--- Specific Errors

// Raised when a header requested for signing is absent from the request.
function MissingHeaderError(message) {
  HttpSignatureError.call(this, message, MissingHeaderError);
}
util.inherits(MissingHeaderError, HttpSignatureError);

// Raised when an old-draft construct is used with strict parsing enabled.
function StrictParsingError(message) {
  HttpSignatureError.call(this, message, StrictParsingError);
}
util.inherits(StrictParsingError, HttpSignatureError);

/*
 * Incremental builder for an HTTP Signature Authorization header.
 * See createSigner() for the public entry point and accepted options.
 */
function RequestSigner(options) {
  assert.object(options, 'options');

  var alg = [];
  if (options.algorithm !== undefined) {
    assert.string(options.algorithm, 'options.algorithm');
    // validateAlgorithm() returns the pair [keyType, hashAlgorithm].
    alg = validateAlgorithm(options.algorithm);
  }
  this.rs_alg = alg;

  /*
   * RequestSigners come in two varieties: ones with an rs_signFunc, and ones
   * with an rs_signer.
   *
   * rs_signFunc-based RequestSigners have to build up their entire signing
   * string within the rs_lines array and give it to rs_signFunc as a single
   * concat'd blob. rs_signer-based RequestSigners can add a line at a time to
   * their signing state by using rs_signer.update(), thus only needing to
   * buffer the hash function state and one line at a time.
   */
  if (options.sign !== undefined) {
    // Caller supplied an asynchronous sign(data, cb) function.
    assert.func(options.sign, 'options.sign');
    this.rs_signFunc = options.sign;
  } else if (alg[0] === 'hmac' && options.key !== undefined) {
    assert.string(options.keyId, 'options.keyId');
    this.rs_keyId = options.keyId;

    if (typeof (options.key) !== 'string' && !Buffer.isBuffer(options.key))
      throw (new TypeError('options.key for HMAC must be a string or Buffer'));

    /*
     * Make an rs_signer for HMACs, not a rs_signFunc -- HMACs digest their
     * data in chunks rather than requiring it all to be given in one go
     * at the end, so they are more similar to signers than signFuncs.
     */
    this.rs_signer = crypto.createHmac(alg[1].toUpperCase(), options.key);
    // Adapt the node Hmac object to the sshpk signer interface that
    // sign() expects (an object with hashAlgorithm and toString()).
    this.rs_signer.sign = function () {
      var digest = this.digest('base64');
      return ({
        hashAlgorithm: alg[1],
        toString: function () { return (digest); }
      });
    };
  } else if (options.key !== undefined) {
    var key = options.key;
    // Accept PEM text or a Buffer as well as a pre-parsed sshpk key.
    if (typeof (key) === 'string' || Buffer.isBuffer(key))
      key = sshpk.parsePrivateKey(key);

    assert.ok(sshpk.PrivateKey.isPrivateKey(key, [1, 2]),
      'options.key must be a sshpk.PrivateKey');
    this.rs_key = key;

    assert.string(options.keyId, 'options.keyId');
    this.rs_keyId = options.keyId;

    if (!PK_ALGOS[key.type]) {
      throw (new InvalidAlgorithmError(key.type.toUpperCase() + ' type ' +
        'keys are not supported'));
    }

    // If an explicit algorithm was given, its key type must match the key.
    if (alg[0] !== undefined && key.type !== alg[0]) {
      throw (new InvalidAlgorithmError('options.key must be a ' +
        alg[0].toUpperCase() + ' key, was given a ' +
        key.type.toUpperCase() + ' key instead'));
    }

    this.rs_signer = key.createSign(alg[1]);
  } else {
    throw (new TypeError('options.sign (func) or options.key is required'));
  }

  // Headers signed so far, and (signFunc mode only) the buffered lines.
  this.rs_headers = [];
  this.rs_lines = [];
}

/**
 * Adds a header to be signed, with its value, into this signer.
* * @param {String} header * @param {String} value * @return {String} value written */ RequestSigner.prototype.writeHeader = function (header, value) { assert.string(header, 'header'); header = header.toLowerCase(); assert.string(value, 'value'); this.rs_headers.push(header); if (this.rs_signFunc) { this.rs_lines.push(header + ': ' + value); } else { var line = header + ': ' + value; if (this.rs_headers.length > 0) line = '\n' + line; this.rs_signer.update(line); } return (value); }; /** * Adds a default Date header, returning its value. * * @return {String} */ RequestSigner.prototype.writeDateHeader = function () { return (this.writeHeader('date', jsprim.rfc1123(new Date()))); }; /** * Adds the request target line to be signed. * * @param {String} method, HTTP method (e.g. 'get', 'post', 'put') * @param {String} path */ RequestSigner.prototype.writeTarget = function (method, path) { assert.string(method, 'method'); assert.string(path, 'path'); method = method.toLowerCase(); this.writeHeader('(request-target)', method + ' ' + path); }; /** * Calculate the value for the Authorization header on this request * asynchronously. 
 *
 * @param {Func} callback (err, authz)
 */
RequestSigner.prototype.sign = function (cb) {
  assert.func(cb, 'callback');

  // At least one header must have been added via writeHeader()/writeTarget().
  if (this.rs_headers.length < 1)
    throw (new Error('At least one header must be signed'));

  var alg, authz;
  if (this.rs_signFunc) {
    // Custom sign function: hand it the complete signing string at once.
    var data = this.rs_lines.join('\n');
    var self = this;
    this.rs_signFunc(data, function (err, sig) {
      if (err) {
        cb(err);
        return;
      }
      try {
        // The callback must supply keyId/algorithm/signature itself.
        assert.object(sig, 'signature');
        assert.string(sig.keyId, 'signature.keyId');
        assert.string(sig.algorithm, 'signature.algorithm');
        assert.string(sig.signature, 'signature.signature');
        alg = validateAlgorithm(sig.algorithm);

        authz = sprintf(AUTHZ_FMT,
          sig.keyId,
          sig.algorithm,
          self.rs_headers.join(' '),
          sig.signature);
      } catch (e) {
        cb(e);
        return;
      }
      cb(null, authz);
    });
  } else {
    // Built-in signer (HMAC or sshpk key): the data was already digested
    // incrementally by writeHeader(); just finalize the signature here.
    try {
      var sigObj = this.rs_signer.sign();
    } catch (e) {
      cb(e);
      return;
    }
    alg = (this.rs_alg[0] || this.rs_key.type) + '-' + sigObj.hashAlgorithm;
    var signature = sigObj.toString();
    authz = sprintf(AUTHZ_FMT,
      this.rs_keyId,
      alg,
      this.rs_headers.join(' '),
      signature);
    cb(null, authz);
  }
};

///--- Exported API

module.exports = {

  /**
   * Identifies whether a given object is a request signer or not.
   *
   * @param {Object} object, the object to identify
   * @returns {Boolean}
   */
  isSigner: function (obj) {
    if (typeof (obj) === 'object' && obj instanceof RequestSigner)
      return (true);
    return (false);
  },

  /**
   * Creates a request signer, used to asynchronously build a signature
   * for a request (does not have to be an http.ClientRequest).
   *
   * @param {Object} options, either:
   *                   - {String} keyId
   *                   - {String|Buffer} key
   *                   - {String} algorithm (optional, required for HMAC)
   *                 or:
   *                   - {Func} sign (data, cb)
   * @return {RequestSigner}
   */
  createSigner: function createSigner(options) {
    return (new RequestSigner(options));
  },

  /**
   * Adds an 'Authorization' header to an http.ClientRequest object.
   *
   * Note that this API will add a Date header if it's not already set. Any
   * other headers in the options.headers array MUST be present, or this
   * will throw.
   *
   * You shouldn't need to check the return type; it's just there if you want
   * to be pedantic.
   *
   * The optional `strict` flag indicates whether signing should use strict
   * enforcement of version draft-cavage-http-signatures-04 of the spec or
   * beyond. The default is to be loose and support older versions for
   * compatibility.
   *
   * @param {Object} request an instance of http.ClientRequest.
   * @param {Object} options signing parameters object:
   *                   - {String} keyId required.
   *                   - {String} key required (either a PEM or HMAC key).
   *                   - {Array} headers optional; defaults to ['date'].
   *                   - {String} algorithm optional (unless key is HMAC);
   *                              default is the same as the sshpk default
   *                              signing algorithm for the type of key given
   *                   - {String} httpVersion optional; defaults to '1.1'.
   *                   - {Boolean} strict optional; defaults to 'false'.
   * @return {Boolean} true if Authorization (and optionally Date) were added.
   * @throws {TypeError} on bad parameter types (input).
   * @throws {InvalidAlgorithmError} if algorithm was bad or incompatible with
   *                                 the given key.
   * @throws {sshpk.KeyParseError} if key was bad.
   * @throws {MissingHeaderError} if a header to be signed was specified but
   *                              was not present.
   */
  signRequest: function signRequest(request, options) {
    assert.object(request, 'request');
    assert.object(options, 'options');
    assert.optionalString(options.algorithm, 'options.algorithm');
    assert.string(options.keyId, 'options.keyId');
    assert.optionalArrayOfString(options.headers, 'options.headers');
    assert.optionalString(options.httpVersion, 'options.httpVersion');

    // Fill in defaults: Date header, signed-header list, HTTP version.
    if (!request.getHeader('Date'))
      request.setHeader('Date', jsprim.rfc1123(new Date()));
    if (!options.headers)
      options.headers = ['date'];
    if (!options.httpVersion)
      options.httpVersion = '1.1';

    var alg = [];
    if (options.algorithm) {
      options.algorithm = options.algorithm.toLowerCase();
      alg = validateAlgorithm(options.algorithm);
    }

    var i;
    var stringToSign = '';
    for (i = 0; i < options.headers.length; i++) {
      if (typeof (options.headers[i]) !== 'string')
        throw new TypeError('options.headers must be an array of Strings');

      var h = options.headers[i].toLowerCase();

      if (h === 'request-line') {
        if (!options.strict) {
          /*
           * We allow headers from the older spec drafts if strict parsing
           * isn't specified in options.
           */
          stringToSign +=
            request.method + ' ' + request.path + ' HTTP/' +
            options.httpVersion;
        } else {
          /* Strict parsing doesn't allow older draft headers. */
          throw (new StrictParsingError('request-line is not a valid header ' +
            'with strict parsing enabled.'));
        }
      } else if (h === '(request-target)') {
        stringToSign +=
          '(request-target): ' + request.method.toLowerCase() + ' ' +
          request.path;
      } else {
        var value = request.getHeader(h);
        if (value === undefined || value === '') {
          throw new MissingHeaderError(h + ' was not in the request');
        }
        stringToSign += h + ': ' + value;
      }

      // '\n' is a separator between lines, not a terminator.
      if ((i + 1) < options.headers.length)
        stringToSign += '\n';
    }

    /* This is just for unit tests. */
    if (request.hasOwnProperty('_stringToSign')) {
      request._stringToSign = stringToSign;
    }

    var signature;
    if (alg[0] === 'hmac') {
      if (typeof (options.key) !== 'string' && !Buffer.isBuffer(options.key))
        throw (new TypeError('options.key must be a string or Buffer'));

      var hmac = crypto.createHmac(alg[1].toUpperCase(), options.key);
      hmac.update(stringToSign);
      signature = hmac.digest('base64');
    } else {
      var key = options.key;
      if (typeof (key) === 'string' || Buffer.isBuffer(key))
        key = sshpk.parsePrivateKey(options.key);

      assert.ok(sshpk.PrivateKey.isPrivateKey(key, [1, 2]),
        'options.key must be a sshpk.PrivateKey');

      if (!PK_ALGOS[key.type]) {
        throw (new InvalidAlgorithmError(key.type.toUpperCase() + ' type ' +
          'keys are not supported'));
      }

      // If an explicit algorithm was given, its key type must match the key.
      if (alg[0] !== undefined && key.type !== alg[0]) {
        throw (new InvalidAlgorithmError('options.key must be a ' +
          alg[0].toUpperCase() + ' key, was given a ' +
          key.type.toUpperCase() + ' key instead'));
      }

      var signer = key.createSign(alg[1]);
      signer.update(stringToSign);
      var sigObj = signer.sign();
      if (!HASH_ALGOS[sigObj.hashAlgorithm]) {
        throw (new InvalidAlgorithmError(sigObj.hashAlgorithm.toUpperCase() +
          ' is not a supported hash algorithm'));
      }
      // Record the concrete algorithm actually used (e.g. 'rsa-sha256').
      options.algorithm = key.type + '-' + sigObj.hashAlgorithm;
      signature = sigObj.toString();
      assert.notStrictEqual(signature, '', 'empty signature produced');
    }

    var authzHeaderName = options.authorizationHeaderName || 'Authorization';
    request.setHeader(authzHeaderName, sprintf(AUTHZ_FMT,
      options.keyId,
      options.algorithm,
      options.headers.join(' '),
      signature));

    return true;
  }
};
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/http-signature/lib/signer.js
signer.js
;(function (globalScope) { 'use strict'; /* * decimal.js v10.2.0 * An arbitrary-precision Decimal type for JavaScript. * https://github.com/MikeMcl/decimal.js * Copyright (c) 2019 Michael Mclaughlin <[email protected]> * MIT Licence */ // ----------------------------------- EDITABLE DEFAULTS ------------------------------------ // // The maximum exponent magnitude. // The limit on the value of `toExpNeg`, `toExpPos`, `minE` and `maxE`. var EXP_LIMIT = 9e15, // 0 to 9e15 // The limit on the value of `precision`, and on the value of the first argument to // `toDecimalPlaces`, `toExponential`, `toFixed`, `toPrecision` and `toSignificantDigits`. MAX_DIGITS = 1e9, // 0 to 1e9 // Base conversion alphabet. NUMERALS = '0123456789abcdef', // The natural logarithm of 10 (1025 digits). LN10 = '2.3025850929940456840179914546843642076011014886287729760333279009675726096773524802359972050895982983419677840422862486334095254650828067566662873690987816894829072083255546808437998948262331985283935053089653777326288461633662222876982198867465436674744042432743651550489343149393914796194044002221051017141748003688084012647080685567743216228355220114804663715659121373450747856947683463616792101806445070648000277502684916746550586856935673420670581136429224554405758925724208241314695689016758940256776311356919292033376587141660230105703089634572075440370847469940168269282808481184289314848524948644871927809676271275775397027668605952496716674183485704422507197965004714951050492214776567636938662976979522110718264549734772662425709429322582798502585509785265383207606726317164309505995087807523710333101197857547331541421808427543863591778117054309827482385045648019095610299291824318237525357709750539565187697510374970888692180205189339507238539205144634197265287286965110862571492198849978748873771345686209167058', // Pi (1025 digits). 
PI = '3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989380952572010654858632789', // The initial configuration properties of the Decimal constructor. DEFAULTS = { // These values must be integers within the stated ranges (inclusive). // Most of these values can be changed at run-time using the `Decimal.config` method. // The maximum number of significant digits of the result of a calculation or base conversion. // E.g. `Decimal.config({ precision: 20 });` precision: 20, // 1 to MAX_DIGITS // The rounding mode used when rounding to `precision`. // // ROUND_UP 0 Away from zero. // ROUND_DOWN 1 Towards zero. // ROUND_CEIL 2 Towards +Infinity. // ROUND_FLOOR 3 Towards -Infinity. // ROUND_HALF_UP 4 Towards nearest neighbour. If equidistant, up. // ROUND_HALF_DOWN 5 Towards nearest neighbour. If equidistant, down. // ROUND_HALF_EVEN 6 Towards nearest neighbour. If equidistant, towards even neighbour. // ROUND_HALF_CEIL 7 Towards nearest neighbour. If equidistant, towards +Infinity. // ROUND_HALF_FLOOR 8 Towards nearest neighbour. 
If equidistant, towards -Infinity. // // E.g. // `Decimal.rounding = 4;` // `Decimal.rounding = Decimal.ROUND_HALF_UP;` rounding: 4, // 0 to 8 // The modulo mode used when calculating the modulus: a mod n. // The quotient (q = a / n) is calculated according to the corresponding rounding mode. // The remainder (r) is calculated as: r = a - n * q. // // UP 0 The remainder is positive if the dividend is negative, else is negative. // DOWN 1 The remainder has the same sign as the dividend (JavaScript %). // FLOOR 3 The remainder has the same sign as the divisor (Python %). // HALF_EVEN 6 The IEEE 754 remainder function. // EUCLID 9 Euclidian division. q = sign(n) * floor(a / abs(n)). Always positive. // // Truncated division (1), floored division (3), the IEEE 754 remainder (6), and Euclidian // division (9) are commonly used for the modulus operation. The other rounding modes can also // be used, but they may not give useful results. modulo: 1, // 0 to 9 // The exponent value at and beneath which `toString` returns exponential notation. // JavaScript numbers: -7 toExpNeg: -7, // 0 to -EXP_LIMIT // The exponent value at and above which `toString` returns exponential notation. // JavaScript numbers: 21 toExpPos: 21, // 0 to EXP_LIMIT // The minimum exponent value, beneath which underflow to zero occurs. // JavaScript numbers: -324 (5e-324) minE: -EXP_LIMIT, // -1 to -EXP_LIMIT // The maximum exponent value, above which overflow to Infinity occurs. // JavaScript numbers: 308 (1.7976931348623157e+308) maxE: EXP_LIMIT, // 1 to EXP_LIMIT // Whether to use cryptographically-secure random number generation, if available. 
crypto: false // true/false }, // ----------------------------------- END OF EDITABLE DEFAULTS ------------------------------- // Decimal, inexact, noConflict, quadrant, external = true, decimalError = '[DecimalError] ', invalidArgument = decimalError + 'Invalid argument: ', precisionLimitExceeded = decimalError + 'Precision limit exceeded', cryptoUnavailable = decimalError + 'crypto unavailable', mathfloor = Math.floor, mathpow = Math.pow, isBinary = /^0b([01]+(\.[01]*)?|\.[01]+)(p[+-]?\d+)?$/i, isHex = /^0x([0-9a-f]+(\.[0-9a-f]*)?|\.[0-9a-f]+)(p[+-]?\d+)?$/i, isOctal = /^0o([0-7]+(\.[0-7]*)?|\.[0-7]+)(p[+-]?\d+)?$/i, isDecimal = /^(\d+(\.\d*)?|\.\d+)(e[+-]?\d+)?$/i, BASE = 1e7, LOG_BASE = 7, MAX_SAFE_INTEGER = 9007199254740991, LN10_PRECISION = LN10.length - 1, PI_PRECISION = PI.length - 1, // Decimal.prototype object P = { name: '[object Decimal]' }; // Decimal prototype methods /* * absoluteValue abs * ceil * comparedTo cmp * cosine cos * cubeRoot cbrt * decimalPlaces dp * dividedBy div * dividedToIntegerBy divToInt * equals eq * floor * greaterThan gt * greaterThanOrEqualTo gte * hyperbolicCosine cosh * hyperbolicSine sinh * hyperbolicTangent tanh * inverseCosine acos * inverseHyperbolicCosine acosh * inverseHyperbolicSine asinh * inverseHyperbolicTangent atanh * inverseSine asin * inverseTangent atan * isFinite * isInteger isInt * isNaN * isNegative isNeg * isPositive isPos * isZero * lessThan lt * lessThanOrEqualTo lte * logarithm log * [maximum] [max] * [minimum] [min] * minus sub * modulo mod * naturalExponential exp * naturalLogarithm ln * negated neg * plus add * precision sd * round * sine sin * squareRoot sqrt * tangent tan * times mul * toBinary * toDecimalPlaces toDP * toExponential * toFixed * toFraction * toHexadecimal toHex * toNearest * toNumber * toOctal * toPower pow * toPrecision * toSignificantDigits toSD * toString * truncated trunc * valueOf toJSON */ /* * Return a new Decimal whose value is the absolute value of this Decimal. 
* */ P.absoluteValue = P.abs = function () { var x = new this.constructor(this); if (x.s < 0) x.s = 1; return finalise(x); }; /* * Return a new Decimal whose value is the value of this Decimal rounded to a whole number in the * direction of positive Infinity. * */ P.ceil = function () { return finalise(new this.constructor(this), this.e + 1, 2); }; /* * Return * 1 if the value of this Decimal is greater than the value of `y`, * -1 if the value of this Decimal is less than the value of `y`, * 0 if they have the same value, * NaN if the value of either Decimal is NaN. * */ P.comparedTo = P.cmp = function (y) { var i, j, xdL, ydL, x = this, xd = x.d, yd = (y = new x.constructor(y)).d, xs = x.s, ys = y.s; // Either NaN or ±Infinity? if (!xd || !yd) { return !xs || !ys ? NaN : xs !== ys ? xs : xd === yd ? 0 : !xd ^ xs < 0 ? 1 : -1; } // Either zero? if (!xd[0] || !yd[0]) return xd[0] ? xs : yd[0] ? -ys : 0; // Signs differ? if (xs !== ys) return xs; // Compare exponents. if (x.e !== y.e) return x.e > y.e ^ xs < 0 ? 1 : -1; xdL = xd.length; ydL = yd.length; // Compare digit by digit. for (i = 0, j = xdL < ydL ? xdL : ydL; i < j; ++i) { if (xd[i] !== yd[i]) return xd[i] > yd[i] ^ xs < 0 ? 1 : -1; } // Compare lengths. return xdL === ydL ? 0 : xdL > ydL ^ xs < 0 ? 1 : -1; }; /* * Return a new Decimal whose value is the cosine of the value in radians of this Decimal. * * Domain: [-Infinity, Infinity] * Range: [-1, 1] * * cos(0) = 1 * cos(-0) = 1 * cos(Infinity) = NaN * cos(-Infinity) = NaN * cos(NaN) = NaN * */ P.cosine = P.cos = function () { var pr, rm, x = this, Ctor = x.constructor; if (!x.d) return new Ctor(NaN); // cos(0) = cos(-0) = 1 if (!x.d[0]) return new Ctor(1); pr = Ctor.precision; rm = Ctor.rounding; Ctor.precision = pr + Math.max(x.e, x.sd()) + LOG_BASE; Ctor.rounding = 1; x = cosine(Ctor, toLessThanHalfPi(Ctor, x)); Ctor.precision = pr; Ctor.rounding = rm; return finalise(quadrant == 2 || quadrant == 3 ? 
x.neg() : x, pr, rm, true); }; /* * * Return a new Decimal whose value is the cube root of the value of this Decimal, rounded to * `precision` significant digits using rounding mode `rounding`. * * cbrt(0) = 0 * cbrt(-0) = -0 * cbrt(1) = 1 * cbrt(-1) = -1 * cbrt(N) = N * cbrt(-I) = -I * cbrt(I) = I * * Math.cbrt(x) = (x < 0 ? -Math.pow(-x, 1/3) : Math.pow(x, 1/3)) * */ P.cubeRoot = P.cbrt = function () { var e, m, n, r, rep, s, sd, t, t3, t3plusx, x = this, Ctor = x.constructor; if (!x.isFinite() || x.isZero()) return new Ctor(x); external = false; // Initial estimate. s = x.s * mathpow(x.s * x, 1 / 3); // Math.cbrt underflow/overflow? // Pass x to Math.pow as integer, then adjust the exponent of the result. if (!s || Math.abs(s) == 1 / 0) { n = digitsToString(x.d); e = x.e; // Adjust n exponent so it is a multiple of 3 away from x exponent. if (s = (e - n.length + 1) % 3) n += (s == 1 || s == -2 ? '0' : '00'); s = mathpow(n, 1 / 3); // Rarely, e may be one less than the result exponent value. e = mathfloor((e + 1) / 3) - (e % 3 == (e < 0 ? -1 : 2)); if (s == 1 / 0) { n = '5e' + e; } else { n = s.toExponential(); n = n.slice(0, n.indexOf('e') + 1) + e; } r = new Ctor(n); r.s = x.s; } else { r = new Ctor(s.toString()); } sd = (e = Ctor.precision) + 3; // Halley's method. // TODO? Compare Newton's method. for (;;) { t = r; t3 = t.times(t).times(t); t3plusx = t3.plus(x); r = divide(t3plusx.plus(x).times(t), t3plusx.plus(t3), sd + 2, 1); // TODO? Replace with for-loop and checkRoundingDigits. if (digitsToString(t.d).slice(0, sd) === (n = digitsToString(r.d)).slice(0, sd)) { n = n.slice(sd - 3, sd + 1); // The 4th rounding digit may be in error by -1 so if the 4 rounding digits are 9999 or 4999 // , i.e. approaching a rounding boundary, continue the iteration. if (n == '9999' || !rep && n == '4999') { // On the first iteration only, check to see if rounding up gives the exact result as the // nines may infinitely repeat. 
if (!rep) { finalise(t, e + 1, 0); if (t.times(t).times(t).eq(x)) { r = t; break; } } sd += 4; rep = 1; } else { // If the rounding digits are null, 0{0,4} or 50{0,3}, check for an exact result. // If not, then there are further digits and m will be truthy. if (!+n || !+n.slice(1) && n.charAt(0) == '5') { // Truncate to the first rounding digit. finalise(r, e + 1, 1); m = !r.times(r).times(r).eq(x); } break; } } } external = true; return finalise(r, e, Ctor.rounding, m); }; /* * Return the number of decimal places of the value of this Decimal. * */ P.decimalPlaces = P.dp = function () { var w, d = this.d, n = NaN; if (d) { w = d.length - 1; n = (w - mathfloor(this.e / LOG_BASE)) * LOG_BASE; // Subtract the number of trailing zeros of the last word. w = d[w]; if (w) for (; w % 10 == 0; w /= 10) n--; if (n < 0) n = 0; } return n; }; /* * n / 0 = I * n / N = N * n / I = 0 * 0 / n = 0 * 0 / 0 = N * 0 / N = N * 0 / I = 0 * N / n = N * N / 0 = N * N / N = N * N / I = N * I / n = I * I / 0 = I * I / N = N * I / I = N * * Return a new Decimal whose value is the value of this Decimal divided by `y`, rounded to * `precision` significant digits using rounding mode `rounding`. * */ P.dividedBy = P.div = function (y) { return divide(this, new this.constructor(y)); }; /* * Return a new Decimal whose value is the integer part of dividing the value of this Decimal * by the value of `y`, rounded to `precision` significant digits using rounding mode `rounding`. * */ P.dividedToIntegerBy = P.divToInt = function (y) { var x = this, Ctor = x.constructor; return finalise(divide(x, new Ctor(y), 0, 1, 1), Ctor.precision, Ctor.rounding); }; /* * Return true if the value of this Decimal is equal to the value of `y`, otherwise return false. * */ P.equals = P.eq = function (y) { return this.cmp(y) === 0; }; /* * Return a new Decimal whose value is the value of this Decimal rounded to a whole number in the * direction of negative Infinity. 
* */ P.floor = function () { return finalise(new this.constructor(this), this.e + 1, 3); }; /* * Return true if the value of this Decimal is greater than the value of `y`, otherwise return * false. * */ P.greaterThan = P.gt = function (y) { return this.cmp(y) > 0; }; /* * Return true if the value of this Decimal is greater than or equal to the value of `y`, * otherwise return false. * */ P.greaterThanOrEqualTo = P.gte = function (y) { var k = this.cmp(y); return k == 1 || k === 0; }; /* * Return a new Decimal whose value is the hyperbolic cosine of the value in radians of this * Decimal. * * Domain: [-Infinity, Infinity] * Range: [1, Infinity] * * cosh(x) = 1 + x^2/2! + x^4/4! + x^6/6! + ... * * cosh(0) = 1 * cosh(-0) = 1 * cosh(Infinity) = Infinity * cosh(-Infinity) = Infinity * cosh(NaN) = NaN * * x time taken (ms) result * 1000 9 9.8503555700852349694e+433 * 10000 25 4.4034091128314607936e+4342 * 100000 171 1.4033316802130615897e+43429 * 1000000 3817 1.5166076984010437725e+434294 * 10000000 abandoned after 2 minute wait * * TODO? Compare performance of cosh(x) = 0.5 * (exp(x) + exp(-x)) * */ P.hyperbolicCosine = P.cosh = function () { var k, n, pr, rm, len, x = this, Ctor = x.constructor, one = new Ctor(1); if (!x.isFinite()) return new Ctor(x.s ? 1 / 0 : NaN); if (x.isZero()) return one; pr = Ctor.precision; rm = Ctor.rounding; Ctor.precision = pr + Math.max(x.e, x.sd()) + 4; Ctor.rounding = 1; len = x.d.length; // Argument reduction: cos(4x) = 1 - 8cos^2(x) + 8cos^4(x) + 1 // i.e. cos(x) = 1 - cos^2(x/4)(8 - 8cos^2(x/4)) // Estimate the optimum number of times to use the argument reduction. // TODO? Estimation reused from cosine() and may not be optimal here. 
if (len < 32) { k = Math.ceil(len / 3); n = (1 / tinyPow(4, k)).toString(); } else { k = 16; n = '2.3283064365386962890625e-10'; } x = taylorSeries(Ctor, 1, x.times(n), new Ctor(1), true); // Reverse argument reduction var cosh2_x, i = k, d8 = new Ctor(8); for (; i--;) { cosh2_x = x.times(x); x = one.minus(cosh2_x.times(d8.minus(cosh2_x.times(d8)))); } return finalise(x, Ctor.precision = pr, Ctor.rounding = rm, true); }; /* * Return a new Decimal whose value is the hyperbolic sine of the value in radians of this * Decimal. * * Domain: [-Infinity, Infinity] * Range: [-Infinity, Infinity] * * sinh(x) = x + x^3/3! + x^5/5! + x^7/7! + ... * * sinh(0) = 0 * sinh(-0) = -0 * sinh(Infinity) = Infinity * sinh(-Infinity) = -Infinity * sinh(NaN) = NaN * * x time taken (ms) * 10 2 ms * 100 5 ms * 1000 14 ms * 10000 82 ms * 100000 886 ms 1.4033316802130615897e+43429 * 200000 2613 ms * 300000 5407 ms * 400000 8824 ms * 500000 13026 ms 8.7080643612718084129e+217146 * 1000000 48543 ms * * TODO? Compare performance of sinh(x) = 0.5 * (exp(x) - exp(-x)) * */ P.hyperbolicSine = P.sinh = function () { var k, pr, rm, len, x = this, Ctor = x.constructor; if (!x.isFinite() || x.isZero()) return new Ctor(x); pr = Ctor.precision; rm = Ctor.rounding; Ctor.precision = pr + Math.max(x.e, x.sd()) + 4; Ctor.rounding = 1; len = x.d.length; if (len < 3) { x = taylorSeries(Ctor, 2, x, x, true); } else { // Alternative argument reduction: sinh(3x) = sinh(x)(3 + 4sinh^2(x)) // i.e. sinh(x) = sinh(x/3)(3 + 4sinh^2(x/3)) // 3 multiplications and 1 addition // Argument reduction: sinh(5x) = sinh(x)(5 + sinh^2(x)(20 + 16sinh^2(x))) // i.e. sinh(x) = sinh(x/5)(5 + sinh^2(x/5)(20 + 16sinh^2(x/5))) // 4 multiplications and 2 additions // Estimate the optimum number of times to use the argument reduction. k = 1.4 * Math.sqrt(len); k = k > 16 ? 
16 : k | 0; x = x.times(1 / tinyPow(5, k)); x = taylorSeries(Ctor, 2, x, x, true); // Reverse argument reduction var sinh2_x, d5 = new Ctor(5), d16 = new Ctor(16), d20 = new Ctor(20); for (; k--;) { sinh2_x = x.times(x); x = x.times(d5.plus(sinh2_x.times(d16.times(sinh2_x).plus(d20)))); } } Ctor.precision = pr; Ctor.rounding = rm; return finalise(x, pr, rm, true); }; /* * Return a new Decimal whose value is the hyperbolic tangent of the value in radians of this * Decimal. * * Domain: [-Infinity, Infinity] * Range: [-1, 1] * * tanh(x) = sinh(x) / cosh(x) * * tanh(0) = 0 * tanh(-0) = -0 * tanh(Infinity) = 1 * tanh(-Infinity) = -1 * tanh(NaN) = NaN * */ P.hyperbolicTangent = P.tanh = function () { var pr, rm, x = this, Ctor = x.constructor; if (!x.isFinite()) return new Ctor(x.s); if (x.isZero()) return new Ctor(x); pr = Ctor.precision; rm = Ctor.rounding; Ctor.precision = pr + 7; Ctor.rounding = 1; return divide(x.sinh(), x.cosh(), Ctor.precision = pr, Ctor.rounding = rm); }; /* * Return a new Decimal whose value is the arccosine (inverse cosine) in radians of the value of * this Decimal. * * Domain: [-1, 1] * Range: [0, pi] * * acos(x) = pi/2 - asin(x) * * acos(0) = pi/2 * acos(-0) = pi/2 * acos(1) = 0 * acos(-1) = pi * acos(1/2) = pi/3 * acos(-1/2) = 2*pi/3 * acos(|x| > 1) = NaN * acos(NaN) = NaN * */ P.inverseCosine = P.acos = function () { var halfPi, x = this, Ctor = x.constructor, k = x.abs().cmp(1), pr = Ctor.precision, rm = Ctor.rounding; if (k !== -1) { return k === 0 // |x| is 1 ? x.isNeg() ? getPi(Ctor, pr, rm) : new Ctor(0) // |x| > 1 or x is NaN : new Ctor(NaN); } if (x.isZero()) return getPi(Ctor, pr + 4, rm).times(0.5); // TODO? 
Special case acos(0.5) = pi/3 and acos(-0.5) = 2*pi/3

  Ctor.precision = pr + 6;
  Ctor.rounding = 1;

  x = x.asin();
  halfPi = getPi(Ctor, pr + 4, rm).times(0.5);

  Ctor.precision = pr;
  Ctor.rounding = rm;

  // acos(x) = pi/2 - asin(x)
  return halfPi.minus(x);
};


/*
 * Return a new Decimal whose value is the inverse of the hyperbolic cosine in radians of the
 * value of this Decimal.
 *
 * Domain: [1, Infinity]
 * Range: [0, Infinity]
 *
 * acosh(x) = ln(x + sqrt(x^2 - 1))
 *
 * acosh(x < 1) = NaN
 * acosh(NaN) = NaN
 * acosh(Infinity) = Infinity
 * acosh(-Infinity) = NaN
 * acosh(0) = NaN
 * acosh(-0) = NaN
 * acosh(1) = 0
 * acosh(-1) = NaN
 *
 */
P.inverseHyperbolicCosine = P.acosh = function () {
  var pr, rm,
    x = this,
    Ctor = x.constructor;

  if (x.lte(1)) return new Ctor(x.eq(1) ? 0 : NaN);
  if (!x.isFinite()) return new Ctor(x);

  pr = Ctor.precision;
  rm = Ctor.rounding;
  Ctor.precision = pr + Math.max(Math.abs(x.e), x.sd()) + 4;
  Ctor.rounding = 1;
  // Prevent rounding of intermediate calculations.
  external = false;

  x = x.times(x).minus(1).sqrt().plus(x);

  external = true;
  Ctor.precision = pr;
  Ctor.rounding = rm;

  return x.ln();
};


/*
 * Return a new Decimal whose value is the inverse of the hyperbolic sine in radians of the value
 * of this Decimal.
 *
 * Domain: [-Infinity, Infinity]
 * Range: [-Infinity, Infinity]
 *
 * asinh(x) = ln(x + sqrt(x^2 + 1))
 *
 * asinh(NaN) = NaN
 * asinh(Infinity) = Infinity
 * asinh(-Infinity) = -Infinity
 * asinh(0) = 0
 * asinh(-0) = -0
 *
 */
P.inverseHyperbolicSine = P.asinh = function () {
  var pr, rm,
    x = this,
    Ctor = x.constructor;

  if (!x.isFinite() || x.isZero()) return new Ctor(x);

  pr = Ctor.precision;
  rm = Ctor.rounding;
  Ctor.precision = pr + 2 * Math.max(Math.abs(x.e), x.sd()) + 6;
  Ctor.rounding = 1;
  // Prevent rounding of intermediate calculations.
  external = false;

  x = x.times(x).plus(1).sqrt().plus(x);

  external = true;
  Ctor.precision = pr;
  Ctor.rounding = rm;

  return x.ln();
};


/*
 * Return a new Decimal whose value is the inverse of the hyperbolic tangent in radians of the
 * value of this Decimal.
 *
 * Domain: [-1, 1]
 * Range: [-Infinity, Infinity]
 *
 * atanh(x) = 0.5 * ln((1 + x) / (1 - x))
 *
 * atanh(|x| > 1) = NaN
 * atanh(NaN) = NaN
 * atanh(Infinity) = NaN
 * atanh(-Infinity) = NaN
 * atanh(0) = 0
 * atanh(-0) = -0
 * atanh(1) = Infinity
 * atanh(-1) = -Infinity
 *
 */
P.inverseHyperbolicTangent = P.atanh = function () {
  var pr, rm, wpr, xsd,
    x = this,
    Ctor = x.constructor;

  if (!x.isFinite()) return new Ctor(NaN);
  // x.e >= 0 means |x| >= 1: exact poles at ±1 (x.s / 0 gives signed Infinity), NaN outside.
  if (x.e >= 0) return new Ctor(x.abs().eq(1) ? x.s / 0 : x.isZero() ? x : NaN);

  pr = Ctor.precision;
  rm = Ctor.rounding;
  xsd = x.sd();

  // For very small |x|, atanh(x) rounds to x at the requested precision.
  if (Math.max(xsd, pr) < 2 * -x.e - 1) return finalise(new Ctor(x), pr, rm, true);

  Ctor.precision = wpr = xsd - x.e;

  // (1 + x) / (1 - x)
  x = divide(x.plus(1), new Ctor(1).minus(x), wpr + pr, 1);

  Ctor.precision = pr + 4;
  Ctor.rounding = 1;

  x = x.ln();

  Ctor.precision = pr;
  Ctor.rounding = rm;

  return x.times(0.5);
};


/*
 * Return a new Decimal whose value is the arcsine (inverse sine) in radians of the value of this
 * Decimal.
 *
 * Domain: [-1, 1]
 * Range: [-pi/2, pi/2]
 *
 * asin(x) = 2*atan(x/(1 + sqrt(1 - x^2)))
 *
 * asin(0) = 0
 * asin(-0) = -0
 * asin(1/2) = pi/6
 * asin(-1/2) = -pi/6
 * asin(1) = pi/2
 * asin(-1) = -pi/2
 * asin(|x| > 1) = NaN
 * asin(NaN) = NaN
 *
 * TODO? Compare performance of Taylor series.
 *
 */
P.inverseSine = P.asin = function () {
  var halfPi, k,
    pr, rm,
    x = this,
    Ctor = x.constructor;

  if (x.isZero()) return new Ctor(x);

  k = x.abs().cmp(1);
  pr = Ctor.precision;
  rm = Ctor.rounding;

  if (k !== -1) {

    // |x| is 1
    if (k === 0) {
      halfPi = getPi(Ctor, pr + 4, rm).times(0.5);
      halfPi.s = x.s;
      return halfPi;
    }

    // |x| > 1 or x is NaN
    return new Ctor(NaN);
  }

  // TODO? Special case asin(1/2) = pi/6 and asin(-1/2) = -pi/6

  Ctor.precision = pr + 6;
  Ctor.rounding = 1;

  x = x.div(new Ctor(1).minus(x.times(x)).sqrt().plus(1)).atan();

  Ctor.precision = pr;
  Ctor.rounding = rm;

  return x.times(2);
};


/*
 * Return a new Decimal whose value is the arctangent (inverse tangent) in radians of the value
 * of this Decimal.
 *
 * Domain: [-Infinity, Infinity]
 * Range: [-pi/2, pi/2]
 *
 * atan(x) = x - x^3/3 + x^5/5 - x^7/7 + ...
 *
 * atan(0) = 0
 * atan(-0) = -0
 * atan(1) = pi/4
 * atan(-1) = -pi/4
 * atan(Infinity) = pi/2
 * atan(-Infinity) = -pi/2
 * atan(NaN) = NaN
 *
 */
P.inverseTangent = P.atan = function () {
  var i, j, k, n, px, t, r, wpr, x2,
    x = this,
    Ctor = x.constructor,
    pr = Ctor.precision,
    rm = Ctor.rounding;

  if (!x.isFinite()) {
    if (!x.s) return new Ctor(NaN);
    if (pr + 4 <= PI_PRECISION) {
      r = getPi(Ctor, pr + 4, rm).times(0.5);
      r.s = x.s;
      return r;
    }
  } else if (x.isZero()) {
    return new Ctor(x);
  } else if (x.abs().eq(1) && pr + 4 <= PI_PRECISION) {
    r = getPi(Ctor, pr + 4, rm).times(0.25);
    r.s = x.s;
    return r;
  }

  Ctor.precision = wpr = pr + 10;
  Ctor.rounding = 1;

  // TODO? if (x >= 1 && pr <= PI_PRECISION) atan(x) = halfPi * x.s - atan(1 / x);

  // Argument reduction
  // Ensure |x| < 0.42
  // atan(x) = 2 * atan(x / (1 + sqrt(1 + x^2)))

  k = Math.min(28, wpr / LOG_BASE + 2 | 0);

  for (i = k; i; --i) x = x.div(x.times(x).plus(1).sqrt().plus(1));

  // Prevent rounding of intermediate calculations.
  external = false;

  j = Math.ceil(wpr / LOG_BASE);
  n = 1;
  x2 = x.times(x);
  r = new Ctor(x);
  px = x;

  // atan(x) = x - x^3/3 + x^5/5 - x^7/7 + ...
  for (; i !== -1;) {
    px = px.times(x2);
    t = r.minus(px.div(n += 2));

    px = px.times(x2);
    r = t.plus(px.div(n += 2));

    // Stop once two successive partial sums agree in the digits that matter.
    if (r.d[j] !== void 0) for (i = j; r.d[i] === t.d[i] && i--;);
  }

  // Undo the k halvings applied above: atan(x) = 2^k * atan(reduced x).
  // 2 << (k - 1) equals 2^k for k >= 1.
  if (k) r = r.times(2 << (k - 1));

  external = true;

  return finalise(r, Ctor.precision = pr, Ctor.rounding = rm, true);
};


/*
 * Return true if the value of this Decimal is a finite number, otherwise return false.
 *
 */
P.isFinite = function () {
  // Non-finite values (NaN, ±Infinity) have no digit array.
  return !!this.d;
};


/*
 * Return true if the value of this Decimal is an integer, otherwise return false.
 *
 */
P.isInteger = P.isInt = function () {
  return !!this.d && mathfloor(this.e / LOG_BASE) > this.d.length - 2;
};


/*
 * Return true if the value of this Decimal is NaN, otherwise return false.
 *
 */
P.isNaN = function () {
  // NaN is represented with sign `s` of null/undefined.
  return !this.s;
};


/*
 * Return true if the value of this Decimal is negative, otherwise return false.
 *
 */
P.isNegative = P.isNeg = function () {
  return this.s < 0;
};


/*
 * Return true if the value of this Decimal is positive, otherwise return false.
 *
 */
P.isPositive = P.isPos = function () {
  return this.s > 0;
};


/*
 * Return true if the value of this Decimal is 0 or -0, otherwise return false.
 *
 */
P.isZero = function () {
  return !!this.d && this.d[0] === 0;
};


/*
 * Return true if the value of this Decimal is less than `y`, otherwise return false.
 *
 */
P.lessThan = P.lt = function (y) {
  return this.cmp(y) < 0;
};


/*
 * Return true if the value of this Decimal is less than or equal to `y`, otherwise return false.
 *
 */
P.lessThanOrEqualTo = P.lte = function (y) {
  return this.cmp(y) < 1;
};


/*
 * Return the logarithm of the value of this Decimal to the specified base, rounded to `precision`
 * significant digits using rounding mode `rounding`.
 *
 * If no base is specified, return log[10](arg).
 *
 * log[base](arg) = ln(arg) / ln(base)
 *
 * The result will always be correctly rounded if the base of the log is 10, and 'almost always'
 * otherwise:
 *
 * Depending on the rounding mode, the result may be incorrectly rounded if the first fifteen
 * rounding digits are [49]99999999999999 or [50]00000000000000. In that case, the maximum error
 * between the result and the correctly rounded result will be one ulp (unit in the last place).
 *
 * log[-b](a) = NaN
 * log[0](a) = NaN
 * log[1](a) = NaN
 * log[NaN](a) = NaN
 * log[Infinity](a) = NaN
 * log[b](0) = -Infinity
 * log[b](-0) = -Infinity
 * log[b](-a) = NaN
 * log[b](1) = 0
 * log[b](Infinity) = Infinity
 * log[b](NaN) = NaN
 *
 * [base] {number|string|Decimal} The base of the logarithm.
 *
 */
P.logarithm = P.log = function (base) {
  var isBase10, d, denominator, k, inf, num, sd, r,
    arg = this,
    Ctor = arg.constructor,
    pr = Ctor.precision,
    rm = Ctor.rounding,
    // 5 extra working digits: the result is computed with 5 rounding digits (see below).
    guard = 5;

  // Default base is 10.
  if (base == null) {
    base = new Ctor(10);
    isBase10 = true;
  } else {
    base = new Ctor(base);
    d = base.d;

    // Return NaN if base is negative, or non-finite, or is 0 or 1.
    if (base.s < 0 || !d || !d[0] || base.eq(1)) return new Ctor(NaN);

    isBase10 = base.eq(10);
  }

  d = arg.d;

  // Is arg negative, non-finite, 0 or 1?
  if (arg.s < 0 || !d || !d[0] || arg.eq(1)) {
    return new Ctor(d && !d[0] ? -1 / 0 : arg.s != 1 ? NaN : d ? 0 : 1 / 0);
  }

  // The result will have a non-terminating decimal expansion if base is 10 and arg is not an
  // integer power of 10.
  if (isBase10) {
    if (d.length > 1) {
      inf = true;
    } else {
      for (k = d[0]; k % 10 === 0;) k /= 10;
      inf = k !== 1;
    }
  }

  external = false;
  sd = pr + guard;
  num = naturalLogarithm(arg, sd);
  denominator = isBase10 ? getLn10(Ctor, sd + 10) : naturalLogarithm(base, sd);

  // The result will have 5 rounding digits.
  r = divide(num, denominator, sd, 1);

  // If at a rounding boundary, i.e. the result's rounding digits are [49]9999 or [50]0000,
  // calculate 10 further digits.
  //
  // If the result is known to have an infinite decimal expansion, repeat this until it is clear
  // that the result is above or below the boundary. Otherwise, if after calculating the 10
  // further digits, the last 14 are nines, round up and assume the result is exact.
  // Also assume the result is exact if the last 14 are zero.
  //
  // Example of a result that will be incorrectly rounded:
  // log[1048576](4503599627370502) = 2.60000000000000009610279511444746...
  // The above result correctly rounded using ROUND_CEIL to 1 decimal place should be 2.7, but it
  // will be given as 2.6 as there are 15 zeros immediately after the requested decimal place, so
  // the exact result would be assumed to be 2.6, which rounded using ROUND_CEIL to 1 decimal
  // place is still 2.6.
  if (checkRoundingDigits(r.d, k = pr, rm)) {

    do {
      sd += 10;
      num = naturalLogarithm(arg, sd);
      denominator = isBase10 ? getLn10(Ctor, sd + 10) : naturalLogarithm(base, sd);
      r = divide(num, denominator, sd, 1);

      if (!inf) {

        // Check for 14 nines from the 2nd rounding digit, as the first may be 4.
        if (+digitsToString(r.d).slice(k + 1, k + 15) + 1 == 1e14) {
          r = finalise(r, pr + 1, 0);
        }

        break;
      }
    } while (checkRoundingDigits(r.d, k += 10, rm));
  }

  external = true;

  return finalise(r, pr, rm);
};


/*
 * Return a new Decimal whose value is the maximum of the arguments and the value of this Decimal.
 *
 * arguments {number|string|Decimal}
 *
P.max = function () {
  Array.prototype.push.call(arguments, this);
  return maxOrMin(this.constructor, arguments, 'lt');
};
 */


/*
 * Return a new Decimal whose value is the minimum of the arguments and the value of this Decimal.
 *
 * arguments {number|string|Decimal}
 *
P.min = function () {
  Array.prototype.push.call(arguments, this);
  return maxOrMin(this.constructor, arguments, 'gt');
};
 */


/*
 *  n - 0 = n
 *  n - N = N
 *  n - I = -I
 *  0 - n = -n
 *  0 - 0 = 0
 *  0 - N = N
 *  0 - I = -I
 *  N - n = N
 *  N - 0 = N
 *  N - N = N
 *  N - I = N
 *  I - n = I
 *  I - 0 = I
 *  I - N = N
 *  I - I = N
 *
 * Return a new Decimal whose value is the value of this Decimal minus `y`, rounded to `precision`
 * significant digits using rounding mode `rounding`.
 *
 */
P.minus = P.sub = function (y) {
  var d, e, i, j, k, len, pr, rm, xd, xe, xLTy, yd,
    x = this,
    Ctor = x.constructor;

  y = new Ctor(y);

  // If either is not finite...
  if (!x.d || !y.d) {

    // Return NaN if either is NaN.
    if (!x.s || !y.s) y = new Ctor(NaN);

    // Return y negated if x is finite and y is ±Infinity.
    else if (x.d) y.s = -y.s;

    // Return x if y is finite and x is ±Infinity.
    // Return x if both are ±Infinity with different signs.
    // Return NaN if both are ±Infinity with the same sign.
    else y = new Ctor(y.d || x.s !== y.s ? x : NaN);

    return y;
  }

  // If signs differ...
  if (x.s != y.s) {
    y.s = -y.s;
    return x.plus(y);
  }

  xd = x.d;
  yd = y.d;
  pr = Ctor.precision;
  rm = Ctor.rounding;

  // If either is zero...
  if (!xd[0] || !yd[0]) {

    // Return y negated if x is zero and y is non-zero.
    if (yd[0]) y.s = -y.s;

    // Return x if y is zero and x is non-zero.
    else if (xd[0]) y = new Ctor(x);

    // Return zero if both are zero.
    // From IEEE 754 (2008) 6.3: 0 - 0 = -0 - -0 = -0 when rounding to -Infinity.
    else return new Ctor(rm === 3 ? -0 : 0);

    return external ? finalise(y, pr, rm) : y;
  }

  // x and y are finite, non-zero numbers with the same sign.

  // Calculate base 1e7 exponents.
  e = mathfloor(y.e / LOG_BASE);
  xe = mathfloor(x.e / LOG_BASE);

  xd = xd.slice();
  k = xe - e;

  // If base 1e7 exponents differ...
  if (k) {
    xLTy = k < 0;

    if (xLTy) {
      d = xd;
      k = -k;
      len = yd.length;
    } else {
      d = yd;
      e = xe;
      len = xd.length;
    }

    // Numbers with massively different exponents would result in a very high number of
    // zeros needing to be prepended, but this can be avoided while still ensuring correct
    // rounding by limiting the number of zeros to `Math.ceil(pr / LOG_BASE) + 2`.
    i = Math.max(Math.ceil(pr / LOG_BASE), len) + 2;

    if (k > i) {
      k = i;
      d.length = 1;
    }

    // Prepend zeros to equalise exponents.
    d.reverse();
    for (i = k; i--;) d.push(0);
    d.reverse();

  // Base 1e7 exponents equal.
  } else {

    // Check digits to determine which is the bigger number.
    i = xd.length;
    len = yd.length;
    xLTy = i < len;
    if (xLTy) len = i;

    for (i = 0; i < len; i++) {
      if (xd[i] != yd[i]) {
        xLTy = xd[i] < yd[i];
        break;
      }
    }

    k = 0;
  }

  // Always subtract the smaller magnitude from the larger; flip the result sign if swapped.
  if (xLTy) {
    d = xd;
    xd = yd;
    yd = d;
    y.s = -y.s;
  }

  len = xd.length;

  // Append zeros to `xd` if shorter.
  // Don't add zeros to `yd` if shorter as subtraction only needs to start at `yd` length.
  for (i = yd.length - len; i > 0; --i) xd[len++] = 0;

  // Subtract yd from xd.
  for (i = yd.length; i > k;) {

    // Borrow from the next more-significant base-1e7 digit when needed.
    if (xd[--i] < yd[i]) {
      for (j = i; j && xd[--j] === 0;) xd[j] = BASE - 1;
      --xd[j];
      xd[i] += BASE;
    }

    xd[i] -= yd[i];
  }

  // Remove trailing zeros.
  for (; xd[--len] === 0;) xd.pop();

  // Remove leading zeros and adjust exponent accordingly.
  for (; xd[0] === 0; xd.shift()) --e;

  // Zero?
  if (!xd[0]) return new Ctor(rm === 3 ? -0 : 0);

  y.d = xd;
  y.e = getBase10Exponent(xd, e);

  return external ? finalise(y, pr, rm) : y;
};


/*
 *   n % 0 =  N
 *   n % N =  N
 *   n % I =  n
 *   0 % n =  0
 *  -0 % n = -0
 *   0 % 0 =  N
 *   0 % N =  N
 *   0 % I =  0
 *   N % n =  N
 *   N % 0 =  N
 *   N % N =  N
 *   N % I =  N
 *   I % n =  N
 *   I % 0 =  N
 *   I % N =  N
 *   I % I =  N
 *
 * Return a new Decimal whose value is the value of this Decimal modulo `y`, rounded to
 * `precision` significant digits using rounding mode `rounding`.
 *
 * The result depends on the modulo mode.
 *
 */
P.modulo = P.mod = function (y) {
  var q,
    x = this,
    Ctor = x.constructor;

  y = new Ctor(y);

  // Return NaN if x is ±Infinity or NaN, or y is NaN or ±0.
  if (!x.d || !y.s || y.d && !y.d[0]) return new Ctor(NaN);

  // Return x if y is ±Infinity or x is ±0.
  if (!y.d || x.d && !x.d[0]) {
    return finalise(new Ctor(x), Ctor.precision, Ctor.rounding);
  }

  // Prevent rounding of intermediate calculations.
  external = false;

  if (Ctor.modulo == 9) {

    // Euclidian division: q = sign(y) * floor(x / abs(y))
    // result = x - q * y where 0 <= result < abs(y)
    q = divide(x, y.abs(), 0, 3, 1);
    q.s *= y.s;
  } else {
    q = divide(x, y, 0, Ctor.modulo, 1);
  }

  q = q.times(y);

  external = true;

  return x.minus(q);
};


/*
 * Return a new Decimal whose value is the natural exponential of the value of this Decimal,
 * i.e. the base e raised to the power the value of this Decimal, rounded to `precision`
 * significant digits using rounding mode `rounding`.
 *
 */
P.naturalExponential = P.exp = function () {
  return naturalExponential(this);
};


/*
 * Return a new Decimal whose value is the natural logarithm of the value of this Decimal,
 * rounded to `precision` significant digits using rounding mode `rounding`.
 *
 */
P.naturalLogarithm = P.ln = function () {
  return naturalLogarithm(this);
};


/*
 * Return a new Decimal whose value is the value of this Decimal negated, i.e. as if multiplied by
 * -1.
 *
 */
P.negated = P.neg = function () {
  var x = new this.constructor(this);
  x.s = -x.s;
  return finalise(x);
};


/*
 *  n + 0 = n
 *  n + N = N
 *  n + I = I
 *  0 + n = n
 *  0 + 0 = 0
 *  0 + N = N
 *  0 + I = I
 *  N + n = N
 *  N + 0 = N
 *  N + N = N
 *  N + I = N
 *  I + n = I
 *  I + 0 = I
 *  I + N = N
 *  I + I = I
 *
 * Return a new Decimal whose value is the value of this Decimal plus `y`, rounded to `precision`
 * significant digits using rounding mode `rounding`.
 *
 */
P.plus = P.add = function (y) {
  var carry, d, e, i, k, len, pr, rm, xd, yd,
    x = this,
    Ctor = x.constructor;

  y = new Ctor(y);

  // If either is not finite...
  if (!x.d || !y.d) {

    // Return NaN if either is NaN.
    if (!x.s || !y.s) y = new Ctor(NaN);

    // Return x if y is finite and x is ±Infinity.
    // Return x if both are ±Infinity with the same sign.
    // Return NaN if both are ±Infinity with different signs.
    // Return y if x is finite and y is ±Infinity.
    else if (!x.d) y = new Ctor(y.d || x.s === y.s ? x : NaN);

    return y;
  }

  // If signs differ...
  if (x.s != y.s) {
    y.s = -y.s;
    return x.minus(y);
  }

  xd = x.d;
  yd = y.d;
  pr = Ctor.precision;
  rm = Ctor.rounding;

  // If either is zero...
  if (!xd[0] || !yd[0]) {

    // Return x if y is zero.
    // Return y if y is non-zero.
    if (!yd[0]) y = new Ctor(x);

    return external ? finalise(y, pr, rm) : y;
  }

  // x and y are finite, non-zero numbers with the same sign.

  // Calculate base 1e7 exponents.
  k = mathfloor(x.e / LOG_BASE);
  e = mathfloor(y.e / LOG_BASE);

  xd = xd.slice();
  i = k - e;

  // If base 1e7 exponents differ...
  if (i) {

    if (i < 0) {
      d = xd;
      i = -i;
      len = yd.length;
    } else {
      d = yd;
      e = k;
      len = xd.length;
    }

    // Limit number of zeros prepended to max(ceil(pr / LOG_BASE), len) + 1.
    k = Math.ceil(pr / LOG_BASE);
    len = k > len ? k + 1 : len + 1;

    if (i > len) {
      i = len;
      d.length = 1;
    }

    // Prepend zeros to equalise exponents. Note: Faster to use reverse then do unshifts.
    d.reverse();
    for (; i--;) d.push(0);
    d.reverse();
  }

  len = xd.length;
  i = yd.length;

  // If yd is longer than xd, swap xd and yd so xd points to the longer array.
  if (len - i < 0) {
    i = len;
    d = yd;
    yd = xd;
    xd = d;
  }

  // Only start adding at yd.length - 1 as the further digits of xd can be left as they are.
  // Carries propagate right-to-left through the base-1e7 digit array.
  for (carry = 0; i;) {
    carry = (xd[--i] = xd[i] + yd[i] + carry) / BASE | 0;
    xd[i] %= BASE;
  }

  if (carry) {
    xd.unshift(carry);
    ++e;
  }

  // Remove trailing zeros.
  // No need to check for zero, as +x + +y != 0 && -x + -y != 0
  for (len = xd.length; xd[--len] == 0;) xd.pop();

  y.d = xd;
  y.e = getBase10Exponent(xd, e);

  return external ? finalise(y, pr, rm) : y;
};


/*
 * Return the number of significant digits of the value of this Decimal.
 *
 * [z] {boolean|number} Whether to count integer-part trailing zeros: true, false, 1 or 0.
 *
 */
P.precision = P.sd = function (z) {
  var k,
    x = this;

  if (z !== void 0 && z !== !!z && z !== 1 && z !== 0) throw Error(invalidArgument + z);

  if (x.d) {
    k = getPrecision(x.d);
    if (z && x.e + 1 > k) k = x.e + 1;
  } else {
    k = NaN;
  }

  return k;
};


/*
 * Return a new Decimal whose value is the value of this Decimal rounded to a whole number using
 * rounding mode `rounding`.
 *
 */
P.round = function () {
  var x = this,
    Ctor = x.constructor;

  return finalise(new Ctor(x), x.e + 1, Ctor.rounding);
};


/*
 * Return a new Decimal whose value is the sine of the value in radians of this Decimal.
 *
 * Domain: [-Infinity, Infinity]
 * Range: [-1, 1]
 *
 * sin(x) = x - x^3/3! + x^5/5! - ...
 *
 * sin(0) = 0
 * sin(-0) = -0
 * sin(Infinity) = NaN
 * sin(-Infinity) = NaN
 * sin(NaN) = NaN
 *
 */
P.sine = P.sin = function () {
  var pr, rm,
    x = this,
    Ctor = x.constructor;

  if (!x.isFinite()) return new Ctor(NaN);
  if (x.isZero()) return new Ctor(x);

  pr = Ctor.precision;
  rm = Ctor.rounding;
  Ctor.precision = pr + Math.max(x.e, x.sd()) + LOG_BASE;
  Ctor.rounding = 1;

  // Reduce the argument to [0, pi/2]; `quadrant` (set by toLessThanHalfPi) fixes the sign below.
  x = sine(Ctor, toLessThanHalfPi(Ctor, x));

  Ctor.precision = pr;
  Ctor.rounding = rm;

  return finalise(quadrant > 2 ?
    x.neg() : x, pr, rm, true);
};


/*
 * Return a new Decimal whose value is the square root of this Decimal, rounded to `precision`
 * significant digits using rounding mode `rounding`.
 *
 *  sqrt(-n) =  N
 *  sqrt(N)  =  N
 *  sqrt(-I) =  N
 *  sqrt(I)  =  I
 *  sqrt(0)  =  0
 *  sqrt(-0) = -0
 *
 */
P.squareRoot = P.sqrt = function () {
  var m, n, sd, r, rep, t,
    x = this,
    d = x.d,
    e = x.e,
    s = x.s,
    Ctor = x.constructor;

  // Negative/NaN/Infinity/zero?
  if (s !== 1 || !d || !d[0]) {
    return new Ctor(!s || s < 0 && (!d || d[0]) ? NaN : d ? x : 1 / 0);
  }

  // Prevent rounding of intermediate calculations.
  external = false;

  // Initial estimate.
  s = Math.sqrt(+x);

  // Math.sqrt underflow/overflow?
  // Pass x to Math.sqrt as integer, then adjust the exponent of the result.
  if (s == 0 || s == 1 / 0) {
    n = digitsToString(d);

    if ((n.length + e) % 2 == 0) n += '0';
    s = Math.sqrt(n);
    e = mathfloor((e + 1) / 2) - (e < 0 || e % 2);

    if (s == 1 / 0) {
      n = '1e' + e;
    } else {
      n = s.toExponential();
      n = n.slice(0, n.indexOf('e') + 1) + e;
    }

    r = new Ctor(n);
  } else {
    r = new Ctor(s.toString());
  }

  // Work with 3 extra digits; note `e` is reused from here on to hold the target precision.
  sd = (e = Ctor.precision) + 3;

  // Newton-Raphson iteration.
  for (;;) {
    t = r;
    r = t.plus(divide(x, t, sd + 2, 1)).times(0.5);

    // TODO? Replace with for-loop and checkRoundingDigits.
    if (digitsToString(t.d).slice(0, sd) === (n = digitsToString(r.d)).slice(0, sd)) {
      n = n.slice(sd - 3, sd + 1);

      // The 4th rounding digit may be in error by -1 so if the 4 rounding digits are 9999 or
      // 4999, i.e. approaching a rounding boundary, continue the iteration.
      if (n == '9999' || !rep && n == '4999') {

        // On the first iteration only, check to see if rounding up gives the exact result as the
        // nines may infinitely repeat.
        if (!rep) {
          finalise(t, e + 1, 0);

          if (t.times(t).eq(x)) {
            r = t;
            break;
          }
        }

        sd += 4;
        rep = 1;
      } else {

        // If the rounding digits are null, 0{0,4} or 50{0,3}, check for an exact result.
        // If not, then there are further digits and m will be truthy.
        if (!+n || !+n.slice(1) && n.charAt(0) == '5') {

          // Truncate to the first rounding digit.
          finalise(r, e + 1, 1);
          m = !r.times(r).eq(x);
        }

        break;
      }
    }
  }

  external = true;

  return finalise(r, e, Ctor.rounding, m);
};


/*
 * Return a new Decimal whose value is the tangent of the value in radians of this Decimal.
 *
 * Domain: [-Infinity, Infinity]
 * Range: [-Infinity, Infinity]
 *
 * tan(0) = 0
 * tan(-0) = -0
 * tan(Infinity) = NaN
 * tan(-Infinity) = NaN
 * tan(NaN) = NaN
 *
 */
P.tangent = P.tan = function () {
  var pr, rm,
    x = this,
    Ctor = x.constructor;

  if (!x.isFinite()) return new Ctor(NaN);
  if (x.isZero()) return new Ctor(x);

  pr = Ctor.precision;
  rm = Ctor.rounding;
  Ctor.precision = pr + 10;
  Ctor.rounding = 1;

  // tan(x) = sin(x) / cos(x) = sin(x) / sqrt(1 - sin^2(x)); `quadrant` fixes the sign below.
  x = x.sin();
  x.s = 1;
  x = divide(x, new Ctor(1).minus(x.times(x)).sqrt(), pr + 10, 0);

  Ctor.precision = pr;
  Ctor.rounding = rm;

  return finalise(quadrant == 2 || quadrant == 4 ? x.neg() : x, pr, rm, true);
};


/*
 *  n * 0 = 0
 *  n * N = N
 *  n * I = I
 *  0 * n = 0
 *  0 * 0 = 0
 *  0 * N = N
 *  0 * I = N
 *  N * n = N
 *  N * 0 = N
 *  N * N = N
 *  N * I = N
 *  I * n = I
 *  I * 0 = N
 *  I * N = N
 *  I * I = I
 *
 * Return a new Decimal whose value is this Decimal times `y`, rounded to `precision` significant
 * digits using rounding mode `rounding`.
 *
 */
P.times = P.mul = function (y) {
  var carry, e, i, k, r, rL, t, xdL, ydL,
    x = this,
    Ctor = x.constructor,
    xd = x.d,
    yd = (y = new Ctor(y)).d;

  y.s *= x.s;

  // If either is NaN, ±Infinity or ±0...
  if (!xd || !xd[0] || !yd || !yd[0]) {

    return new Ctor(!y.s || xd && !xd[0] && !yd || yd && !yd[0] && !xd

      // Return NaN if either is NaN.
      // Return NaN if x is ±0 and y is ±Infinity, or y is ±0 and x is ±Infinity.
      ? NaN

      // Return ±Infinity if either is ±Infinity.
      // Return ±0 if either is ±0.
      : !xd || !yd ? y.s / 0 : y.s * 0);
  }

  e = mathfloor(x.e / LOG_BASE) + mathfloor(y.e / LOG_BASE);
  xdL = xd.length;
  ydL = yd.length;

  // Ensure xd points to the longer array.
  if (xdL < ydL) {
    r = xd;
    xd = yd;
    yd = r;
    rL = xdL;
    xdL = ydL;
    ydL = rL;
  }

  // Initialise the result array with zeros.
  r = [];
  rL = xdL + ydL;
  for (i = rL; i--;) r.push(0);

  // Multiply!
  // Schoolbook long multiplication over the base-1e7 digit arrays.
  for (i = ydL; --i >= 0;) {
    carry = 0;
    for (k = xdL + i; k > i;) {
      t = r[k] + yd[i] * xd[k - i - 1] + carry;
      r[k--] = t % BASE | 0;
      carry = t / BASE | 0;
    }

    r[k] = (r[k] + carry) % BASE | 0;
  }

  // Remove trailing zeros.
  for (; !r[--rL];) r.pop();

  if (carry) ++e;
  else r.shift();

  y.d = r;
  y.e = getBase10Exponent(r, e);

  return external ? finalise(y, Ctor.precision, Ctor.rounding) : y;
};


/*
 * Return a string representing the value of this Decimal in base 2, round to `sd` significant
 * digits using rounding mode `rm`.
 *
 * If the optional `sd` argument is present then return binary exponential notation.
 *
 * [sd] {number} Significant digits. Integer, 1 to MAX_DIGITS inclusive.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 */
P.toBinary = function (sd, rm) {
  return toStringBinary(this, 2, sd, rm);
};


/*
 * Return a new Decimal whose value is the value of this Decimal rounded to a maximum of `dp`
 * decimal places using rounding mode `rm` or `rounding` if `rm` is omitted.
 *
 * If `dp` is omitted, return a new Decimal whose value is the value of this Decimal.
 *
 * [dp] {number} Decimal places. Integer, 0 to MAX_DIGITS inclusive.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 */
P.toDecimalPlaces = P.toDP = function (dp, rm) {
  var x = this,
    Ctor = x.constructor;

  x = new Ctor(x);
  if (dp === void 0) return x;

  checkInt32(dp, 0, MAX_DIGITS);

  if (rm === void 0) rm = Ctor.rounding;
  else checkInt32(rm, 0, 8);

  return finalise(x, dp + x.e + 1, rm);
};


/*
 * Return a string representing the value of this Decimal in exponential notation rounded to
 * `dp` fixed decimal places using rounding mode `rounding`.
 *
 * [dp] {number} Decimal places. Integer, 0 to MAX_DIGITS inclusive.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 */
P.toExponential = function (dp, rm) {
  var str,
    x = this,
    Ctor = x.constructor;

  if (dp === void 0) {
    str = finiteToString(x, true);
  } else {
    checkInt32(dp, 0, MAX_DIGITS);

    if (rm === void 0) rm = Ctor.rounding;
    else checkInt32(rm, 0, 8);

    x = finalise(new Ctor(x), dp + 1, rm);
    str = finiteToString(x, true, dp + 1);
  }

  return x.isNeg() && !x.isZero() ? '-' + str : str;
};


/*
 * Return a string representing the value of this Decimal in normal (fixed-point) notation to
 * `dp` fixed decimal places and rounded using rounding mode `rm` or `rounding` if `rm` is
 * omitted.
 *
 * As with JavaScript numbers, (-0).toFixed(0) is '0', but e.g. (-0.00001).toFixed(0) is '-0'.
 *
 * [dp] {number} Decimal places. Integer, 0 to MAX_DIGITS inclusive.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 * (-0).toFixed(0) is '0', but (-0.1).toFixed(0) is '-0'.
 * (-0).toFixed(1) is '0.0', but (-0.01).toFixed(1) is '-0.0'.
 * (-0).toFixed(3) is '0.000'.
 * (-0.5).toFixed(0) is '-0'.
 *
 */
P.toFixed = function (dp, rm) {
  var str, y,
    x = this,
    Ctor = x.constructor;

  if (dp === void 0) {
    str = finiteToString(x);
  } else {
    checkInt32(dp, 0, MAX_DIGITS);

    if (rm === void 0) rm = Ctor.rounding;
    else checkInt32(rm, 0, 8);

    y = finalise(new Ctor(x), dp + x.e + 1, rm);
    str = finiteToString(y, false, dp + y.e + 1);
  }

  // To determine whether to add the minus sign look at the value before it was rounded,
  // i.e. look at `x` rather than `y`.
  return x.isNeg() && !x.isZero() ? '-' + str : str;
};


/*
 * Return an array representing the value of this Decimal as a simple fraction with an integer
 * numerator and an integer denominator.
 *
 * The denominator will be a positive non-zero value less than or equal to the specified maximum
 * denominator. If a maximum denominator is not specified, the denominator will be the lowest
 * value necessary to represent the number exactly.
 *
 * [maxD] {number|string|Decimal} Maximum denominator. Integer >= 1 and < Infinity.
 *
 */
P.toFraction = function (maxD) {
  var d, d0, d1, d2, e, k, n, n0, n1, pr, q, r,
    x = this,
    xd = x.d,
    Ctor = x.constructor;

  if (!xd) return new Ctor(x);

  n1 = d0 = new Ctor(1);
  d1 = n0 = new Ctor(0);

  d = new Ctor(d1);
  e = d.e = getPrecision(xd) - x.e - 1;
  k = e % LOG_BASE;
  d.d[0] = mathpow(10, k < 0 ? LOG_BASE + k : k);

  if (maxD == null) {

    // d is 10**e, the minimum max-denominator needed.
    maxD = e > 0 ? d : n1;
  } else {
    n = new Ctor(maxD);
    if (!n.isInt() || n.lt(n1)) throw Error(invalidArgument + n);
    maxD = n.gt(d) ? (e > 0 ? d : n1) : n;
  }

  // Prevent rounding of intermediate calculations.
  external = false;
  n = new Ctor(digitsToString(xd));
  pr = Ctor.precision;
  Ctor.precision = e = xd.length * LOG_BASE * 2;

  // Appears to build successive convergents in continued-fraction style; `d2` is reused as a
  // temporary during the rotations — TODO(review): confirm against the library's test suite.
  for (;;)  {
    q = divide(n, d, 0, 1, 1);
    d2 = d0.plus(q.times(d1));
    if (d2.cmp(maxD) == 1) break;
    d0 = d1;
    d1 = d2;
    d2 = n1;
    n1 = n0.plus(q.times(d2));
    n0 = d2;
    d2 = d;
    d = n.minus(q.times(d2));
    n = d2;
  }

  d2 = divide(maxD.minus(d0), d1, 0, 1, 1);
  n0 = n0.plus(d2.times(n1));
  d0 = d0.plus(d2.times(d1));
  n0.s = n1.s = x.s;

  // Determine which fraction is closer to x, n0/d0 or n1/d1?
  r = divide(n1, d1, e, 1).minus(x).abs().cmp(divide(n0, d0, e, 1).minus(x).abs()) < 1
      ? [n1, d1] : [n0, d0];

  Ctor.precision = pr;
  external = true;

  return r;
};


/*
 * Return a string representing the value of this Decimal in base 16, round to `sd` significant
 * digits using rounding mode `rm`.
 *
 * If the optional `sd` argument is present then return binary exponential notation.
 *
 * [sd] {number} Significant digits. Integer, 1 to MAX_DIGITS inclusive.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 */
P.toHexadecimal = P.toHex = function (sd, rm) {
  return toStringBinary(this, 16, sd, rm);
};


/*
 * Returns a new Decimal whose value is the nearest multiple of `y` in the direction of rounding
 * mode `rm`, or `Decimal.rounding` if `rm` is omitted, to the value of this Decimal.
 *
 * The return value will always have the same sign as this Decimal, unless either this Decimal
 * or `y` is NaN, in which case the return value will be also be NaN.
 *
 * The return value is not affected by the value of `precision`.
 *
 * y {number|string|Decimal} The magnitude to round to a multiple of.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 * 'toNearest() rounding mode not an integer: {rm}'
 * 'toNearest() rounding mode out of range: {rm}'
 *
 */
P.toNearest = function (y, rm) {
  var x = this,
    Ctor = x.constructor;

  x = new Ctor(x);

  if (y == null) {

    // If x is not finite, return x.
    if (!x.d) return x;

    y = new Ctor(1);
    rm = Ctor.rounding;
  } else {
    y = new Ctor(y);
    if (rm === void 0) {
      rm = Ctor.rounding;
    } else {
      checkInt32(rm, 0, 8);
    }

    // If x is not finite, return x if y is not NaN, else NaN.
    if (!x.d) return y.s ? x : y;

    // If y is not finite, return Infinity with the sign of x if y is Infinity, else NaN.
    if (!y.d) {
      if (y.s) y.s = x.s;
      return y;
    }
  }

  // If y is not zero, calculate the nearest multiple of y to x.
  if (y.d[0]) {
    external = false;
    x = divide(x, y, 0, rm, 1).times(y);
    external = true;
    finalise(x);

  // If y is zero, return zero with the sign of x.
  } else {
    y.s = x.s;
    x = y;
  }

  return x;
};


/*
 * Return the value of this Decimal converted to a number primitive.
 * Zero keeps its sign.
 *
 */
P.toNumber = function () {
  return +this;
};


/*
 * Return a string representing the value of this Decimal in base 8, round to `sd` significant
 * digits using rounding mode `rm`.
 *
 * If the optional `sd` argument is present then return binary exponential notation.
 *
 * [sd] {number} Significant digits. Integer, 1 to MAX_DIGITS inclusive.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 */
P.toOctal = function (sd, rm) {
  return toStringBinary(this, 8, sd, rm);
};


/*
 * Return a new Decimal whose value is the value of this Decimal raised to the power `y`, rounded
 * to `precision` significant digits using rounding mode `rounding`.
 *
 * ECMAScript compliant.
 *
 *   pow(x, NaN)                           = NaN
 *   pow(x, ±0)                            = 1
 *   pow(NaN, non-zero)                    = NaN
 *   pow(abs(x) > 1, +Infinity)            = +Infinity
 *   pow(abs(x) > 1, -Infinity)            = +0
 *   pow(abs(x) == 1, ±Infinity)           = NaN
 *   pow(abs(x) < 1, +Infinity)            = +0
 *   pow(abs(x) < 1, -Infinity)            = +Infinity
 *   pow(+Infinity, y > 0)                 = +Infinity
 *   pow(+Infinity, y < 0)                 = +0
 *   pow(-Infinity, odd integer > 0)       = -Infinity
 *   pow(-Infinity, even integer > 0)      = +Infinity
 *   pow(-Infinity, odd integer < 0)       = -0
 *   pow(-Infinity, even integer < 0)      = +0
 *   pow(+0, y > 0)                        = +0
 *   pow(+0, y < 0)                        = +Infinity
 *   pow(-0, odd integer > 0)              = -0
 *   pow(-0, even integer > 0)             = +0
 *   pow(-0, odd integer < 0)              = -Infinity
 *   pow(-0, even integer < 0)             = +Infinity
 *   pow(finite x < 0, finite non-integer) = NaN
 *
 * For non-integer or very large exponents pow(x, y) is calculated using
 *
 *   x^y = exp(y*ln(x))
 *
 * Assuming the first 15 rounding digits are each equally likely to be any digit 0-9, the
 * probability of an incorrectly rounded result
 * P([49]9{14} | [50]0{14}) = 2 * 0.2 * 10^-14 = 4e-15 = 1/2.5e+14
 * i.e. 1 in 250,000,000,000,000
 *
 * If a result is incorrectly rounded the maximum error will be 1 ulp (unit in last place).
 *
 * y {number|string|Decimal} The power to which to raise this Decimal.
 *
 */
P.toPower = P.pow = function (y) {
  var e, k, pr, r, rm, s,
    x = this,
    Ctor = x.constructor,
    yn = +(y = new Ctor(y));

  // Either ±Infinity, NaN or ±0?
  if (!x.d || !y.d || !x.d[0] || !y.d[0]) return new Ctor(mathpow(+x, yn));

  x = new Ctor(x);

  if (x.eq(1)) return x;

  pr = Ctor.precision;
  rm = Ctor.rounding;

  if (y.eq(1)) return finalise(x, pr, rm);

  // y exponent
  e = mathfloor(y.e / LOG_BASE);

  // If y is a small integer use the 'exponentiation by squaring' algorithm.
  if (e >= y.d.length - 1 && (k = yn < 0 ? -yn : yn) <= MAX_SAFE_INTEGER) {
    r = intPow(Ctor, x, k, pr);
    return y.s < 0 ? new Ctor(1).div(r) : finalise(r, pr, rm);
  }

  s = x.s;

  // if x is negative
  if (s < 0) {

    // if y is not an integer
    if (e < y.d.length - 1) return new Ctor(NaN);

    // Result is positive if x is negative and the last digit of integer y is even.
    if ((y.d[e] & 1) == 0) s = 1;

    // if x.eq(-1)
    if (x.e == 0 && x.d[0] == 1 && x.d.length == 1) {
      x.s = s;
      return x;
    }
  }

  // Estimate result exponent.
  // x^y = 10^e,  where e = y * log10(x)
  // log10(x) = log10(x_significand) + x_exponent
  // log10(x_significand) = ln(x_significand) / ln(10)
  k = mathpow(+x, yn);
  e = k == 0 || !isFinite(k)
    ? mathfloor(yn * (Math.log('0.' + digitsToString(x.d)) / Math.LN10 + x.e + 1))
    : new Ctor(k + '').e;

  // Exponent estimate may be incorrect e.g. x: 0.999999999999999999, y: 2.29, e: 0, r.e: -1.

  // Overflow/underflow?
  if (e > Ctor.maxE + 1 || e < Ctor.minE - 1) return new Ctor(e > 0 ? s / 0 : 0);

  external = false;
  // Compute with |x| (x.s = 1); the correct sign `s` is reattached to the result below.
  Ctor.rounding = x.s = 1;

  // Estimate the extra guard digits needed to ensure five correct rounding digits from
  // naturalLogarithm(x). Example of failure without these extra digits (precision: 10):
  // new Decimal(2.32456).pow('2087987436534566.46411')
  // should be 1.162377823e+764914905173815, but is 1.162355823e+764914905173815
  k = Math.min(12, (e + '').length);

  // r = x^y = exp(y*ln(x))
  r = naturalExponential(y.times(naturalLogarithm(x, pr + k)), pr);

  // r may be Infinity, e.g. (0.9999999999999999).pow(-1e+40)
  if (r.d) {

    // Truncate to the required precision plus five rounding digits.
    r = finalise(r, pr + 5, 1);

    // If the rounding digits are [49]9999 or [50]0000 increase the precision by 10 and recalculate
    // the result.
    if (checkRoundingDigits(r.d, pr, rm)) {
      e = pr + 10;

      // Truncate to the increased precision plus five rounding digits.
      r = finalise(naturalExponential(y.times(naturalLogarithm(x, e + k)), e), e + 5, 1);

      // Check for 14 nines from the 2nd rounding digit (the first rounding digit may be 4 or 9).
      if (+digitsToString(r.d).slice(pr + 1, pr + 15) + 1 == 1e14) {
        r = finalise(r, pr + 1, 0);
      }
    }
  }

  r.s = s;
  external = true;
  Ctor.rounding = rm;

  return finalise(r, pr, rm);
};


/*
 * Return a string representing the value of this Decimal rounded to `sd` significant digits
 * using rounding mode `rounding`.
 *
 * Return exponential notation if `sd` is less than the number of digits necessary to represent
 * the integer part of the value in normal notation.
 *
 * [sd] {number} Significant digits. Integer, 1 to MAX_DIGITS inclusive.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 */
P.toPrecision = function (sd, rm) {
  var str,
    x = this,
    Ctor = x.constructor;

  if (sd === void 0) {
    str = finiteToString(x, x.e <= Ctor.toExpNeg || x.e >= Ctor.toExpPos);
  } else {
    checkInt32(sd, 1, MAX_DIGITS);

    if (rm === void 0) rm = Ctor.rounding;
    else checkInt32(rm, 0, 8);

    x = finalise(new Ctor(x), sd, rm);
    str = finiteToString(x, sd <= x.e || x.e <= Ctor.toExpNeg, sd);
  }

  return x.isNeg() && !x.isZero() ? '-' + str : str;
};


/*
 * Return a new Decimal whose value is the value of this Decimal rounded to a maximum of `sd`
 * significant digits using rounding mode `rm`, or to `precision` and `rounding` respectively if
 * omitted.
 *
 * [sd] {number} Significant digits. Integer, 1 to MAX_DIGITS inclusive.
 * [rm] {number} Rounding mode. Integer, 0 to 8 inclusive.
 *
 * 'toSD() digits out of range: {sd}'
 * 'toSD() digits not an integer: {sd}'
 * 'toSD() rounding mode not an integer: {rm}'
 * 'toSD() rounding mode out of range: {rm}'
 *
 */
P.toSignificantDigits = P.toSD = function (sd, rm) {
  var x = this,
    Ctor = x.constructor;

  if (sd === void 0) {
    sd = Ctor.precision;
    rm = Ctor.rounding;
  } else {
    checkInt32(sd, 1, MAX_DIGITS);

    if (rm === void 0) rm = Ctor.rounding;
    else checkInt32(rm, 0, 8);
  }

  return finalise(new Ctor(x), sd, rm);
};


/*
 * Return a string representing the value of this Decimal.
 *
 * Return exponential notation if this Decimal has a positive exponent equal to or greater than
 * `toExpPos`, or a negative exponent equal to or less than `toExpNeg`.
 *
 */
P.toString = function () {
  var x = this,
    Ctor = x.constructor,
    str = finiteToString(x, x.e <= Ctor.toExpNeg || x.e >= Ctor.toExpPos);

  return x.isNeg() && !x.isZero() ? '-' + str : str;
};


/*
 * Return a new Decimal whose value is the value of this Decimal truncated to a whole number.
 *
 */
P.truncated = P.trunc = function () {
  // Rounding mode 1 is round-down (truncate).
  return finalise(new this.constructor(this), this.e + 1, 1);
};


/*
 * Return a string representing the value of this Decimal.
 * Unlike `toString`, negative zero will include the minus sign.
 *
 */
P.valueOf = P.toJSON = function () {
  var x = this,
    Ctor = x.constructor,
    str = finiteToString(x, x.e <= Ctor.toExpNeg || x.e >= Ctor.toExpPos);

  return x.isNeg() ? '-' + str : str;
};


/*
// Add aliases to match BigDecimal method names.
//
P.add = P.plus;
P.subtract = P.minus;
P.multiply = P.times;
P.divide = P.div;
P.remainder = P.mod;
P.compareTo = P.cmp;
P.negate = P.neg;
 */


// Helper functions for Decimal.prototype (P) and/or Decimal methods, and their callers.

/*
 *  digitsToString            P.cubeRoot, P.logarithm, P.squareRoot, P.toFraction, P.toPower,
 *                            finiteToString, naturalExponential, naturalLogarithm
 *  checkInt32                P.toDecimalPlaces, P.toExponential, P.toFixed, P.toNearest,
 *                            P.toPrecision, P.toSignificantDigits, toStringBinary, random
 *  checkRoundingDigits       P.logarithm, P.toPower, naturalExponential, naturalLogarithm
 *  convertBase               toStringBinary, parseOther
 *  cos                       P.cos
 *  divide                    P.atanh, P.cubeRoot, P.dividedBy, P.dividedToIntegerBy,
 *                            P.logarithm, P.modulo, P.squareRoot, P.tan, P.tanh, P.toFraction,
 *                            P.toNearest, toStringBinary, naturalExponential, naturalLogarithm,
 *                            taylorSeries, atan2, parseOther
 *  finalise                  P.absoluteValue, P.atan, P.atanh, P.ceil, P.cos, P.cosh,
 *                            P.cubeRoot, P.dividedToIntegerBy, P.floor, P.logarithm, P.minus,
 *                            P.modulo, P.negated, P.plus, P.round, P.sin, P.sinh, P.squareRoot,
 *                            P.tan, P.times, P.toDecimalPlaces, P.toExponential, P.toFixed,
 *                            P.toNearest, P.toPower, P.toPrecision, P.toSignificantDigits,
 *                            P.truncated, divide, getLn10, getPi, naturalExponential,
 *                            naturalLogarithm, ceil, floor, round, trunc
 *  finiteToString            P.toExponential, P.toFixed, P.toPrecision, P.toString, P.valueOf,
 *                            toStringBinary
 *  getBase10Exponent         P.minus, P.plus, P.times, parseOther
 *  getLn10                   P.logarithm, naturalLogarithm
 *  getPi                     P.acos, P.asin, P.atan, toLessThanHalfPi, atan2
 *  getPrecision              P.precision, P.toFraction
 *  getZeroString             digitsToString, finiteToString
 *  intPow                    P.toPower, parseOther
 *  isOdd                     toLessThanHalfPi
 *  maxOrMin                  max, min
 *  naturalExponential        P.naturalExponential, P.toPower
 *  naturalLogarithm          P.acosh, P.asinh, P.atanh, P.logarithm, P.naturalLogarithm,
 *                            P.toPower, naturalExponential
 *  nonFiniteToString         finiteToString, toStringBinary
 *  parseDecimal              Decimal
 *  parseOther                Decimal
 *  sin                       P.sin
 *  taylorSeries              P.cosh, P.sinh, cos, sin
 *  toLessThanHalfPi          P.cos, P.sin
 *  toStringBinary            P.toBinary, P.toHexadecimal, P.toOctal
 *  truncate                  intPow
 *
 *  Throws:                   P.logarithm, P.precision, P.toFraction, checkInt32, getLn10, getPi,
 *                            naturalLogarithm, config, parseOther, random, Decimal
 */


// Convert the base-1e7 digit array `d` to its decimal string of significant digits.
function digitsToString(d) {
  var i, k, ws,
    indexOfLastWord = d.length - 1,
    str = '',
    w = d[0];

  if (indexOfLastWord > 0) {
    // Words after the first are zero-padded on the left to LOG_BASE digits.
    str += w;
    for (i = 1; i < indexOfLastWord; i++) {
      ws = d[i] + '';
      k = LOG_BASE - ws.length;
      if (k) str += getZeroString(k);
      str += ws;
    }

    w = d[i];
    ws = w + '';
    k = LOG_BASE - ws.length;
    if (k) str += getZeroString(k);
  } else if (w === 0) {
    return '0';
  }

  // Remove trailing zeros of last w.
  for (; w % 10 === 0;) w /= 10;

  return str + w;
}


// Throw if `i` is not an integer in [min, max]. (`~~i` truncates to a 32-bit integer.)
function checkInt32(i, min, max) {
  if (i !== ~~i || i < min || i > max) {
    throw Error(invalidArgument + i);
  }
}


/*
 * Check 5 rounding digits if `repeating` is null, 4 otherwise.
 * `repeating == null` if caller is `log` or `pow`,
 * `repeating != null` if caller is `naturalLogarithm` or `naturalExponential`.
 *
 * Returns truthy when the rounding digits look like [49]9999/[50]0000 (i.e. the result may be
 * incorrectly rounded at the current working precision and must be recomputed).
 */
function checkRoundingDigits(d, i, rm, repeating) {
  var di, k, r, rd;

  // Get the length of the first word of the array d.
  for (k = d[0]; k >= 10; k /= 10) --i;

  // Is the rounding digit in the first word of d?
  if (--i < 0) {
    i += LOG_BASE;
    di = 0;
  } else {
    di = Math.ceil((i + 1) / LOG_BASE);
    i %= LOG_BASE;
  }

  // i is the index (0 - 6) of the rounding digit.
  // E.g. if within the word 3487563 the first rounding digit is 5,
  // then i = 4, k = 1000, rd = 3487563 % 1000 = 563
  k = mathpow(10, LOG_BASE - i);
  rd = d[di] % k | 0;

  if (repeating == null) {
    if (i < 3) {
      if (i == 0) rd = rd / 100 | 0;
      else if (i == 1) rd = rd / 10 | 0;
      r = rm < 4 && rd == 99999 || rm > 3 && rd == 49999 || rd == 50000 || rd == 0;
    } else {
      r = (rm < 4 && rd + 1 == k || rm > 3 && rd + 1 == k / 2) &&
        (d[di + 1] / k / 100 | 0) == mathpow(10, i - 2) - 1 ||
          (rd == k / 2 || rd == 0) && (d[di + 1] / k / 100 | 0) == 0;
    }
  } else {
    if (i < 4) {
      if (i == 0) rd = rd / 1000 | 0;
      else if (i == 1) rd = rd / 100 | 0;
      else if (i == 2) rd = rd / 10 | 0;
      r = (repeating || rm < 4) && rd == 9999 || !repeating && rm > 3 && rd == 4999;
    } else {
      r = ((repeating || rm < 4) && rd + 1 == k ||
        (!repeating && rm > 3) && rd + 1 == k / 2) &&
          (d[di + 1] / k / 1000 | 0) == mathpow(10, i - 3) - 1;
    }
  }

  return r;
}


// Convert string of `baseIn` to an array of numbers of `baseOut`.
// Eg. convertBase('255', 10, 16) returns [15, 15].
// Eg. convertBase('ff', 16, 10) returns [2, 5, 5].
function convertBase(str, baseIn, baseOut) {
  var j,
    arr = [0],
    arrL,
    i = 0,
    strL = str.length;

  // Classic repeated multiply-and-add; `arr` holds the result least-significant digit first
  // until the final reverse().
  for (; i < strL;) {
    for (arrL = arr.length; arrL--;) arr[arrL] *= baseIn;
    arr[0] += NUMERALS.indexOf(str.charAt(i++));
    for (j = 0; j < arr.length; j++) {
      if (arr[j] > baseOut - 1) {
        if (arr[j + 1] === void 0) arr[j + 1] = 0;
        arr[j + 1] += arr[j] / baseOut | 0;
        arr[j] %= baseOut;
      }
    }
  }

  return arr.reverse();
}


/*
 * cos(x) = 1 - x^2/2! + x^4/4! - ...
 * |x| < pi/2
 *
 */
function cosine(Ctor, x) {
  var k, y,
    len = x.d.length;

  // Argument reduction: cos(4x) = 8*(cos^4(x) - cos^2(x)) + 1
  // i.e. cos(x) = 8*(cos^4(x/4) - cos^2(x/4)) + 1

  // Estimate the optimum number of times to use the argument reduction.
if (len < 32) { k = Math.ceil(len / 3); y = (1 / tinyPow(4, k)).toString(); } else { k = 16; y = '2.3283064365386962890625e-10'; } Ctor.precision += k; x = taylorSeries(Ctor, 1, x.times(y), new Ctor(1)); // Reverse argument reduction for (var i = k; i--;) { var cos2x = x.times(x); x = cos2x.times(cos2x).minus(cos2x).times(8).plus(1); } Ctor.precision -= k; return x; } /* * Perform division in the specified base. */ var divide = (function () { // Assumes non-zero x and k, and hence non-zero result. function multiplyInteger(x, k, base) { var temp, carry = 0, i = x.length; for (x = x.slice(); i--;) { temp = x[i] * k + carry; x[i] = temp % base | 0; carry = temp / base | 0; } if (carry) x.unshift(carry); return x; } function compare(a, b, aL, bL) { var i, r; if (aL != bL) { r = aL > bL ? 1 : -1; } else { for (i = r = 0; i < aL; i++) { if (a[i] != b[i]) { r = a[i] > b[i] ? 1 : -1; break; } } } return r; } function subtract(a, b, aL, base) { var i = 0; // Subtract b from a. for (; aL--;) { a[aL] -= i; i = a[aL] < b[aL] ? 1 : 0; a[aL] = i * base + a[aL] - b[aL]; } // Remove leading zeros. for (; !a[0] && a.length > 1;) a.shift(); } return function (x, y, pr, rm, dp, base) { var cmp, e, i, k, logBase, more, prod, prodL, q, qd, rem, remL, rem0, sd, t, xi, xL, yd0, yL, yz, Ctor = x.constructor, sign = x.s == y.s ? 1 : -1, xd = x.d, yd = y.d; // Either NaN, Infinity or 0? if (!xd || !xd[0] || !yd || !yd[0]) { return new Ctor(// Return NaN if either NaN, or both Infinity or 0. !x.s || !y.s || (xd ? yd && xd[0] == yd[0] : !yd) ? NaN : // Return ±0 if x is 0 or y is ±Infinity, or return ±Infinity as y is 0. xd && xd[0] == 0 || !yd ? sign * 0 : sign / 0); } if (base) { logBase = 1; e = x.e - y.e; } else { base = BASE; logBase = LOG_BASE; e = mathfloor(x.e / logBase) - mathfloor(y.e / logBase); } yL = yd.length; xL = xd.length; q = new Ctor(sign); qd = q.d = []; // Result exponent may be one less than e. // The digit array of a Decimal from toStringBinary may have trailing zeros. 

    // Skip leading words that match to decide whether the quotient exponent must drop by one.
    for (i = 0; yd[i] == (xd[i] || 0); i++);

    if (yd[i] > (xd[i] || 0)) e--;

    if (pr == null) {
      sd = pr = Ctor.precision;
      rm = Ctor.rounding;
    } else if (dp) {
      sd = pr + (x.e - y.e) + 1;
    } else {
      sd = pr;
    }

    if (sd < 0) {
      qd.push(1);
      more = true;
    } else {

      // Convert precision in number of base 10 digits to base 1e7 digits.
      sd = sd / logBase + 2 | 0;
      i = 0;

      // divisor < 1e7
      if (yL == 1) {
        k = 0;
        yd = yd[0];
        sd++;

        // k is the carry.
        for (; (i < xL || k) && sd--; i++) {
          t = k * base + (xd[i] || 0);
          qd[i] = t / yd | 0;
          k = t % yd | 0;
        }

        more = k || i < xL;

      // divisor >= 1e7
      } else {

        // Normalise xd and yd so highest order digit of yd is >= base/2
        k = base / (yd[0] + 1) | 0;

        if (k > 1) {
          yd = multiplyInteger(yd, k, base);
          xd = multiplyInteger(xd, k, base);
          yL = yd.length;
          xL = xd.length;
        }

        xi = yL;
        rem = xd.slice(0, yL);
        remL = rem.length;

        // Add zeros to make remainder as long as divisor.
        for (; remL < yL;) rem[remL++] = 0;

        yz = yd.slice();
        yz.unshift(0);
        yd0 = yd[0];

        if (yd[1] >= base / 2) ++yd0;

        do {
          k = 0;

          // Compare divisor and remainder.
          cmp = compare(yd, rem, yL, remL);

          // If divisor < remainder.
          if (cmp < 0) {

            // Calculate trial digit, k.
            rem0 = rem[0];
            if (yL != remL) rem0 = rem0 * base + (rem[1] || 0);

            // k will be how many times the divisor goes into the current remainder.
            k = rem0 / yd0 | 0;

            //  Algorithm:
            //  1. product = divisor * trial digit (k)
            //  2. if product > remainder: product -= divisor, k--
            //  3. remainder -= product
            //  4. if product was < remainder at 2:
            //    5. compare new remainder and divisor
            //    6. If remainder > divisor: remainder -= divisor, k++

            if (k > 1) {
              if (k >= base) k = base - 1;

              // product = divisor * trial digit.
              prod = multiplyInteger(yd, k, base);
              prodL = prod.length;
              remL = rem.length;

              // Compare product and remainder.
              cmp = compare(prod, rem, prodL, remL);

              // product > remainder.
              if (cmp == 1) {
                k--;

                // Subtract divisor from product.
                subtract(prod, yL < prodL ? yz : yd, prodL, base);
              }
            } else {

              // cmp is -1.
              // If k is 0, there is no need to compare yd and rem again below, so change cmp to 1
              // to avoid it. If k is 1 there is a need to compare yd and rem again below.
              if (k == 0) cmp = k = 1;
              prod = yd.slice();
            }

            prodL = prod.length;
            if (prodL < remL) prod.unshift(0);

            // Subtract product from remainder.
            subtract(rem, prod, remL, base);

            // If product was < previous remainder.
            if (cmp == -1) {
              remL = rem.length;

              // Compare divisor and new remainder.
              cmp = compare(yd, rem, yL, remL);

              // If divisor < new remainder, subtract divisor from remainder.
              if (cmp < 1) {
                k++;

                // Subtract divisor from remainder.
                subtract(rem, yL < remL ? yz : yd, remL, base);
              }
            }

            remL = rem.length;
          } else if (cmp === 0) {
            k++;
            rem = [0];
          }    // if cmp === 1, k will be 0

          // Add the next digit, k, to the result array.
          qd[i++] = k;

          // Update the remainder.
          if (cmp && rem[0]) {
            rem[remL++] = xd[xi] || 0;
          } else {
            rem = [xd[xi]];
            remL = 1;
          }

        } while ((xi++ < xL || rem[0] !== void 0) && sd--);

        more = rem[0] !== void 0;
      }

      // Leading zero?
      if (!qd[0]) qd.shift();
    }

    // logBase is 1 when divide is being used for base conversion.
    if (logBase == 1) {
      q.e = e;
      // `inexact` is a module-level flag read by callers doing base conversion.
      inexact = more;
    } else {

      // To calculate q.e, first get the number of digits of qd[0].
      for (i = 1, k = qd[0]; k >= 10; k /= 10) i++;
      q.e = i + e * logBase - 1;

      finalise(q, dp ? pr + q.e + 1 : pr, rm, more);
    }

    return q;
  };
})();


/*
 * Round `x` to `sd` significant digits using rounding mode `rm`.
 * Check for over/under-flow.
 *
 * `isTruncated` signals that digits beyond those present were discarded, so the value is
 * known to be inexact even when the visible rounding digits are zero.
 */
function finalise(x, sd, rm, isTruncated) {
  var digits, i, j, k, rd, roundUp, w, xd, xdi,
    Ctor = x.constructor;

  // Don't round if sd is null or undefined.
  out: if (sd != null) {
    xd = x.d;

    // Infinity/NaN.
    if (!xd) return x;

    // rd: the rounding digit, i.e. the digit after the digit that may be rounded up.
    // w: the word of xd containing rd, a base 1e7 number.
    // xdi: the index of w within xd.
    // digits: the number of digits of w.
    // i: what would be the index of rd within w if all the numbers were 7 digits long (i.e.
if
    // they had leading zeros)
    // j: if > 0, the actual index of rd within w (if < 0, rd is a leading zero).

    // Get the length of the first word of the digits array xd.
    for (digits = 1, k = xd[0]; k >= 10; k /= 10) digits++;
    i = sd - digits;

    // Is the rounding digit in the first word of xd?
    if (i < 0) {
      i += LOG_BASE;
      j = sd;
      w = xd[xdi = 0];

      // Get the rounding digit at index j of w.
      rd = w / mathpow(10, digits - j - 1) % 10 | 0;
    } else {
      xdi = Math.ceil((i + 1) / LOG_BASE);
      k = xd.length;
      if (xdi >= k) {
        if (isTruncated) {

          // Needed by `naturalExponential`, `naturalLogarithm` and `squareRoot`.
          for (; k++ <= xdi;) xd.push(0);
          w = rd = 0;
          digits = 1;
          i %= LOG_BASE;
          j = i - LOG_BASE + 1;
        } else {
          break out;
        }
      } else {
        w = k = xd[xdi];

        // Get the number of digits of w.
        for (digits = 1; k >= 10; k /= 10) digits++;

        // Get the index of rd within w.
        i %= LOG_BASE;

        // Get the index of rd within w, adjusted for leading zeros.
        // The number of leading zeros of w is given by LOG_BASE - digits.
        j = i - LOG_BASE + digits;

        // Get the rounding digit at index j of w.
        rd = j < 0 ? 0 : w / mathpow(10, digits - j - 1) % 10 | 0;
      }
    }

    // Are there any non-zero digits after the rounding digit?
    isTruncated = isTruncated || sd < 0 ||
      xd[xdi + 1] !== void 0 || (j < 0 ? w : w % mathpow(10, digits - j - 1));

    // The expression `w % mathpow(10, digits - j - 1)` returns all the digits of w to the right
    // of the digit at (left-to-right) index j, e.g. if w is 908714 and j is 2, the expression
    // will give 714.

    // Decide the rounding direction: modes 0-3 depend only on whether any discarded digit is
    // non-zero; modes 4-8 depend on the rounding digit (and, for half-even, its left neighbour).
    roundUp = rm < 4
      ? (rd || isTruncated) && (rm == 0 || rm == (x.s < 0 ? 3 : 2))
      : rd > 5 || rd == 5 && (rm == 4 || isTruncated || rm == 6 &&

        // Check whether the digit to the left of the rounding digit is odd.
        ((i > 0 ? j > 0 ? w / mathpow(10, digits - j) : 0 : xd[xdi - 1]) % 10) & 1 ||
          rm == (x.s < 0 ? 8 : 7));

    if (sd < 1 || !xd[0]) {
      xd.length = 0;
      if (roundUp) {

        // Convert sd to decimal places.
        sd -= x.e + 1;

        // 1, 0.1, 0.01, 0.001, 0.0001 etc.
        xd[0] = mathpow(10, (LOG_BASE - sd % LOG_BASE) % LOG_BASE);
        x.e = -sd || 0;
      } else {

        // Zero.
        xd[0] = x.e = 0;
      }

      return x;
    }

    // Remove excess digits.
    if (i == 0) {
      xd.length = xdi;
      k = 1;
      xdi--;
    } else {
      xd.length = xdi + 1;
      k = mathpow(10, LOG_BASE - i);

      // E.g. 56700 becomes 56000 if 7 is the rounding digit.
      // j > 0 means i > number of leading zeros of w.
      xd[xdi] = j > 0 ? (w / mathpow(10, digits - j) % mathpow(10, j) | 0) * k : 0;
    }

    if (roundUp) {
      for (;;) {

        // Is the digit to be rounded up in the first word of xd?
        if (xdi == 0) {

          // i will be the length of xd[0] before k is added.
          for (i = 1, j = xd[0]; j >= 10; j /= 10) i++;
          j = xd[0] += k;
          for (k = 1; j >= 10; j /= 10) k++;

          // if i != k the length has increased.
          if (i != k) {
            x.e++;
            if (xd[0] == BASE) xd[0] = 1;
          }

          break;
        } else {
          // Propagate the carry leftwards through the words.
          xd[xdi] += k;
          if (xd[xdi] != BASE) break;
          xd[xdi--] = 0;
          k = 1;
        }
      }
    }

    // Remove trailing zeros.
    for (i = xd.length; xd[--i] === 0;) xd.pop();
  }

  if (external) {

    // Overflow?
    if (x.e > Ctor.maxE) {

      // Infinity.
      x.d = null;
      x.e = NaN;

    // Underflow?
    } else if (x.e < Ctor.minE) {

      // Zero.
      x.e = 0;
      x.d = [0];
      // Ctor.underflow = true;
    } // else Ctor.underflow = false;
  }

  return x;
}


// Format the finite Decimal `x` as a string: exponential notation when `isExp` is truthy,
// otherwise normal notation; `sd` pads the output to that many significant digits.
function finiteToString(x, isExp, sd) {
  if (!x.isFinite()) return nonFiniteToString(x);
  var k,
    e = x.e,
    str = digitsToString(x.d),
    len = str.length;

  if (isExp) {
    if (sd && (k = sd - len) > 0) {
      str = str.charAt(0) + '.' + str.slice(1) + getZeroString(k);
    } else if (len > 1) {
      str = str.charAt(0) + '.' + str.slice(1);
    }

    str = str + (x.e < 0 ? 'e' : 'e+') + x.e;
  } else if (e < 0) {
    str = '0.' + getZeroString(-e - 1) + str;
    if (sd && (k = sd - len) > 0) str += getZeroString(k);
  } else if (e >= len) {
    str += getZeroString(e + 1 - len);
    if (sd && (k = sd - e - 1) > 0) str = str + '.' + getZeroString(k);
  } else {
    if ((k = e + 1) < len) str = str.slice(0, k) + '.'
+ str.slice(k); if (sd && (k = sd - len) > 0) { if (e + 1 === len) str += '.'; str += getZeroString(k); } } return str; } // Calculate the base 10 exponent from the base 1e7 exponent. function getBase10Exponent(digits, e) { var w = digits[0]; // Add the number of digits of the first word of the digits array. for ( e *= LOG_BASE; w >= 10; w /= 10) e++; return e; } function getLn10(Ctor, sd, pr) { if (sd > LN10_PRECISION) { // Reset global state in case the exception is caught. external = true; if (pr) Ctor.precision = pr; throw Error(precisionLimitExceeded); } return finalise(new Ctor(LN10), sd, 1, true); } function getPi(Ctor, sd, rm) { if (sd > PI_PRECISION) throw Error(precisionLimitExceeded); return finalise(new Ctor(PI), sd, rm, true); } function getPrecision(digits) { var w = digits.length - 1, len = w * LOG_BASE + 1; w = digits[w]; // If non-zero... if (w) { // Subtract the number of trailing zeros of the last word. for (; w % 10 == 0; w /= 10) len--; // Add the number of digits of the first word. for (w = digits[0]; w >= 10; w /= 10) len++; } return len; } function getZeroString(k) { var zs = ''; for (; k--;) zs += '0'; return zs; } /* * Return a new Decimal whose value is the value of Decimal `x` to the power `n`, where `n` is an * integer of type number. * * Implements 'exponentiation by squaring'. Called by `pow` and `parseOther`. * */ function intPow(Ctor, x, n, pr) { var isTruncated, r = new Ctor(1), // Max n of 9007199254740991 takes 53 loop iterations. // Maximum digits array length; leaves [28, 34] guard digits. k = Math.ceil(pr / LOG_BASE + 4); external = false; for (;;) { if (n % 2) { r = r.times(x); if (truncate(r.d, k)) isTruncated = true; } n = mathfloor(n / 2); if (n === 0) { // To ensure correct rounding when r.d is truncated, increment the last word if it is zero. 
n = r.d.length - 1; if (isTruncated && r.d[n] === 0) ++r.d[n]; break; } x = x.times(x); truncate(x.d, k); } external = true; return r; } function isOdd(n) { return n.d[n.d.length - 1] & 1; } /* * Handle `max` and `min`. `ltgt` is 'lt' or 'gt'. */ function maxOrMin(Ctor, args, ltgt) { var y, x = new Ctor(args[0]), i = 0; for (; ++i < args.length;) { y = new Ctor(args[i]); if (!y.s) { x = y; break; } else if (x[ltgt](y)) { x = y; } } return x; } /* * Return a new Decimal whose value is the natural exponential of `x` rounded to `sd` significant * digits. * * Taylor/Maclaurin series. * * exp(x) = x^0/0! + x^1/1! + x^2/2! + x^3/3! + ... * * Argument reduction: * Repeat x = x / 32, k += 5, until |x| < 0.1 * exp(x) = exp(x / 2^k)^(2^k) * * Previously, the argument was initially reduced by * exp(x) = exp(r) * 10^k where r = x - k * ln10, k = floor(x / ln10) * to first put r in the range [0, ln10], before dividing by 32 until |x| < 0.1, but this was * found to be slower than just dividing repeatedly by 32 as above. * * Max integer argument: exp('20723265836946413') = 6.3e+9000000000000000 * Min integer argument: exp('-20723265836946411') = 1.2e-9000000000000000 * (Math object integer min/max: Math.exp(709) = 8.2e+307, Math.exp(-745) = 5e-324) * * exp(Infinity) = Infinity * exp(-Infinity) = 0 * exp(NaN) = NaN * exp(±0) = 1 * * exp(x) is non-terminating for any finite, non-zero x. * * The result will always be correctly rounded. * */ function naturalExponential(x, sd) { var denominator, guard, j, pow, sum, t, wpr, rep = 0, i = 0, k = 0, Ctor = x.constructor, rm = Ctor.rounding, pr = Ctor.precision; // 0/NaN/Infinity? if (!x.d || !x.d[0] || x.e > 17) { return new Ctor(x.d ? !x.d[0] ? 1 : x.s < 0 ? 0 : 1 / 0 : x.s ? x.s < 0 ? 

      0 : x : 0 / 0);
  }

  if (sd == null) {
    external = false;
    wpr = pr;
  } else {
    wpr = sd;
  }

  t = new Ctor(0.03125);

  // while abs(x) >= 0.1
  while (x.e > -2) {

    // x = x / 2^5
    x = x.times(t);
    k += 5;
  }

  // Use 2 * log10(2^k) + 5 (empirically derived) to estimate the increase in precision
  // necessary to ensure the first 4 rounding digits are correct.
  guard = Math.log(mathpow(2, k)) / Math.LN10 * 2 + 5 | 0;
  wpr += guard;
  denominator = pow = sum = new Ctor(1);
  Ctor.precision = wpr;

  for (;;) {
    pow = finalise(pow.times(x), wpr, 1);
    denominator = denominator.times(++i);
    t = sum.plus(divide(pow, denominator, wpr, 1));

    // Stop when a term no longer changes the first `wpr` digits of the sum.
    if (digitsToString(t.d).slice(0, wpr) === digitsToString(sum.d).slice(0, wpr)) {
      j = k;
      while (j--) sum = finalise(sum.times(sum), wpr, 1);

      // Check to see if the first 4 rounding digits are [49]999.
      // If so, repeat the summation with a higher precision, otherwise
      // e.g. with precision: 18, rounding: 1
      // exp(18.404272462595034083567793919843761) = 98372560.1229999999 (should be 98372560.123)
      // `wpr - guard` is the index of first rounding digit.
      if (sd == null) {

        if (rep < 3 && checkRoundingDigits(sum.d, wpr - guard, rm, rep)) {
          Ctor.precision = wpr += 10;
          denominator = pow = t = new Ctor(1);
          i = 0;
          rep++;
        } else {
          return finalise(sum, Ctor.precision = pr, rm, external = true);
        }
      } else {
        Ctor.precision = pr;
        return sum;
      }
    }

    sum = t;
  }
}


/*
 * Return a new Decimal whose value is the natural logarithm of `x` rounded to `sd` significant
 * digits.
 *
 *  ln(-n)        = NaN
 *  ln(0)         = -Infinity
 *  ln(-0)        = -Infinity
 *  ln(1)         = 0
 *  ln(Infinity)  = Infinity
 *  ln(-Infinity) = NaN
 *  ln(NaN)       = NaN
 *
 *  ln(n) (n != 1) is non-terminating.
 *
 */
function naturalLogarithm(y, sd) {
  var c, c0, denominator, e, numerator, rep, sum, t, wpr, x1, x2,
    n = 1,
    guard = 10,
    x = y,
    xd = x.d,
    Ctor = x.constructor,
    rm = Ctor.rounding,
    pr = Ctor.precision;

  // Is x negative or Infinity, NaN, 0 or 1?
  if (x.s < 0 || !xd || !xd[0] || !x.e && xd[0] == 1 && xd.length == 1) {
    return new Ctor(xd && !xd[0] ? -1 / 0 : x.s != 1 ? NaN : xd ? 0 : x);
  }

  if (sd == null) {
    external = false;
    wpr = pr;
  } else {
    wpr = sd;
  }

  Ctor.precision = wpr += guard;
  c = digitsToString(xd);
  c0 = c.charAt(0);

  if (Math.abs(e = x.e) < 1.5e15) {

    // Argument reduction.
    // The series converges faster the closer the argument is to 1, so using
    // ln(a^b) = b * ln(a), ln(a) = ln(a^b) / b
    // multiply the argument by itself until the leading digits of the significand are 7, 8, 9,
    // 10, 11, 12 or 13, recording the number of multiplications so the sum of the series can
    // later be divided by this number, then separate out the power of 10 using
    // ln(a*10^b) = ln(a) + b*ln(10).

    // max n is 21 (gives 0.9, 1.0 or 1.1) (9e15 / 21 = 4.2e14).
    //while (c0 < 9 && c0 != 1 || c0 == 1 && c.charAt(1) > 1) {

    // max n is 6 (gives 0.7 - 1.3)
    while (c0 < 7 && c0 != 1 || c0 == 1 && c.charAt(1) > 3) {
      x = x.times(y);
      c = digitsToString(x.d);
      c0 = c.charAt(0);
      n++;
    }

    e = x.e;

    if (c0 > 1) {
      x = new Ctor('0.' + c);
      e++;
    } else {
      x = new Ctor(c0 + '.' + c.slice(1));
    }
  } else {

    // The argument reduction method above may result in overflow if the argument y is a massive
    // number with exponent >= 1500000000000000 (9e15 / 6 = 1.5e15), so instead recall this
    // function using ln(x*10^e) = ln(x) + e*ln(10).
    t = getLn10(Ctor, wpr + 2, pr).times(e + '');
    x = naturalLogarithm(new Ctor(c0 + '.' + c.slice(1)), wpr - guard).plus(t);
    Ctor.precision = pr;

    return sd == null ? finalise(x, pr, rm, external = true) : x;
  }

  // x1 is x reduced to a value near 1.
  x1 = x;

  // Taylor series.
  // ln(y) = ln((1 + x)/(1 - x)) = 2(x + x^3/3 + x^5/5 + x^7/7 + ...)
  // where x = (y - 1)/(y + 1) (|x| < 1)
  sum = numerator = x = divide(x.minus(1), x.plus(1), wpr, 1);
  x2 = finalise(x.times(x), wpr, 1);
  denominator = 3;

  for (;;) {
    numerator = finalise(numerator.times(x2), wpr, 1);
    t = sum.plus(divide(numerator, new Ctor(denominator), wpr, 1));

    if (digitsToString(t.d).slice(0, wpr) === digitsToString(sum.d).slice(0, wpr)) {
      sum = sum.times(2);

      // Reverse the argument reduction. Check that e is not 0 because, besides preventing an
      // unnecessary calculation, -0 + 0 = +0 and to ensure correct rounding -0 needs to stay -0.
      if (e !== 0) sum = sum.plus(getLn10(Ctor, wpr + 2, pr).times(e + ''));
      sum = divide(sum, new Ctor(n), wpr, 1);

      // Is rm > 3 and the first 4 rounding digits 4999, or rm < 4 (or the summation has
      // been repeated previously) and the first 4 rounding digits 9999?
      // If so, restart the summation with a higher precision, otherwise
      // e.g. with precision: 12, rounding: 1
      // ln(135520028.6126091714265381533) = 18.7246299999 when it should be 18.72463.
      // `wpr - guard` is the index of first rounding digit.
      if (sd == null) {
        if (checkRoundingDigits(sum.d, wpr - guard, rm, rep)) {
          Ctor.precision = wpr += guard;
          t = numerator = x = divide(x1.minus(1), x1.plus(1), wpr, 1);
          x2 = finalise(x.times(x), wpr, 1);
          denominator = rep = 1;
        } else {
          return finalise(sum, Ctor.precision = pr, rm, external = true);
        }
      } else {
        Ctor.precision = pr;
        return sum;
      }
    }

    sum = t;
    denominator += 2;
  }
}


// ±Infinity, NaN.
function nonFiniteToString(x) {
  // Unsigned.
  return String(x.s * x.s / 0);
}


/*
 * Parse the value of a new Decimal `x` from string `str`.
 */
function parseDecimal(x, str) {
  var e, i, len;

  // Decimal point?
  if ((e = str.indexOf('.')) > -1) str = str.replace('.', '');

  // Exponential form?
  if ((i = str.search(/e/i)) > 0) {

    // Determine exponent.
    if (e < 0) e = i;
    e += +str.slice(i + 1);
    str = str.substring(0, i);
  } else if (e < 0) {

    // Integer.
    e = str.length;
  }

  // Determine leading zeros.
for (i = 0; str.charCodeAt(i) === 48; i++); // Determine trailing zeros. for (len = str.length; str.charCodeAt(len - 1) === 48; --len); str = str.slice(i, len); if (str) { len -= i; x.e = e = e - i - 1; x.d = []; // Transform base // e is the base 10 exponent. // i is where to slice str to get the first word of the digits array. i = (e + 1) % LOG_BASE; if (e < 0) i += LOG_BASE; if (i < len) { if (i) x.d.push(+str.slice(0, i)); for (len -= LOG_BASE; i < len;) x.d.push(+str.slice(i, i += LOG_BASE)); str = str.slice(i); i = LOG_BASE - str.length; } else { i -= len; } for (; i--;) str += '0'; x.d.push(+str); if (external) { // Overflow? if (x.e > x.constructor.maxE) { // Infinity. x.d = null; x.e = NaN; // Underflow? } else if (x.e < x.constructor.minE) { // Zero. x.e = 0; x.d = [0]; // x.constructor.underflow = true; } // else x.constructor.underflow = false; } } else { // Zero. x.e = 0; x.d = [0]; } return x; } /* * Parse the value of a new Decimal `x` from a string `str`, which is not a decimal value. */ function parseOther(x, str) { var base, Ctor, divisor, i, isFloat, len, p, xd, xe; if (str === 'Infinity' || str === 'NaN') { if (!+str) x.s = NaN; x.e = NaN; x.d = null; return x; } if (isHex.test(str)) { base = 16; str = str.toLowerCase(); } else if (isBinary.test(str)) { base = 2; } else if (isOctal.test(str)) { base = 8; } else { throw Error(invalidArgument + str); } // Is there a binary exponent part? i = str.search(/p/i); if (i > 0) { p = +str.slice(i + 1); str = str.substring(2, i); } else { str = str.slice(2); } // Convert `str` as an integer then divide the result by `base` raised to a power such that the // fraction part will be restored. i = str.indexOf('.'); isFloat = i >= 0; Ctor = x.constructor; if (isFloat) { str = str.replace('.', ''); len = str.length; i = len - i; // log[10](16) = 1.2041... , log[10](88) = 1.9444.... divisor = intPow(Ctor, new Ctor(base), i, i * 2); } xd = convertBase(str, base, BASE); xe = xd.length - 1; // Remove trailing zeros. 
for (i = xe; xd[i] === 0; --i) xd.pop(); if (i < 0) return new Ctor(x.s * 0); x.e = getBase10Exponent(xd, xe); x.d = xd; external = false; // At what precision to perform the division to ensure exact conversion? // maxDecimalIntegerPartDigitCount = ceil(log[10](b) * otherBaseIntegerPartDigitCount) // log[10](2) = 0.30103, log[10](8) = 0.90309, log[10](16) = 1.20412 // E.g. ceil(1.2 * 3) = 4, so up to 4 decimal digits are needed to represent 3 hex int digits. // maxDecimalFractionPartDigitCount = {Hex:4|Oct:3|Bin:1} * otherBaseFractionPartDigitCount // Therefore using 4 * the number of digits of str will always be enough. if (isFloat) x = divide(x, divisor, len * 4); // Multiply by the binary exponent part if present. if (p) x = x.times(Math.abs(p) < 54 ? mathpow(2, p) : Decimal.pow(2, p)); external = true; return x; } /* * sin(x) = x - x^3/3! + x^5/5! - ... * |x| < pi/2 * */ function sine(Ctor, x) { var k, len = x.d.length; if (len < 3) return taylorSeries(Ctor, 2, x, x); // Argument reduction: sin(5x) = 16*sin^5(x) - 20*sin^3(x) + 5*sin(x) // i.e. sin(x) = 16*sin^5(x/5) - 20*sin^3(x/5) + 5*sin(x/5) // and sin(x) = sin(x/5)(5 + sin^2(x/5)(16sin^2(x/5) - 20)) // Estimate the optimum number of times to use the argument reduction. k = 1.4 * Math.sqrt(len); k = k > 16 ? 16 : k | 0; x = x.times(1 / tinyPow(5, k)); x = taylorSeries(Ctor, 2, x, x); // Reverse argument reduction var sin2_x, d5 = new Ctor(5), d16 = new Ctor(16), d20 = new Ctor(20); for (; k--;) { sin2_x = x.times(x); x = x.times(d5.plus(sin2_x.times(d16.times(sin2_x).minus(d20)))); } return x; } // Calculate Taylor series for `cos`, `cosh`, `sin` and `sinh`. function taylorSeries(Ctor, n, x, y, isHyperbolic) { var j, t, u, x2, i = 1, pr = Ctor.precision, k = Math.ceil(pr / LOG_BASE); external = false; x2 = x.times(x); u = new Ctor(y); for (;;) { t = divide(u.times(x2), new Ctor(n++ * n++), pr, 1); u = isHyperbolic ? 
y.plus(t) : y.minus(t); y = divide(t.times(x2), new Ctor(n++ * n++), pr, 1); t = u.plus(y); if (t.d[k] !== void 0) { for (j = k; t.d[j] === u.d[j] && j--;); if (j == -1) break; } j = u; u = y; y = t; t = j; i++; } external = true; t.d.length = k + 1; return t; } // Exponent e must be positive and non-zero. function tinyPow(b, e) { var n = b; while (--e) n *= b; return n; } // Return the absolute value of `x` reduced to less than or equal to half pi. function toLessThanHalfPi(Ctor, x) { var t, isNeg = x.s < 0, pi = getPi(Ctor, Ctor.precision, 1), halfPi = pi.times(0.5); x = x.abs(); if (x.lte(halfPi)) { quadrant = isNeg ? 4 : 1; return x; } t = x.divToInt(pi); if (t.isZero()) { quadrant = isNeg ? 3 : 2; } else { x = x.minus(t.times(pi)); // 0 <= x < pi if (x.lte(halfPi)) { quadrant = isOdd(t) ? (isNeg ? 2 : 3) : (isNeg ? 4 : 1); return x; } quadrant = isOdd(t) ? (isNeg ? 1 : 4) : (isNeg ? 3 : 2); } return x.minus(pi).abs(); } /* * Return the value of Decimal `x` as a string in base `baseOut`. * * If the optional `sd` argument is present include a binary exponent suffix. */ function toStringBinary(x, baseOut, sd, rm) { var base, e, i, k, len, roundUp, str, xd, y, Ctor = x.constructor, isExp = sd !== void 0; if (isExp) { checkInt32(sd, 1, MAX_DIGITS); if (rm === void 0) rm = Ctor.rounding; else checkInt32(rm, 0, 8); } else { sd = Ctor.precision; rm = Ctor.rounding; } if (!x.isFinite()) { str = nonFiniteToString(x); } else { str = finiteToString(x); i = str.indexOf('.'); // Use exponential notation according to `toExpPos` and `toExpNeg`? 
No, but if required: // maxBinaryExponent = floor((decimalExponent + 1) * log[2](10)) // minBinaryExponent = floor(decimalExponent * log[2](10)) // log[2](10) = 3.321928094887362347870319429489390175864 if (isExp) { base = 2; if (baseOut == 16) { sd = sd * 4 - 3; } else if (baseOut == 8) { sd = sd * 3 - 2; } } else { base = baseOut; } // Convert the number as an integer then divide the result by its base raised to a power such // that the fraction part will be restored. // Non-integer. if (i >= 0) { str = str.replace('.', ''); y = new Ctor(1); y.e = str.length - i; y.d = convertBase(finiteToString(y), 10, base); y.e = y.d.length; } xd = convertBase(str, 10, base); e = len = xd.length; // Remove trailing zeros. for (; xd[--len] == 0;) xd.pop(); if (!xd[0]) { str = isExp ? '0p+0' : '0'; } else { if (i < 0) { e--; } else { x = new Ctor(x); x.d = xd; x.e = e; x = divide(x, y, sd, rm, 0, base); xd = x.d; e = x.e; roundUp = inexact; } // The rounding digit, i.e. the digit after the digit that may be rounded up. i = xd[sd]; k = base / 2; roundUp = roundUp || xd[sd + 1] !== void 0; roundUp = rm < 4 ? (i !== void 0 || roundUp) && (rm === 0 || rm === (x.s < 0 ? 3 : 2)) : i > k || i === k && (rm === 4 || roundUp || rm === 6 && xd[sd - 1] & 1 || rm === (x.s < 0 ? 8 : 7)); xd.length = sd; if (roundUp) { // Rounding up may mean the previous digit has to be rounded up and so on. for (; ++xd[--sd] > base - 1;) { xd[sd] = 0; if (!sd) { ++e; xd.unshift(1); } } } // Determine trailing zeros. for (len = xd.length; !xd[len - 1]; --len); // E.g. [4, 11, 15] becomes 4bf. for (i = 0, str = ''; i < len; i++) str += NUMERALS.charAt(xd[i]); // Add binary exponent suffix? if (isExp) { if (len > 1) { if (baseOut == 16 || baseOut == 8) { i = baseOut == 16 ? 
4 : 3; for (--len; len % i; len++) str += '0'; xd = convertBase(str, base, baseOut); for (len = xd.length; !xd[len - 1]; --len); // xd[0] will always be be 1 for (i = 1, str = '1.'; i < len; i++) str += NUMERALS.charAt(xd[i]); } else { str = str.charAt(0) + '.' + str.slice(1); } } str = str + (e < 0 ? 'p' : 'p+') + e; } else if (e < 0) { for (; ++e;) str = '0' + str; str = '0.' + str; } else { if (++e > len) for (e -= len; e-- ;) str += '0'; else if (e < len) str = str.slice(0, e) + '.' + str.slice(e); } } str = (baseOut == 16 ? '0x' : baseOut == 2 ? '0b' : baseOut == 8 ? '0o' : '') + str; } return x.s < 0 ? '-' + str : str; } // Does not strip trailing zeros. function truncate(arr, len) { if (arr.length > len) { arr.length = len; return true; } } // Decimal methods /* * abs * acos * acosh * add * asin * asinh * atan * atanh * atan2 * cbrt * ceil * clone * config * cos * cosh * div * exp * floor * hypot * ln * log * log2 * log10 * max * min * mod * mul * pow * random * round * set * sign * sin * sinh * sqrt * sub * tan * tanh * trunc */ /* * Return a new Decimal whose value is the absolute value of `x`. * * x {number|string|Decimal} * */ function abs(x) { return new this(x).abs(); } /* * Return a new Decimal whose value is the arccosine in radians of `x`. * * x {number|string|Decimal} * */ function acos(x) { return new this(x).acos(); } /* * Return a new Decimal whose value is the inverse of the hyperbolic cosine of `x`, rounded to * `precision` significant digits using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function acosh(x) { return new this(x).acosh(); } /* * Return a new Decimal whose value is the sum of `x` and `y`, rounded to `precision` significant * digits using rounding mode `rounding`. 
* * x {number|string|Decimal} * y {number|string|Decimal} * */ function add(x, y) { return new this(x).plus(y); } /* * Return a new Decimal whose value is the arcsine in radians of `x`, rounded to `precision` * significant digits using rounding mode `rounding`. * * x {number|string|Decimal} * */ function asin(x) { return new this(x).asin(); } /* * Return a new Decimal whose value is the inverse of the hyperbolic sine of `x`, rounded to * `precision` significant digits using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function asinh(x) { return new this(x).asinh(); } /* * Return a new Decimal whose value is the arctangent in radians of `x`, rounded to `precision` * significant digits using rounding mode `rounding`. * * x {number|string|Decimal} * */ function atan(x) { return new this(x).atan(); } /* * Return a new Decimal whose value is the inverse of the hyperbolic tangent of `x`, rounded to * `precision` significant digits using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function atanh(x) { return new this(x).atanh(); } /* * Return a new Decimal whose value is the arctangent in radians of `y/x` in the range -pi to pi * (inclusive), rounded to `precision` significant digits using rounding mode `rounding`. * * Domain: [-Infinity, Infinity] * Range: [-pi, pi] * * y {number|string|Decimal} The y-coordinate. * x {number|string|Decimal} The x-coordinate. 
* * atan2(±0, -0) = ±pi * atan2(±0, +0) = ±0 * atan2(±0, -x) = ±pi for x > 0 * atan2(±0, x) = ±0 for x > 0 * atan2(-y, ±0) = -pi/2 for y > 0 * atan2(y, ±0) = pi/2 for y > 0 * atan2(±y, -Infinity) = ±pi for finite y > 0 * atan2(±y, +Infinity) = ±0 for finite y > 0 * atan2(±Infinity, x) = ±pi/2 for finite x * atan2(±Infinity, -Infinity) = ±3*pi/4 * atan2(±Infinity, +Infinity) = ±pi/4 * atan2(NaN, x) = NaN * atan2(y, NaN) = NaN * */ function atan2(y, x) { y = new this(y); x = new this(x); var r, pr = this.precision, rm = this.rounding, wpr = pr + 4; // Either NaN if (!y.s || !x.s) { r = new this(NaN); // Both ±Infinity } else if (!y.d && !x.d) { r = getPi(this, wpr, 1).times(x.s > 0 ? 0.25 : 0.75); r.s = y.s; // x is ±Infinity or y is ±0 } else if (!x.d || y.isZero()) { r = x.s < 0 ? getPi(this, pr, rm) : new this(0); r.s = y.s; // y is ±Infinity or x is ±0 } else if (!y.d || x.isZero()) { r = getPi(this, wpr, 1).times(0.5); r.s = y.s; // Both non-zero and finite } else if (x.s < 0) { this.precision = wpr; this.rounding = 1; r = this.atan(divide(y, x, wpr, 1)); x = getPi(this, wpr, 1); this.precision = pr; this.rounding = rm; r = y.s < 0 ? r.minus(x) : r.plus(x); } else { r = this.atan(divide(y, x, wpr, 1)); } return r; } /* * Return a new Decimal whose value is the cube root of `x`, rounded to `precision` significant * digits using rounding mode `rounding`. * * x {number|string|Decimal} * */ function cbrt(x) { return new this(x).cbrt(); } /* * Return a new Decimal whose value is `x` rounded to an integer using `ROUND_CEIL`. * * x {number|string|Decimal} * */ function ceil(x) { return finalise(x = new this(x), x.e + 1, 2); } /* * Configure global settings for a Decimal constructor. * * `obj` is an object with one or more of the following properties, * * precision {number} * rounding {number} * toExpNeg {number} * toExpPos {number} * maxE {number} * minE {number} * modulo {number} * crypto {boolean|number} * defaults {true} * * E.g. 
Decimal.config({ precision: 20, rounding: 4 }) * */ function config(obj) { if (!obj || typeof obj !== 'object') throw Error(decimalError + 'Object expected'); var i, p, v, useDefaults = obj.defaults === true, ps = [ 'precision', 1, MAX_DIGITS, 'rounding', 0, 8, 'toExpNeg', -EXP_LIMIT, 0, 'toExpPos', 0, EXP_LIMIT, 'maxE', 0, EXP_LIMIT, 'minE', -EXP_LIMIT, 0, 'modulo', 0, 9 ]; for (i = 0; i < ps.length; i += 3) { if (p = ps[i], useDefaults) this[p] = DEFAULTS[p]; if ((v = obj[p]) !== void 0) { if (mathfloor(v) === v && v >= ps[i + 1] && v <= ps[i + 2]) this[p] = v; else throw Error(invalidArgument + p + ': ' + v); } } if (p = 'crypto', useDefaults) this[p] = DEFAULTS[p]; if ((v = obj[p]) !== void 0) { if (v === true || v === false || v === 0 || v === 1) { if (v) { if (typeof crypto != 'undefined' && crypto && (crypto.getRandomValues || crypto.randomBytes)) { this[p] = true; } else { throw Error(cryptoUnavailable); } } else { this[p] = false; } } else { throw Error(invalidArgument + p + ': ' + v); } } return this; } /* * Return a new Decimal whose value is the cosine of `x`, rounded to `precision` significant * digits using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function cos(x) { return new this(x).cos(); } /* * Return a new Decimal whose value is the hyperbolic cosine of `x`, rounded to precision * significant digits using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function cosh(x) { return new this(x).cosh(); } /* * Create and return a Decimal constructor with the same configuration properties as this Decimal * constructor. * */ function clone(obj) { var i, p, ps; /* * The Decimal constructor and exported function. * Return a new Decimal instance. * * v {number|string|Decimal} A numeric value. * */ function Decimal(v) { var e, i, t, x = this; // Decimal called without new. 
if (!(x instanceof Decimal)) return new Decimal(v); // Retain a reference to this Decimal constructor, and shadow Decimal.prototype.constructor // which points to Object. x.constructor = Decimal; // Duplicate. if (v instanceof Decimal) { x.s = v.s; if (external) { if (!v.d || v.e > Decimal.maxE) { // Infinity. x.e = NaN; x.d = null; } else if (v.e < Decimal.minE) { // Zero. x.e = 0; x.d = [0]; } else { x.e = v.e; x.d = v.d.slice(); } } else { x.e = v.e; x.d = v.d ? v.d.slice() : v.d; } return; } t = typeof v; if (t === 'number') { if (v === 0) { x.s = 1 / v < 0 ? -1 : 1; x.e = 0; x.d = [0]; return; } if (v < 0) { v = -v; x.s = -1; } else { x.s = 1; } // Fast path for small integers. if (v === ~~v && v < 1e7) { for (e = 0, i = v; i >= 10; i /= 10) e++; if (external) { if (e > Decimal.maxE) { x.e = NaN; x.d = null; } else if (e < Decimal.minE) { x.e = 0; x.d = [0]; } else { x.e = e; x.d = [v]; } } else { x.e = e; x.d = [v]; } return; // Infinity, NaN. } else if (v * 0 !== 0) { if (!v) x.s = NaN; x.e = NaN; x.d = null; return; } return parseDecimal(x, v.toString()); } else if (t !== 'string') { throw Error(invalidArgument + v); } // Minus sign? if ((i = v.charCodeAt(0)) === 45) { v = v.slice(1); x.s = -1; } else { // Plus sign? if (i === 43) v = v.slice(1); x.s = 1; } return isDecimal.test(v) ? 
parseDecimal(x, v) : parseOther(x, v); } Decimal.prototype = P; Decimal.ROUND_UP = 0; Decimal.ROUND_DOWN = 1; Decimal.ROUND_CEIL = 2; Decimal.ROUND_FLOOR = 3; Decimal.ROUND_HALF_UP = 4; Decimal.ROUND_HALF_DOWN = 5; Decimal.ROUND_HALF_EVEN = 6; Decimal.ROUND_HALF_CEIL = 7; Decimal.ROUND_HALF_FLOOR = 8; Decimal.EUCLID = 9; Decimal.config = Decimal.set = config; Decimal.clone = clone; Decimal.isDecimal = isDecimalInstance; Decimal.abs = abs; Decimal.acos = acos; Decimal.acosh = acosh; // ES6 Decimal.add = add; Decimal.asin = asin; Decimal.asinh = asinh; // ES6 Decimal.atan = atan; Decimal.atanh = atanh; // ES6 Decimal.atan2 = atan2; Decimal.cbrt = cbrt; // ES6 Decimal.ceil = ceil; Decimal.cos = cos; Decimal.cosh = cosh; // ES6 Decimal.div = div; Decimal.exp = exp; Decimal.floor = floor; Decimal.hypot = hypot; // ES6 Decimal.ln = ln; Decimal.log = log; Decimal.log10 = log10; // ES6 Decimal.log2 = log2; // ES6 Decimal.max = max; Decimal.min = min; Decimal.mod = mod; Decimal.mul = mul; Decimal.pow = pow; Decimal.random = random; Decimal.round = round; Decimal.sign = sign; // ES6 Decimal.sin = sin; Decimal.sinh = sinh; // ES6 Decimal.sqrt = sqrt; Decimal.sub = sub; Decimal.tan = tan; Decimal.tanh = tanh; // ES6 Decimal.trunc = trunc; // ES6 if (obj === void 0) obj = {}; if (obj) { if (obj.defaults !== true) { ps = ['precision', 'rounding', 'toExpNeg', 'toExpPos', 'maxE', 'minE', 'modulo', 'crypto']; for (i = 0; i < ps.length;) if (!obj.hasOwnProperty(p = ps[i++])) obj[p] = this[p]; } } Decimal.config(obj); return Decimal; } /* * Return a new Decimal whose value is `x` divided by `y`, rounded to `precision` significant * digits using rounding mode `rounding`. * * x {number|string|Decimal} * y {number|string|Decimal} * */ function div(x, y) { return new this(x).div(y); } /* * Return a new Decimal whose value is the natural exponential of `x`, rounded to `precision` * significant digits using rounding mode `rounding`. 
* * x {number|string|Decimal} The power to which to raise the base of the natural log. * */ function exp(x) { return new this(x).exp(); } /* * Return a new Decimal whose value is `x` round to an integer using `ROUND_FLOOR`. * * x {number|string|Decimal} * */ function floor(x) { return finalise(x = new this(x), x.e + 1, 3); } /* * Return a new Decimal whose value is the square root of the sum of the squares of the arguments, * rounded to `precision` significant digits using rounding mode `rounding`. * * hypot(a, b, ...) = sqrt(a^2 + b^2 + ...) * * arguments {number|string|Decimal} * */ function hypot() { var i, n, t = new this(0); external = false; for (i = 0; i < arguments.length;) { n = new this(arguments[i++]); if (!n.d) { if (n.s) { external = true; return new this(1 / 0); } t = n; } else if (t.d) { t = t.plus(n.times(n)); } } external = true; return t.sqrt(); } /* * Return true if object is a Decimal instance (where Decimal is any Decimal constructor), * otherwise return false. * */ function isDecimalInstance(obj) { return obj instanceof Decimal || obj && obj.name === '[object Decimal]' || false; } /* * Return a new Decimal whose value is the natural logarithm of `x`, rounded to `precision` * significant digits using rounding mode `rounding`. * * x {number|string|Decimal} * */ function ln(x) { return new this(x).ln(); } /* * Return a new Decimal whose value is the log of `x` to the base `y`, or to base 10 if no base * is specified, rounded to `precision` significant digits using rounding mode `rounding`. * * log[y](x) * * x {number|string|Decimal} The argument of the logarithm. * y {number|string|Decimal} The base of the logarithm. * */ function log(x, y) { return new this(x).log(y); } /* * Return a new Decimal whose value is the base 2 logarithm of `x`, rounded to `precision` * significant digits using rounding mode `rounding`. 
* * x {number|string|Decimal} * */ function log2(x) { return new this(x).log(2); } /* * Return a new Decimal whose value is the base 10 logarithm of `x`, rounded to `precision` * significant digits using rounding mode `rounding`. * * x {number|string|Decimal} * */ function log10(x) { return new this(x).log(10); } /* * Return a new Decimal whose value is the maximum of the arguments. * * arguments {number|string|Decimal} * */ function max() { return maxOrMin(this, arguments, 'lt'); } /* * Return a new Decimal whose value is the minimum of the arguments. * * arguments {number|string|Decimal} * */ function min() { return maxOrMin(this, arguments, 'gt'); } /* * Return a new Decimal whose value is `x` modulo `y`, rounded to `precision` significant digits * using rounding mode `rounding`. * * x {number|string|Decimal} * y {number|string|Decimal} * */ function mod(x, y) { return new this(x).mod(y); } /* * Return a new Decimal whose value is `x` multiplied by `y`, rounded to `precision` significant * digits using rounding mode `rounding`. * * x {number|string|Decimal} * y {number|string|Decimal} * */ function mul(x, y) { return new this(x).mul(y); } /* * Return a new Decimal whose value is `x` raised to the power `y`, rounded to precision * significant digits using rounding mode `rounding`. * * x {number|string|Decimal} The base. * y {number|string|Decimal} The exponent. * */ function pow(x, y) { return new this(x).pow(y); } /* * Returns a new Decimal with a random value equal to or greater than 0 and less than 1, and with * `sd`, or `Decimal.precision` if `sd` is omitted, significant digits (or less if trailing zeros * are produced). * * [sd] {number} Significant digits. Integer, 0 to MAX_DIGITS inclusive. 
 *
 */
function random(sd) {
  var d, e, k, n,
    i = 0,
    r = new this(1),
    rd = [];

  // Default to the constructor's precision; otherwise validate the requested digits.
  if (sd === void 0) sd = this.precision;
  else checkInt32(sd, 1, MAX_DIGITS);

  // Number of base-1e7 words needed to hold `sd` decimal digits.
  k = Math.ceil(sd / LOG_BASE);

  if (!this.crypto) {
    // Non-cryptographic source: each word gets up to 7 random decimal digits.
    for (; i < k;) rd[i++] = Math.random() * 1e7 | 0;

  // Browsers supporting crypto.getRandomValues.
  } else if (crypto.getRandomValues) {
    d = crypto.getRandomValues(new Uint32Array(k));

    for (; i < k;) {
      n = d[i];

      // 0 <= n < 4294967296
      // Probability n >= 4.29e9, is 4967296 / 4294967296 = 0.00116 (1 in 865).
      // Values >= 4.29e9 are rejected (re-drawn) so that n % 1e7 is unbiased.
      if (n >= 4.29e9) {
        d[i] = crypto.getRandomValues(new Uint32Array(1))[0];
      } else {

        // 0 <= n <= 4289999999
        // 0 <= (n % 1e7) <= 9999999
        rd[i++] = n % 1e7;
      }
    }

  // Node.js supporting crypto.randomBytes.
  } else if (crypto.randomBytes) {

    // buffer — 4 bytes per word.
    d = crypto.randomBytes(k *= 4);

    for (; i < k;) {

      // 0 <= n < 2147483648 (top bit of the 4th byte is masked off).
      n = d[i] + (d[i + 1] << 8) + (d[i + 2] << 16) + ((d[i + 3] & 0x7f) << 24);

      // Probability n >= 2.14e9, is 7483648 / 2147483648 = 0.0035 (1 in 286).
      // Rejection sampling again: overwrite the 4 bytes in place and retry.
      if (n >= 2.14e9) {
        crypto.randomBytes(4).copy(d, i);
      } else {

        // 0 <= n <= 2139999999
        // 0 <= (n % 1e7) <= 9999999
        rd.push(n % 1e7);
        i += 4;
      }
    }

    i = k / 4;
  } else {
    throw Error(cryptoUnavailable);
  }

  k = rd[--i];
  sd %= LOG_BASE;

  // Convert trailing digits to zeros according to sd.
  if (k && sd) {
    n = mathpow(10, LOG_BASE - sd);
    rd[i] = (k / n | 0) * n;
  }

  // Remove trailing words which are zero.
  for (; rd[i] === 0; i--) rd.pop();

  // Zero?
  if (i < 0) {
    e = 0;
    rd = [0];
  } else {
    e = -1;

    // Remove leading words which are zero and adjust exponent accordingly.
    for (; rd[0] === 0; e -= LOG_BASE) rd.shift();

    // Count the digits of the first word of rd to determine leading zeros.
    for (k = 1, n = rd[0]; n >= 10; n /= 10) k++;

    // Adjust the exponent for leading zeros of the first word of rd.
    if (k < LOG_BASE) e -= LOG_BASE - k;
  }

  // Result: 0 <= r < 1 with at most `sd` significant digits.
  r.e = e;
  r.d = rd;

  return r;
}


/*
 * Return a new Decimal whose value is `x` rounded to an integer using rounding mode `rounding`.
 *
 * To emulate `Math.round`, set rounding to 7 (ROUND_HALF_CEIL).
* * x {number|string|Decimal} * */ function round(x) { return finalise(x = new this(x), x.e + 1, this.rounding); } /* * Return * 1 if x > 0, * -1 if x < 0, * 0 if x is 0, * -0 if x is -0, * NaN otherwise * * x {number|string|Decimal} * */ function sign(x) { x = new this(x); return x.d ? (x.d[0] ? x.s : 0 * x.s) : x.s || NaN; } /* * Return a new Decimal whose value is the sine of `x`, rounded to `precision` significant digits * using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function sin(x) { return new this(x).sin(); } /* * Return a new Decimal whose value is the hyperbolic sine of `x`, rounded to `precision` * significant digits using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function sinh(x) { return new this(x).sinh(); } /* * Return a new Decimal whose value is the square root of `x`, rounded to `precision` significant * digits using rounding mode `rounding`. * * x {number|string|Decimal} * */ function sqrt(x) { return new this(x).sqrt(); } /* * Return a new Decimal whose value is `x` minus `y`, rounded to `precision` significant digits * using rounding mode `rounding`. * * x {number|string|Decimal} * y {number|string|Decimal} * */ function sub(x, y) { return new this(x).sub(y); } /* * Return a new Decimal whose value is the tangent of `x`, rounded to `precision` significant * digits using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function tan(x) { return new this(x).tan(); } /* * Return a new Decimal whose value is the hyperbolic tangent of `x`, rounded to `precision` * significant digits using rounding mode `rounding`. * * x {number|string|Decimal} A value in radians. * */ function tanh(x) { return new this(x).tanh(); } /* * Return a new Decimal whose value is `x` truncated to an integer. * * x {number|string|Decimal} * */ function trunc(x) { return finalise(x = new this(x), x.e + 1, 1); } // Create and configure initial Decimal constructor. 
Decimal = clone(DEFAULTS); Decimal['default'] = Decimal.Decimal = Decimal; // Create the internal constants from their string values. LN10 = new Decimal(LN10); PI = new Decimal(PI); // Export. // AMD. if (typeof define == 'function' && define.amd) { define(function () { return Decimal; }); // Node and other environments that support module.exports. } else if (typeof module != 'undefined' && module.exports) { if (typeof Symbol == 'function' && typeof Symbol.iterator == 'symbol') { P[Symbol.for('nodejs.util.inspect.custom')] = P.toString; P[Symbol.toStringTag] = 'Decimal'; } module.exports = Decimal; // Browser. } else { if (!globalScope) { globalScope = typeof self != 'undefined' && self && self.self == self ? self : window; } noConflict = globalScope.Decimal; Decimal.noConflict = function () { globalScope.Decimal = noConflict; return Decimal; }; globalScope.Decimal = Decimal; } })(this);
zhihu-crawler
/zhihu_crawler-0.0.2.tar.gz/zhihu_crawler-0.0.2/zhihu_crawler/common/node_modules/decimal.js/decimal.js
decimal.js