#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.EpInfo import EpInfo


class CompanyInfo(object):
    """Alipay OpenAPI domain model: a company profile whose fields all carry EpInfo data."""

    def __init__(self):
        self._alter_list = None
        self._basic_info = None
        self._case_info_list = None
        self._entinv_list = None
        self._fr_position_list = None
        self._frinv_list = None
        self._person_list = None
        self._share_holder_list = None

    @property
    def alter_list(self):
        return self._alter_list

    @alter_list.setter
    def alter_list(self, value):
        if isinstance(value, list):
            self._alter_list = list()
            for i in value:
                if isinstance(i, EpInfo):
                    self._alter_list.append(i)
                else:
                    self._alter_list.append(EpInfo.from_alipay_dict(i))

    @property
    def basic_info(self):
        return self._basic_info

    @basic_info.setter
    def basic_info(self, value):
        if isinstance(value, EpInfo):
            self._basic_info = value
        else:
            self._basic_info = EpInfo.from_alipay_dict(value)

    @property
    def case_info_list(self):
        return self._case_info_list

    @case_info_list.setter
    def case_info_list(self, value):
        if isinstance(value, list):
            self._case_info_list = list()
            for i in value:
                if isinstance(i, EpInfo):
                    self._case_info_list.append(i)
                else:
                    self._case_info_list.append(EpInfo.from_alipay_dict(i))

    @property
    def entinv_list(self):
        return self._entinv_list

    @entinv_list.setter
    def entinv_list(self, value):
        if isinstance(value, list):
            self._entinv_list = list()
            for i in value:
                if isinstance(i, EpInfo):
                    self._entinv_list.append(i)
                else:
                    self._entinv_list.append(EpInfo.from_alipay_dict(i))

    @property
    def fr_position_list(self):
        return self._fr_position_list

    @fr_position_list.setter
    def fr_position_list(self, value):
        if isinstance(value, list):
            self._fr_position_list = list()
            for i in value:
                if isinstance(i, EpInfo):
                    self._fr_position_list.append(i)
                else:
                    self._fr_position_list.append(EpInfo.from_alipay_dict(i))

    @property
    def frinv_list(self):
        return self._frinv_list

    @frinv_list.setter
    def frinv_list(self, value):
        if isinstance(value, list):
            self._frinv_list = list()
            for i in value:
                if isinstance(i, EpInfo):
                    self._frinv_list.append(i)
                else:
                    self._frinv_list.append(EpInfo.from_alipay_dict(i))

    @property
    def person_list(self):
        return self._person_list

    @person_list.setter
    def person_list(self, value):
        if isinstance(value, list):
            self._person_list = list()
            for i in value:
                if isinstance(i, EpInfo):
                    self._person_list.append(i)
                else:
                    self._person_list.append(EpInfo.from_alipay_dict(i))

    @property
    def share_holder_list(self):
        return self._share_holder_list

    @share_holder_list.setter
    def share_holder_list(self, value):
        if isinstance(value, list):
            self._share_holder_list = list()
            for i in value:
                if isinstance(i, EpInfo):
                    self._share_holder_list.append(i)
                else:
                    self._share_holder_list.append(EpInfo.from_alipay_dict(i))

    def to_alipay_dict(self):
        params = dict()
        if self.alter_list:
            if isinstance(self.alter_list, list):
                for i in range(0, len(self.alter_list)):
                    element = self.alter_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.alter_list[i] = element.to_alipay_dict()
            if hasattr(self.alter_list, 'to_alipay_dict'):
                params['alter_list'] = self.alter_list.to_alipay_dict()
            else:
                params['alter_list'] = self.alter_list
        if self.basic_info:
            if hasattr(self.basic_info, 'to_alipay_dict'):
                params['basic_info'] = self.basic_info.to_alipay_dict()
            else:
                params['basic_info'] = self.basic_info
        if self.case_info_list:
            if isinstance(self.case_info_list, list):
                for i in range(0, len(self.case_info_list)):
                    element = self.case_info_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.case_info_list[i] = element.to_alipay_dict()
            if hasattr(self.case_info_list, 'to_alipay_dict'):
                params['case_info_list'] = self.case_info_list.to_alipay_dict()
            else:
                params['case_info_list'] = self.case_info_list
        if self.entinv_list:
            if isinstance(self.entinv_list, list):
                for i in range(0, len(self.entinv_list)):
                    element = self.entinv_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.entinv_list[i] = element.to_alipay_dict()
            if hasattr(self.entinv_list, 'to_alipay_dict'):
                params['entinv_list'] = self.entinv_list.to_alipay_dict()
            else:
                params['entinv_list'] = self.entinv_list
        if self.fr_position_list:
            if isinstance(self.fr_position_list, list):
                for i in range(0, len(self.fr_position_list)):
                    element = self.fr_position_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.fr_position_list[i] = element.to_alipay_dict()
            if hasattr(self.fr_position_list, 'to_alipay_dict'):
                params['fr_position_list'] = self.fr_position_list.to_alipay_dict()
            else:
                params['fr_position_list'] = self.fr_position_list
        if self.frinv_list:
            if isinstance(self.frinv_list, list):
                for i in range(0, len(self.frinv_list)):
                    element = self.frinv_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.frinv_list[i] = element.to_alipay_dict()
            if hasattr(self.frinv_list, 'to_alipay_dict'):
                params['frinv_list'] = self.frinv_list.to_alipay_dict()
            else:
                params['frinv_list'] = self.frinv_list
        if self.person_list:
            if isinstance(self.person_list, list):
                for i in range(0, len(self.person_list)):
                    element = self.person_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.person_list[i] = element.to_alipay_dict()
            if hasattr(self.person_list, 'to_alipay_dict'):
                params['person_list'] = self.person_list.to_alipay_dict()
            else:
                params['person_list'] = self.person_list
        if self.share_holder_list:
            if isinstance(self.share_holder_list, list):
                for i in range(0, len(self.share_holder_list)):
                    element = self.share_holder_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.share_holder_list[i] = element.to_alipay_dict()
            if hasattr(self.share_holder_list, 'to_alipay_dict'):
                params['share_holder_list'] = self.share_holder_list.to_alipay_dict()
            else:
                params['share_holder_list'] = self.share_holder_list
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = CompanyInfo()
        if 'alter_list' in d:
            o.alter_list = d['alter_list']
        if 'basic_info' in d:
            o.basic_info = d['basic_info']
        if 'case_info_list' in d:
            o.case_info_list = d['case_info_list']
        if 'entinv_list' in d:
            o.entinv_list = d['entinv_list']
        if 'fr_position_list' in d:
            o.fr_position_list = d['fr_position_list']
        if 'frinv_list' in d:
            o.frinv_list = d['frinv_list']
        if 'person_list' in d:
            o.person_list = d['person_list']
        if 'share_holder_list' in d:
            o.share_holder_list = d['share_holder_list']
        return o
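# Quick round-trip sketch (added for illustration; running it needs the
# alipay-sdk-python package for the EpInfo import above). Dict values for the
# list fields are parsed into EpInfo objects by the setters, and empty or
# unset fields are simply left out of the serialised result:
if __name__ == '__main__':
    info = CompanyInfo.from_alipay_dict({'share_holder_list': []})
    print(info.to_alipay_dict())  # prints {} because the empty list is skipped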
# Week 5
# Task 1.1
a = 13
b = 14
calculation = a + 1 <= b
calculation2 = a + 1 >= b
calculation3 = a + 1 != b
print(calculation)
print(calculation2)
print(calculation3)

# Task 1.2
myage = input("How old are you : ")
print("Hi there, You are " + myage + " years old")

# Task 1.3
# input() returns strings, so convert to int before adding
num1 = int(input("Enter the first number : "))
num2 = int(input("Enter the second number : "))
result = num1 + num2
print("The result is " + str(result))

# Task 1.4
print("average: %.2f" % ((3 + 11 + 78 + 112 + 4 + 18) / 6))

# Task 1.5
num1 = int(input("Enter a number : "))
remainder = num1 % 7
print(remainder)

# Task 1.6
num1 = int(input("Enter a number : "))
remainder = num1 % 7
print(remainder)
num2 = 7
num3 = num1 / num2
print(num3)

# Task 1.8
userinput = input("Enter Y to quit : ")
if userinput in ('Y', 'y'):
    print("Goodbye")
else:
    print("Round 2 ~ Fight!")

# Task 1.9a
x = int(input("Enter a number : "))
if x > 0:
    print(x)

# Task 1.9b
from math import sqrt  # sqrt is not a builtin

y = 0  # y must exist before it can be updated
if 1 + x > x ** sqrt(2):
    y = y + x

# Task 1.9c
x = 1
y = 5
if x == 1:
    y += 1
print(x)
print(y)

# Task 1.9d
letterGrade = int(input("Enter your grade : "))
if letterGrade >= 90:
    print("A")
elif letterGrade >= 80:
    print("B")
elif letterGrade >= 70:
    print("C")
elif letterGrade >= 60:
    print("D")
else:  # anything below 60 fails
    print("F")

# Task 1.10
richter = float(input("Enter magnitude on richter scale : "))
if richter >= 8.0:
    print("Most structures fall")
elif richter >= 7.0:
    print("Many buildings destroyed")
elif richter >= 6.0:
    print("Many buildings considerably damaged, some collapse")
elif richter >= 4.5:
    print("Damage to poorly constructed buildings.")
else:  # below 4.5
    print("No destruction of buildings.")

# Task 1.11
user = input("Enter a username : ")
print("Welcome " + user + " Please select a password")
password = input("Enter a password : ")
count = 0
while count <= 4:
    if count == 4:
        print("Access denied, please press enter to exit and contact security to reset your password")
    elif len(password) < 8:
        password = input("Password needs to be more than 8 characters, Please try again : ")
    else:
        print("Password changed successfully")
        break
    count += 1  # advance the attempt counter inside the loop

# Task 1.12
for i in range(3):
    for j in range(1, 4):
        print(i + j, end="")
    print()

# Task 1.13
for i in range(1, 6):
    print("%d %d %d %d %d" % (i ** 1, i ** 2, i ** 3, i ** 4, i ** 5))
import pickle
import random

import matplotlib
from sklearn.manifold import TSNE


def loadXY():
    zippedXY = pickle.load(open("../Vectorizer/zippedXY_wff_2k.p", "rb"))
    # random.shuffle(zippedXY)
    X, Y = zip(*zippedXY)
    return X, Y


def outliers(X, Y):
    from sklearn.ensemble import IsolationForest
    out = IsolationForest()
    out.fit(X, Y)
    predictions = list(out.predict(X))  # IsolationForest marks outliers with -1
    print("Total outliers :", predictions.count(-1))


if __name__ == "__main__":
    X, Y = loadXY()
    print("X and Y loaded")

    # Map the five emotion labels to integers so they can colour the scatter plot
    label2num = {"ANGER": 0, "SADNESS": 1, "JOY": 2, "FEAR": 3, "SURPRISE": 4}
    Ynum = [label2num[y] for y in Y]
    for n in range(5):
        print(Ynum.index(n))  # position of the first sample of each class

    # ---------- 2D plot (left disabled, as in the original) ----------
    # tsne = TSNE(n_components=2, verbose=1)
    # tsne_fit = tsne.fit_transform(X)
    # pickle.dump(tsne_fit, open("tsne_fit_wff_2k.p", "wb"))
    # tsne_fit = pickle.load(open("tsne_fit_wff_2k.p", "rb"))
    # from matplotlib import pyplot as plt
    # xx, yy = tsne_fit[:, 0], tsne_fit[:, 1]
    # colors = ['red', 'green', 'blue', 'black', 'yellow']
    # plt.scatter(xx, yy, c=Ynum, edgecolors='none',
    #             cmap=matplotlib.colors.ListedColormap(colors))
    # plt.draw()
    # plt.savefig("wff_2k_visualise.png")

    # ---------- 3D plot ----------
    tsne = TSNE(n_components=3, verbose=1)
    tsne_fit = tsne.fit_transform(X)

    # Cache the fitted embedding so it can be re-plotted without refitting
    pickle.dump(tsne_fit, open("tsne_fit_wff_2k_3d.p", "wb"))
    tsne_fit = pickle.load(open("tsne_fit_wff_2k_3d.p", "rb"))

    # Visualize the data and save the figure to Plots/
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D

    xx = tsne_fit[:, 0]
    yy = tsne_fit[:, 1]
    zz = tsne_fit[:, 2]
    colors = ['red', 'green', 'blue', 'black', 'yellow']
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    print(Ynum)
    ax.scatter(xx, yy, zz, c=Ynum, edgecolors='none',
               cmap=matplotlib.colors.ListedColormap(colors))
    plt.draw()
    plt.savefig("wff_2k_visualise_3d__new.png")
    # outliers(X, Ynum)
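# The script above expects ../Vectorizer/zippedXY_wff_2k.p to contain a list of
# (feature_vector, emotion_label) pairs, since zip(*zippedXY) splits it into X
# and Y. A toy file with the same shape could be built like this (illustrative
# only -- the real vectors come from the Vectorizer step):
#
#   import pickle, random
#   labels = ["ANGER", "SADNESS", "JOY", "FEAR", "SURPRISE"]
#   zippedXY = [([random.random() for _ in range(50)], labels[i % 5])
#               for i in range(100)]
#   pickle.dump(zippedXY, open("../Vectorizer/zippedXY_wff_2k.p", "wb"))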
import datetime


class assignmentObject:
    """A simple record pairing an assignment name with its due day."""

    def __init__(self, name, day):
        self.name = name
        self.day = day
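# Usage sketch (illustrative; the datetime import above suggests `day` is
# meant to hold a date):
homework = assignmentObject("Lab report", datetime.date(2021, 3, 14))
print(homework.name, homework.day)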
#!/usr/bin/env python

import rospy
from racecar_control.msg import drive_param
import curses

forward = 0  # commanded velocity
left = 0     # commanded steering angle

# Put the terminal into cbreak/keypad mode so arrow keys arrive as single key codes
stdscr = curses.initscr()
curses.cbreak()
stdscr.keypad(1)

rospy.init_node('keyop', anonymous=True)
pub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)

stdscr.refresh()

key = ''
while key != ord('q'):
    key = stdscr.getch()
    stdscr.refresh()

    # Up/Down arrows step the velocity, clamped to [-40, 40]
    if key == curses.KEY_UP:
        forward = forward + 1
        if forward >= 40:
            forward = 40
        elif forward < -40:
            forward = -40
        stdscr.addstr(2, 20, "Up  ")
        stdscr.addstr(2, 25, '%.2f' % forward)
        stdscr.addstr(5, 20, "    ")
    elif key == curses.KEY_DOWN:
        forward = forward - 1
        if forward >= 40:
            forward = 40
        elif forward < -40:
            forward = -40
        stdscr.addstr(2, 20, "Down")
        stdscr.addstr(2, 25, '%.2f' % forward)
        stdscr.addstr(5, 20, "    ")

    # Left/Right arrows step the steering angle, clamped to [-0.78, 0.78]
    if key == curses.KEY_LEFT:
        left = left + 0.1
        if left >= 0.78:
            left = 0.78
        elif left < -0.78:
            left = -0.78
        stdscr.addstr(3, 20, "left")
        stdscr.addstr(3, 25, '%.2f' % left)
        stdscr.addstr(5, 20, "    ")
    elif key == curses.KEY_RIGHT:
        left = left - 0.1
        if left >= 0.78:
            left = 0.78
        elif left < -0.78:
            left = -0.78
        stdscr.addstr(3, 20, "rgt ")
        stdscr.addstr(3, 25, '%.2f' % left)
        stdscr.addstr(5, 20, "    ")

    # Delete key: emergency stop
    if key == curses.KEY_DC:
        left = 0
        forward = 0
        stdscr.addstr(5, 20, "Stop")

    # Publish the current command on every key event
    msg = drive_param()
    msg.velocity = forward
    msg.angle = left
    pub.publish(msg)

curses.endwin()
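# A minimal companion subscriber sketch (illustrative: the node and callback
# names are made up, but the topic name, message type and fields come from the
# teleop node above). Save it as its own script:
import rospy
from racecar_control.msg import drive_param

def on_drive_param(msg):
    # echo every command published by the keyboard teleop node
    rospy.loginfo('velocity=%.2f angle=%.2f', msg.velocity, msg.angle)

rospy.init_node('keyop_echo', anonymous=True)
rospy.Subscriber('drive_parameters', drive_param, on_drive_param)
rospy.spin()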
normal
{ "blob_id": "fb332808890e369d1439d1dba61244a0f7b89301", "index": 4524, "step-1": "<mask token>\n", "step-2": "<mask token>\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyop', anonymous=True)\n<mask token>\nstdscr.refresh()\n<mask token>\nwhile key != ord('q'):\n key = stdscr.getch()\n stdscr.refresh()\n if key == curses.KEY_UP:\n forward = forward + 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Up ')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_DOWN:\n forward = forward - 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Down')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_LEFT:\n left = left + 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'left')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_RIGHT:\n left = left - 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'rgt ')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_DC:\n left = 0\n forward = 0\n stdscr.addstr(5, 20, 'Stop')\n msg = drive_param()\n msg.velocity = forward\n msg.angle = left\n pub.publish(msg)\ncurses.endwin()\n", "step-3": "<mask token>\nforward = 0\nleft = 0\nstdscr = curses.initscr()\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyop', anonymous=True)\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)\nstdscr.refresh()\nkey = ''\nwhile key != ord('q'):\n key = stdscr.getch()\n stdscr.refresh()\n if key == curses.KEY_UP:\n forward = forward + 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Up ')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_DOWN:\n forward = forward - 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Down')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_LEFT:\n left = left + 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'left')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_RIGHT:\n left = left - 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'rgt ')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_DC:\n left = 0\n forward = 0\n stdscr.addstr(5, 20, 'Stop')\n msg = drive_param()\n msg.velocity = forward\n msg.angle = left\n pub.publish(msg)\ncurses.endwin()\n", "step-4": "import rospy\nfrom racecar_control.msg import drive_param\nimport curses\nforward = 0\nleft = 0\nstdscr = curses.initscr()\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyop', anonymous=True)\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)\nstdscr.refresh()\nkey = ''\nwhile key != ord('q'):\n key = stdscr.getch()\n stdscr.refresh()\n if key == curses.KEY_UP:\n forward = forward + 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Up ')\n stdscr.addstr(2, 25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_DOWN:\n forward = forward - 1\n if forward >= 40:\n forward = 40\n elif forward < -40:\n forward = -40\n stdscr.addstr(2, 20, 'Down')\n stdscr.addstr(2, 
25, '%.2f' % forward)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_LEFT:\n left = left + 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'left')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n elif key == curses.KEY_RIGHT:\n left = left - 0.1\n if left >= 0.78:\n left = 0.78\n elif left < -0.78:\n left = -0.78\n stdscr.addstr(3, 20, 'rgt ')\n stdscr.addstr(3, 25, '%.2f' % left)\n stdscr.addstr(5, 20, ' ')\n if key == curses.KEY_DC:\n left = 0\n forward = 0\n stdscr.addstr(5, 20, 'Stop')\n msg = drive_param()\n msg.velocity = forward\n msg.angle = left\n pub.publish(msg)\ncurses.endwin()\n", "step-5": "#!/usr/bin/env python\n\nimport rospy\nfrom racecar_control.msg import drive_param\nimport curses\n\nforward = 0;\nleft = 0;\n\n\nstdscr = curses.initscr()\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyop', anonymous=True)\n\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)\n\n\nstdscr.refresh()\n\nkey = ''\nwhile key != ord('q'):\n\n\tkey = stdscr.getch()\n\tstdscr.refresh()\n\n\tif key == curses.KEY_UP: \n\t\tforward = forward + 1;\n\t\tif forward >= 40:\n\t\t\tforward = 40\n\t\telif forward < -40:\n\t\t\tforward = -40\n\t\tstdscr.addstr(2, 20, \"Up \")\n\t\tstdscr.addstr(2, 25, '%.2f' % forward)\n\t\tstdscr.addstr(5, 20, \" \")\n\telif key == curses.KEY_DOWN:\n\t\tforward = forward - 1; \n\t\tif forward >= 40:\n\t\t\tforward = 40\n\t\telif forward < -40:\n\t\t\tforward = -40\n\t\tstdscr.addstr(2, 20, \"Down\")\n\t\tstdscr.addstr(2, 25, '%.2f' % forward)\n\t\tstdscr.addstr(5, 20, \" \")\n\tif key == curses.KEY_LEFT: \t\t\t\n\t\tleft = left + 0.1; \n\t\tif left >= 0.78:\n\t\t\tleft = 0.78\n\t\telif left < -0.78:\n\t\t\tleft = -0.78\n\t\tstdscr.addstr(3, 20, \"left\")\n\t\tstdscr.addstr(3, 25, '%.2f' % left)\n\t\tstdscr.addstr(5, 20, \" \")\n\telif key == curses.KEY_RIGHT:\n\t\tleft = left - 0.1; \n\t\tif left >= 0.78:\n\t\t\tleft = 0.78\n\t\telif left < -0.78:\n\t\t\tleft = -0.78\n\t\tstdscr.addstr(3, 20, \"rgt \")\n\t\tstdscr.addstr(3, 25, '%.2f' % left)\n\t\tstdscr.addstr(5, 20, \" \")\n\tif key == curses.KEY_DC:\n\t\tleft = 0\n\t\tforward = 0\n\t\tstdscr.addstr(5, 20, \"Stop\")\n\tmsg = drive_param()\n\tmsg.velocity = forward\n\tmsg.angle = left\n\tpub.publish(msg)\ncurses.endwin()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
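The teleop node above only publishes to the drive_parameters topic; a minimal sketch of the consuming side is below, assuming (as the teleop code does) that drive_param carries velocity and angle fields. The node name keyop_listener and the callback are illustrative, not part of the original package.

#!/usr/bin/env python
# Hypothetical listener for the drive_parameters topic published by the
# keyboard node; assumes drive_param exposes the two fields used above.
import rospy
from racecar_control.msg import drive_param

def on_drive_param(msg):
    # velocity and angle are the fields set by the keyboard node
    rospy.loginfo('velocity=%.2f angle=%.2f', msg.velocity, msg.angle)

if __name__ == '__main__':
    rospy.init_node('keyop_listener', anonymous=True)
    rospy.Subscriber('drive_parameters', drive_param, on_drive_param)
    rospy.spin()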
# -*- coding: utf-8 -*-
print("—— 7. Python crawler in practice: crawling data from 1000 Baidu Baike pages ——")
print("—— 7.2 The scheduler ——")

print("————— Python crawler: 1. main driver program ———————————————")

from Reptilian.baike_spider import url_manager, html_downloader, html_parser, html_outputer

class SpiderMain(object):
    # the constructor initializes the four collaborating objects
    def __init__(self):
        self.urls = url_manager.UrlManager()                # URL manager
        self.downloader = html_downloader.HtmlDownloader()  # downloader
        self.parser = html_parser.HtmlParser()              # parser
        self.outputer = html_outputer.HtmlOutputer()        # outputer

    # the crawler's scheduling loop
    def craw(self, root_url):
        # count tracks which URL is currently being crawled
        count = 1
        # seed the manager with the entry URL
        self.urls.add_new_url(root_url)
        # keep going while the manager still holds uncrawled URLs
        while self.urls.has_new_url():
            try:
                # take the next URL to crawl
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                # let the downloader fetch the page content
                html_cont = self.downloader.download(new_url)
                # parse the page into new URLs and extracted data
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                # feed the new URLs back to the manager; collect the data
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)

                if count == 20:
                    break
                count = count + 1
            except Exception:
                print('craw failed')

        # write out all collected data
        self.outputer.output_html()

# main entry point
if __name__ == "__main__":
    # entry URL
    root_url = "https://baike.baidu.com/item/Python/407313"
    # create the spider
    obj_spider = SpiderMain()
    # start crawling
    obj_spider.craw(root_url)
normal
{ "blob_id": "e99a81a5600aad6111bb2694cbda02021ccfd71c", "index": 2817, "step-1": "<mask token>\n\n\nclass SpiderMain(object):\n <mask token>\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print('craw %d : %s' % (count, new_url))\n html_cont = self.downloader.download(new_url)\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n if count == 20:\n break\n count = count + 1\n except:\n print('craw failed')\n self.outputer.output_html()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass SpiderMain(object):\n\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownloader()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print('craw %d : %s' % (count, new_url))\n html_cont = self.downloader.download(new_url)\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n if count == 20:\n break\n count = count + 1\n except:\n print('craw failed')\n self.outputer.output_html()\n\n\n<mask token>\n", "step-3": "print('—— 七、Python爬虫实战演练:爬取百度百科1000个页面的数据 ——')\nprint('—— 7.2、调度程序 ——')\nprint('————— Python爬虫:1、总教程程序 ———————————————')\n<mask token>\n\n\nclass SpiderMain(object):\n\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownloader()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print('craw %d : %s' % (count, new_url))\n html_cont = self.downloader.download(new_url)\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n if count == 20:\n break\n count = count + 1\n except:\n print('craw failed')\n self.outputer.output_html()\n\n\nif __name__ == '__main__':\n root_url = 'https://baike.baidu.com/item/Python/407313'\n obj_spider = SpiderMain()\n obj_spider.craw(root_url)\n", "step-4": "print('—— 七、Python爬虫实战演练:爬取百度百科1000个页面的数据 ——')\nprint('—— 7.2、调度程序 ——')\nprint('————— Python爬虫:1、总教程程序 ———————————————')\nfrom Reptilian.baike_spider import url_manager, html_downloader, html_parser, html_outputer\n\n\nclass SpiderMain(object):\n\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownloader()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print('craw %d : %s' % (count, new_url))\n html_cont = self.downloader.download(new_url)\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n if count == 20:\n break\n count = count + 1\n except:\n print('craw failed')\n self.outputer.output_html()\n\n\nif __name__ == '__main__':\n root_url = 'https://baike.baidu.com/item/Python/407313'\n obj_spider = SpiderMain()\n obj_spider.craw(root_url)\n", "step-5": "# -*- 
coding: utf-8 -*-\nprint (\"—— 七、Python爬虫实战演练:爬取百度百科1000个页面的数据 ——\");\nprint (\"—— 7.2、调度程序 ——\");\n\nprint (\"————— Python爬虫:1、总教程程序 ———————————————\");\n\nfrom Reptilian.baike_spider import url_manager, html_downloader, html_parser, html_outputer\n\nclass SpiderMain(object):\n # 构造函数初始化各个对象\n def __init__(self):\n self.urls = url_manager.UrlManager() # Url管理器\n self.downloader = html_downloader.HtmlDownloader() # 下载器\n self.parser = html_parser.HtmlParser() # 解析器\n self.outputer = html_outputer.HtmlOutputer() # 输出器\n\n # 爬虫的调度程序\n def craw(self, root_url):\n # 添加辅助信息,用count判断当前爬取的是第几个url\n count = 1\n # 入口url添加到管理器\n self.urls.add_new_url(root_url)\n # (如果有待爬取的url)遍历url管理器获取url\n while self.urls.has_new_url():\n try:\n # 获取一个待爬取的url(当前爬取的url)\n new_url = self.urls.get_new_url()\n print ('craw %d : %s' % (count, new_url) )\n # 启动下载器下载页面(页面数据)\n html_cont = self.downloader.download(new_url)\n # 下载好页面,调用解析器解析页面数据-->得到新的url列表和新的数据\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n # url添加进url管理器;收集数据\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n\n if count == 20:\n break\n count = count + 1\n except:\n print ('craw failed')\n\n # 输出收集好的数据\n self.outputer.output_html()\n\n# 1、编写main函数\nif __name__==\"__main__\":\n # 编写入口url\n root_url = \"https://baike.baidu.com/item/Python/407313\"\n # 创建spider\n obj_spider = SpiderMain()\n # 启动爬虫\n obj_spider.craw(root_url)", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
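The scheduler above imports four helper modules that are not shown in this record. A minimal sketch of the UrlManager interface it relies on follows; the two-set design is an assumption about the missing Reptilian.baike_spider.url_manager module, chosen to satisfy exactly the four methods the scheduler calls.

# Hypothetical url_manager.UrlManager: two sets, one of URLs waiting to
# be crawled and one of URLs already crawled, so nothing is visited twice.
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # pop an arbitrary pending URL and mark it as crawled
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url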
""" bubble sort start at beginning switch to left if smaller - very naive approach n-1 comparisons, n-1 iterations (n-1)^2 worst case: O(n^2) = average case best case: O(n) space complexity: O(1) """ def bubbleSort(list): for num in range(len(list)-1,0,-1): for i in range(num): if list[i] > list[i+1]: temp = list[i] list[i] = list [i+1] list[i+1] = temp list = [12,34,2,45,6] bubbleSort(list) print(list)
normal
{ "blob_id": "29c25721a4754650f0d5d63d6cc3215cb0ea1b3e", "index": 7849, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef bubbleSort(list):\n for num in range(len(list) - 1, 0, -1):\n for i in range(num):\n if list[i] > list[i + 1]:\n temp = list[i]\n list[i] = list[i + 1]\n list[i + 1] = temp\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef bubbleSort(list):\n for num in range(len(list) - 1, 0, -1):\n for i in range(num):\n if list[i] > list[i + 1]:\n temp = list[i]\n list[i] = list[i + 1]\n list[i + 1] = temp\n\n\n<mask token>\nbubbleSort(list)\nprint(list)\n", "step-4": "<mask token>\n\n\ndef bubbleSort(list):\n for num in range(len(list) - 1, 0, -1):\n for i in range(num):\n if list[i] > list[i + 1]:\n temp = list[i]\n list[i] = list[i + 1]\n list[i + 1] = temp\n\n\nlist = [12, 34, 2, 45, 6]\nbubbleSort(list)\nprint(list)\n", "step-5": "\"\"\"\nbubble sort\nstart at beginning switch to left if smaller - very naive approach\nn-1 comparisons, n-1 iterations\n(n-1)^2\nworst case: O(n^2) = average case\nbest case: O(n)\nspace complexity: O(1)\n\"\"\"\ndef bubbleSort(list):\n for num in range(len(list)-1,0,-1):\n for i in range(num):\n if list[i] > list[i+1]:\n temp = list[i]\n list[i] = list [i+1]\n list[i+1] = temp\nlist = [12,34,2,45,6]\nbubbleSort(list)\nprint(list)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
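The docstring above claims an O(n) best case, but the version shown always runs both loops to completion, so it is quadratic even on sorted input. A sketch of the early-exit variant that actually achieves the O(n) best case:

# Early-exit variant: stop as soon as a full pass makes no swap, which
# is what gives bubble sort its O(n) best case on already-sorted input.
def bubbleSortEarlyExit(values):
    for num in range(len(values) - 1, 0, -1):
        swapped = False
        for i in range(num):
            if values[i] > values[i + 1]:
                values[i], values[i + 1] = values[i + 1], values[i]
                swapped = True
        if not swapped:      # already sorted: one O(n) pass and done
            break

data = [1, 2, 3, 4, 5]
bubbleSortEarlyExit(data)    # finishes after a single pass
print(data)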
#!/usr/bin/env python

"""
This example shows how to create and render an unstructured grid.
"""

import vtk
import numpy as np
import pickle as pkl

colors_list = pkl.load(open('permuted_colors.pkl', 'rb'))
meta = pkl.load(open('v_atlas/meta_information.pkl', 'rb'))

def main():
    colors = vtk.vtkNamedColors()

    Data = np.load('tessaltions_compressed.npz')

    indices = meta['sorted_keys']
    struct_D = {}  # a mapping of structure names to colors.
    for i, s in enumerate(set([x[0] for x in indices])):
        struct_D[s] = colors_list[i]

    renderer = vtk.vtkRenderer()

    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(renderer)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    for index in range(len(indices)):
        x = Data['points_' + str(index)]
        triangles = Data['triangles_' + str(index)]
        print(index, x.shape, triangles.shape, '\r', end='')

        points = vtk.vtkPoints()
        for i in range(0, x.shape[0]):
            points.InsertPoint(i, x[i, :])

        ugrid = vtk.vtkUnstructuredGrid()
        ugrid.Allocate(triangles.shape[0])
        for i in range(triangles.shape[0]):
            ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])

        ugrid.SetPoints(points)

        # vtkPolyDataNormals expects vtkPolyData, not an unstructured grid,
        # so extract the surface first (feeding the grid in directly is what
        # made Update() fail before).
        surface = vtk.vtkGeometryFilter()
        surface.SetInputData(ugrid)

        uGridNormals = vtk.vtkPolyDataNormals()
        uGridNormals.SetInputConnection(surface.GetOutputPort())
        uGridNormals.SetFeatureAngle(30.0)
        uGridNormals.SplittingOn()
        uGridNormals.Update()

        normalsPolyData = vtk.vtkPolyData()
        normalsPolyData.DeepCopy(uGridNormals.GetOutput())

        ugridMapper = vtk.vtkPolyDataMapper()
        ugridMapper.SetInputData(normalsPolyData)
        ugridMapper.ScalarVisibilityOff()

        ugridActor = vtk.vtkActor()
        ugridActor.SetMapper(ugridMapper)
        color = struct_D[indices[index][0]]
        ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))
        ugridActor.GetProperty().SetDiffuse(.7)
        ugridActor.GetProperty().SetSpecularPower(20)
        ugridActor.GetProperty().SetSpecular(.5)

        ugridActor.GetProperty().EdgeVisibilityOff()
        ugridActor.GetProperty().SetOpacity(0.5)
        ugridActor.GetProperty().SetInterpolationToGouraud()

        renderer.AddActor(ugridActor)
        break  # only render the first structure for now

    renderer.SetBackground(colors.GetColor3d('Beige'))

    renderer.ResetCamera()
    renderer.GetActiveCamera().Elevation(60.0)
    renderer.GetActiveCamera().Azimuth(30.0)
    renderer.GetActiveCamera().Dolly(1.2)

    renWin.SetSize(640, 480)

    # Interact with the data.
    renWin.Render()

    iren.Start()


if __name__ == "__main__":
    main()
normal
{ "blob_id": "7261c5f9ac87c8337383daec312372b345ab7652", "index": 4109, "step-1": "<mask token>\n\n\ndef main():\n colors = vtk.vtkNamedColors()\n Data = np.load('tessaltions_compressed.npz')\n indices = meta['sorted_keys']\n struct_D = {}\n for i, s in enumerate(set([x[0] for x in indices])):\n struct_D[s] = colors_list[i]\n renderer = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n for index in range(len(indices)):\n x = Data['points_' + str(index)]\n triangles = Data['triangles_' + str(index)]\n print(index, x.shape, triangles.shape, '\\r', end='')\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i, :])\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])\n ugrid.SetPoints(points)\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n uGridNormals.SplittingOn()\n print(uGridNormals)\n uGridNormals.Update()\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n color = struct_D[indices[index][0]]\n ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(0.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(0.5)\n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n renderer.AddActor(ugridActor)\n break\n renderer.SetBackground(colors.GetColor3d('Beige'))\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n renWin.SetSize(640, 480)\n renWin.Render()\n iren.Start()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n colors = vtk.vtkNamedColors()\n Data = np.load('tessaltions_compressed.npz')\n indices = meta['sorted_keys']\n struct_D = {}\n for i, s in enumerate(set([x[0] for x in indices])):\n struct_D[s] = colors_list[i]\n renderer = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n for index in range(len(indices)):\n x = Data['points_' + str(index)]\n triangles = Data['triangles_' + str(index)]\n print(index, x.shape, triangles.shape, '\\r', end='')\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i, :])\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])\n ugrid.SetPoints(points)\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n uGridNormals.SplittingOn()\n print(uGridNormals)\n uGridNormals.Update()\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n color = struct_D[indices[index][0]]\n 
ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(0.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(0.5)\n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n renderer.AddActor(ugridActor)\n break\n renderer.SetBackground(colors.GetColor3d('Beige'))\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n renWin.SetSize(640, 480)\n renWin.Render()\n iren.Start()\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\ncolors_list = pkl.load(open('permuted_colors.pkl', 'rb'))\nmeta = pkl.load(open('v_atlas/meta_information.pkl', 'rb'))\n\n\ndef main():\n colors = vtk.vtkNamedColors()\n Data = np.load('tessaltions_compressed.npz')\n indices = meta['sorted_keys']\n struct_D = {}\n for i, s in enumerate(set([x[0] for x in indices])):\n struct_D[s] = colors_list[i]\n renderer = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n for index in range(len(indices)):\n x = Data['points_' + str(index)]\n triangles = Data['triangles_' + str(index)]\n print(index, x.shape, triangles.shape, '\\r', end='')\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i, :])\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])\n ugrid.SetPoints(points)\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n uGridNormals.SplittingOn()\n print(uGridNormals)\n uGridNormals.Update()\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n color = struct_D[indices[index][0]]\n ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(0.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(0.5)\n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n renderer.AddActor(ugridActor)\n break\n renderer.SetBackground(colors.GetColor3d('Beige'))\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n renWin.SetSize(640, 480)\n renWin.Render()\n iren.Start()\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nimport vtk\nimport numpy as np\nimport pickle as pkl\ncolors_list = pkl.load(open('permuted_colors.pkl', 'rb'))\nmeta = pkl.load(open('v_atlas/meta_information.pkl', 'rb'))\n\n\ndef main():\n colors = vtk.vtkNamedColors()\n Data = np.load('tessaltions_compressed.npz')\n indices = meta['sorted_keys']\n struct_D = {}\n for i, s in enumerate(set([x[0] for x in indices])):\n struct_D[s] = colors_list[i]\n renderer = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n for index in range(len(indices)):\n x = 
Data['points_' + str(index)]\n triangles = Data['triangles_' + str(index)]\n print(index, x.shape, triangles.shape, '\\r', end='')\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i, :])\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])\n ugrid.SetPoints(points)\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n uGridNormals.SplittingOn()\n print(uGridNormals)\n uGridNormals.Update()\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n color = struct_D[indices[index][0]]\n ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(0.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(0.5)\n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n renderer.AddActor(ugridActor)\n break\n renderer.SetBackground(colors.GetColor3d('Beige'))\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n renWin.SetSize(640, 480)\n renWin.Render()\n iren.Start()\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python\n\n\"\"\"\nThis example shows how to create an unstructured grid.\n\"\"\"\n\nimport vtk\nimport numpy as np\nimport pickle as pkl\n\ncolors_list = pkl.load(open('permuted_colors.pkl','rb'))\nmeta = pkl.load(open('v_atlas/meta_information.pkl','rb'))\n\ndef main():\n colors = vtk.vtkNamedColors()\n\n Data=np.load('tessaltions_compressed.npz')\n\n indices=meta['sorted_keys']\n struct_D={} # a mapping of structure names to colors.\n for i,s in enumerate(set([x[0] for x in indices])):\n struct_D[s]=colors_list[i]\n \n renderer = vtk.vtkRenderer()\n\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n\n for index in range(len(indices)):\n x=Data['points_'+str(index)]\n triangles = Data['triangles_'+str(index)]\n print(index,x.shape, triangles.shape,'\\r',end='')\n\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i,:])\n\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i,:])\n\n ugrid.SetPoints(points)\n\n\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n\n #uGridNormals.ComputePointNormalsOn()\n uGridNormals.SplittingOn()\n\n print(uGridNormals)\n uGridNormals.Update() # causes an error\n\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n \n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n \n # ugridMapper = vtk.vtkDataSetMapper()\n # ugridMapper.SetInputData(ugrid)\n\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n # print(index,indices[index],struct_D[indices[index][0]])\n color = struct_D[indices[index][0]]\n 
ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(.5)\n \n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n\n renderer.AddActor(ugridActor)\n break\n\n renderer.SetBackground(colors.GetColor3d('Beige'))\n\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n\n renWin.SetSize(640, 480)\n\n # Interact with the data.\n renWin.Render()\n\n iren.Start()\n\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
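The viewer above assumes tessaltions_compressed.npz stores one points_<i> array of (n, 3) vertex coordinates and one triangles_<i> array of (m, 3) vertex indices per structure. A minimal sketch of producing a file in that layout, with dummy data standing in for the atlas:

# Dummy single-triangle mesh written in the key layout the viewer expects;
# the coordinates below are placeholders, not atlas data.
import numpy as np

points_0 = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
triangles_0 = np.array([[0, 1, 2]])

np.savez_compressed('tessaltions_compressed.npz',
                    points_0=points_0, triangles_0=triangles_0)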
import Adafruit_GPIO.SPI as SPI import Adafruit_SSD1306 # Raspberry Pi pin configuration: RST = 24 # Note the following are only used with SPI: DC = 23 SPI_PORT = 0 SPI_DEVICE = 0 # Beaglebone Black pin configuration: # RST = 'P9_12' # Note the following are only used with SPI: # DC = 'P9_15' # SPI_PORT = 1 # SPI_DEVICE = 0 # 128x32 display with hardware I2C: #disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST) # 128x64 display with hardware I2C: #disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST) # Note you can change the I2C address by passing an i2c_address parameter like: # disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C) # Alternatively you can specify an explicit I2C bus number, for example # with the 128x32 display you would use: # disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2) # 128x32 display with hardware SPI: # disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000)) # 128x64 display with hardware SPI: disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000)) # Alternatively you can specify a software SPI implementation by providing # digital GPIO pin numbers for all the required display pins. For example # on a Raspberry Pi with the 128x32 display you might use: # disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22) # Initialize library. disp.begin() # Clear display. disp.clear() disp.display()
normal
{ "blob_id": "d8cbed25f4c97be5a74a6e1f097fcb9fa9439a9a", "index": 8160, "step-1": "<mask token>\n", "step-2": "<mask token>\ndisp.begin()\ndisp.clear()\ndisp.display()\n", "step-3": "<mask token>\nRST = 24\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(\n SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\ndisp.begin()\ndisp.clear()\ndisp.display()\n", "step-4": "import Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\nRST = 24\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(\n SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\ndisp.begin()\ndisp.clear()\ndisp.display()\n", "step-5": "import Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\n\n# Raspberry Pi pin configuration:\nRST = 24\n# Note the following are only used with SPI:\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\n\n# Beaglebone Black pin configuration:\n# RST = 'P9_12'\n# Note the following are only used with SPI:\n# DC = 'P9_15'\n# SPI_PORT = 1\n# SPI_DEVICE = 0\n\n# 128x32 display with hardware I2C:\n#disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)\n\n# 128x64 display with hardware I2C:\n#disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)\n\n# Note you can change the I2C address by passing an i2c_address parameter like:\n# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)\n\n# Alternatively you can specify an explicit I2C bus number, for example\n# with the 128x32 display you would use:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)\n\n# 128x32 display with hardware SPI:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\n\n# 128x64 display with hardware SPI:\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))\n\n# Alternatively you can specify a software SPI implementation by providing\n# digital GPIO pin numbers for all the required display pins. For example\n# on a Raspberry Pi with the 128x32 display you might use:\n# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)\n\n# Initialize library.\ndisp.begin()\n\n# Clear display.\ndisp.clear()\ndisp.display()\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
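After begin(), clear(), and display(), the panel is blank; the usual next step in Adafruit's own examples is to render a 1-bit PIL image of the panel's exact size and push it to the buffer. A minimal sketch, reusing the disp object configured above:

# Draw a border and some text into an off-screen 1-bit image, then
# hand it to the display; image size must match the panel exactly.
from PIL import Image, ImageDraw

image = Image.new('1', (disp.width, disp.height))
draw = ImageDraw.Draw(image)
draw.rectangle((0, 0, disp.width - 1, disp.height - 1), outline=1, fill=0)
draw.text((8, 24), 'Hello SSD1306', fill=1)

disp.image(image)
disp.display()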
import os
import re
import time
import numpy as np
import pandas as pd
from sklearn.cluster import AgglomerativeClustering
import edlib
from progress.bar import IncrementalBar as Bar
from multiprocessing import Pool
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--pools",
                    default=4,
                    type=int,
                    help="Number of processes to use in aligning. Default 4. Optional."
                    )
parser.add_argument("--misses",
                    default=5,
                    type=float,
                    help="Number of allowed substitutions/insertions/deletions when aligning a sequence of length k. "
                         "For longer sequences, this is scaled. "
                    )
parser.add_argument("--aligned",
                    default=None,
                    type=str,
                    help="Path to the output aligned directory. Required."
                    )
parser.add_argument("--overview",
                    default=None,
                    type=str,
                    help="Path to the output description csv. Required. Pairs with <--aligned> directory."
                    )
parser.add_argument("--k",
                    default=-1,
                    type=int,
                    help="Size of the k-mer created by BCALM. Required."
                    )
parser.add_argument("--input",
                    default=None,
                    type=str,
                    help="Path to the input file."
                    )
parser.set_defaults(all_sqs_result=False)

# parse an empty argv when run interactively (e.g. in a notebook)
args = parser.parse_args([] if "__file__" not in globals() else None)

bases = dict(A=0, C=1, G=2, T=3)
bases['-'] = 4
rev_bases = {v: k for k, v in bases.items()}
global_alignment_ident_no = 0

# per-symbol codes for alignment strings (currently unused)
operations = {
    '.': 0,
    '-': 1,
    '|': 0
}


class AlignmentProfile:
    def __init__(self, width, df, identifier):
        self.ident = identifier

        self.profile = np.zeros((5, width))
        self.repre_sq = ""
        self.seq_alignments = None  # this will be a pandas df
        self.seq_align_counter = -1

        self.calculate_profile(df)

    def calculate_profile(self, df):
        self.seq_alignments = pd.DataFrame(
            [(index, *np.zeros(self.profile.shape[1], dtype=np.int8)) for index in df.index])

        unwrapped_sq = df['sq'].str.split('', expand=True)
        unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0], unwrapped_sq.columns[-1]])

        counts = np.stack(df['count'].values)

        # for each base, accumulate the counts at every position where it occurs
        for base in bases:
            a = unwrapped_sq != base
            newX = np.ma.array(counts, mask=a)
            new_counts = newX.sum(axis=0)
            self.profile[bases[base], :] += new_counts

        # representative sequence: the most frequent base at each position
        maxs = np.argmax(self.profile, axis=0)
        self.repre_sq = "".join([rev_bases[x] for x in maxs])

    def add_sequence(self, new_sq, new_counts, nice, sq_index):
        offset = re.search(nice['target_aligned'].replace('-', ''), self.repre_sq).start(0)
        x = self.profile

        # pad the profile with insertion columns; the '-' row receives the
        # number of observations at that column (0 when out of range)
        insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]
        for i, index in enumerate(insertions):
            if index >= x.shape[1]:
                value = 0
            else:
                value = x[:, index].sum()
            x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)
            self.seq_alignments.insert(loc=int(index + offset), column=self.seq_align_counter, value=1)
            self.seq_align_counter -= 1

        # pad the new counts with deletions
        aligned_query = np.array(list(nice['query_aligned']))
        deletions = np.where(aligned_query == '-')[0]
        for i, index in enumerate(deletions):
            value = new_counts[index]
            new_counts = np.insert(new_counts, index, value, axis=0)

        i = offset
        for base, count in zip(aligned_query, new_counts):
            x[bases[base], i] += count
            i += 1

        self.profile = x

        # store the new sequence's alignment against the profile
        added_alignment = -np.ones(self.profile.shape[1])
        for i, char in enumerate(nice['target_aligned']):
            if char == '-':
                added_alignment[offset + i] = 1
            else:
                added_alignment[offset + i] = 0
        self.seq_alignments.loc[-1] = [sq_index, *added_alignment]  # adding a row
        self.seq_alignments.index = self.seq_alignments.index + 1  # shifting index

        # recalculate repre_sq -- the most probable one
        maxs = np.argmax(self.profile, axis=0)
        self.repre_sq = "".join([rev_bases[x] for x in maxs if rev_bases[x] != '-'])  # '-' is removed from the sq


def dst_func(x, y):
    return (np.array(x) != np.array(y)).sum()


def read_alignment(filename):
    for line in open(filename):
        sq, count = line.strip('\n').split(';')
        yield sq, np.array([int(x) for x in count.split(',')]), count


def cluster_group(df_group, l, dst=dst_func):
    sqs = df_group.reset_index()['sq']
    n = len(sqs)

    if n <= 1:
        return np.zeros(n)

    dst_matrix = np.zeros((n, n))

    for i in range(n):
        for j in range(i):
            d = dst(sqs[i], sqs[j])
            dst_matrix[i, j] = d
            dst_matrix[j, i] = d

    model = AgglomerativeClustering(distance_threshold=threshold * l,
                                    n_clusters=None,
                                    linkage='complete',
                                    affinity='precomputed')
    clusters = model.fit_predict(dst_matrix)
    return clusters


aligned_sqs_file = args.input
k = args.k
misses = args.misses
pools = args.pools

threshold = misses / k
if args.aligned is None:
    output_profile_dir = aligned_sqs_file + "_profiles"
else:
    output_profile_dir = args.aligned

if args.overview is None:
    output_csv_file = aligned_sqs_file + "_overview.csv"
else:
    output_csv_file = args.overview

# read the input
df = pd.DataFrame(read_alignment(aligned_sqs_file))
df.columns = ['sq', 'count', 'str_count']
df['length'] = df['sq'].str.len()

groups = df.groupby(by='length')
unique_lengths = df['length'].sort_values(ascending=False).unique()
against = []

# seed the profiles with clusters of the longest sequences
longest = unique_lengths[0]
df_group = groups.get_group(longest).copy()
clusters = cluster_group(df_group, longest)
df_group['cluster'] = clusters

alignments = {}
for cluster, cluster_df in df_group.groupby(by='cluster'):
    alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no)
    alignments[global_alignment_ident_no] = alignment
    global_alignment_ident_no += 1
    against.append(alignment)

start = time.time()
with Bar("Processing length groups...", max=len(unique_lengths) - 1) as bar:
    for length in unique_lengths[1:]:
        bar.next()
        df_group = groups.get_group(length).copy()

        def getDistanceAndAlignment(sq):
            maxval = np.floor(threshold * len(sq))
            best = np.inf
            best_target = None
            # fallback: the sequence is too short to tolerate even one edit
            if maxval < 1:
                return best, best_target

            for target in against:
                align_res = edlib.align(sq, target.repre_sq, mode='HW', task='distance', k=maxval)
                if align_res['editDistance'] != -1:
                    if best > align_res['editDistance']:
                        if align_res['editDistance'] == 0:
                            return align_res['editDistance'], target.ident
                        best = align_res['editDistance']
                        best_target = target

            if best_target is not None:
                best_target = best_target.ident
            return best, best_target

        if length * threshold >= 1:
            # try to align each sequence to an existing profile
            with Pool(pools) as pool:
                result = pool.map(getDistanceAndAlignment, df_group['sq'])
            df_group['aligned'] = result

            # add the aligned sequences to their profiles
            aligned = df_group[df_group['aligned'] != (np.inf, None)]
            for index, row in aligned.iterrows():
                to = alignments[row['aligned'][1]]
                align_res = edlib.align(row.sq, to.repre_sq, mode='HW', task='path')
                nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)
                to.add_sequence(row.sq, row['count'], nice, index)

            # cluster the unaligned ones and open new profiles for them
            unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()
            clusters = cluster_group(unaligned, length)
            unaligned['cluster'] = clusters

            for cluster, cluster_df in unaligned.groupby(by='cluster'):
                alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)
                alignments[global_alignment_ident_no] = alignment
                global_alignment_ident_no += 1
                against.append(alignment)
        else:
            # threshold is less than one, so no clustering nor alignment takes
            # place; every sequence becomes its own profile
            df_group["aligned"] = [(np.inf, None) for _ in range(len(df_group))]
            unaligned = df_group.copy()
            unaligned["cluster"] = list(range(len(unaligned)))

            for i, row in unaligned.iterrows():
                cluster_df = pd.DataFrame(row).T
                alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)
                alignments[global_alignment_ident_no] = alignment
                global_alignment_ident_no += 1
                against.append(alignment)

print(f"{aligned_sqs_file} elapsed: {time.time() - start}")
print(f"{aligned_sqs_file} writing...")
os.makedirs(output_profile_dir, exist_ok=True)
for alignment in against:
    filename = f"{output_profile_dir}/{alignment.ident}.prf"
    np.save(filename, alignment.profile)

# get the actual alignment for each sequence
all_alignments = []
for alignment in against:
    itemized = alignment.seq_alignments
    num_cols = itemized.columns[1:]

    # translate the per-position codes into a readable string
    for col in num_cols:
        itemized[col] = itemized[col].astype(int).apply(str)
    itemized['alignment_actual'] = itemized[num_cols].agg(','.join, axis=1)  # todo maybe cigar?
    itemized = itemized.drop(columns=num_cols)
    itemized.columns = ['index_df', 'alignment_actual']
    itemized['alignment'] = alignment.ident
    all_alignments.append(itemized)

all_alignments = pd.concat(all_alignments)
merged = pd.merge(all_alignments, df, left_on='index_df', right_index=True)

# write the per-sequence overview
merged.drop(columns=['count', 'index_df']).to_csv(output_csv_file, index=False)
print(f"{aligned_sqs_file} done")
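For reference, a tiny standalone demonstration of the edlib calls the script relies on: mode='HW' performs infix alignment, k caps the accepted edit distance (editDistance comes back as -1 when it is exceeded), and getNiceAlignment requires task='path'. The toy sequences are illustrative only:

# Infix-align a short query against a longer target with edlib.
import edlib

res = edlib.align('ACGT', 'AACGGTT', mode='HW', task='path', k=2)
print(res['editDistance'])   # 1 for this pair

# getNiceAlignment returns the aligned query/target strings with '-' gaps
nice = edlib.getNiceAlignment(res, 'ACGT', 'AACGGTT')
print(nice['query_aligned'])
print(nice['target_aligned'])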
normal
{ "blob_id": "7ae328bcfdec2d17fceb5d707f13cf495fde4469", "index": 7490, "step-1": "<mask token>\n\n\nclass AlignmentProfile:\n\n def __init__(self, width, df, identifier):\n self.ident = identifier\n self.profile = np.zeros((5, width))\n self.repre_sq = ''\n self.seq_alignments = None\n self.seq_align_counter = -1\n self.calculate_profile(df)\n\n def calculate_profile(self, df):\n self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.\n shape[1], dtype=np.int8)) for index in df.index])\n unwrapped_sq = df['sq'].str.split('', expand=True)\n unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0],\n unwrapped_sq.columns[-1]])\n counts = np.stack(df['count'].values)\n for base in bases:\n a = unwrapped_sq != base\n newX = np.ma.array(counts, mask=a)\n new_counts = newX.sum(axis=0)\n self.profile[bases[base], :] += new_counts\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = ''.join([rev_bases[x] for x in maxs])\n\n def add_sequence(self, new_sq, new_counts, nice, sq_index):\n offset = re.search(nice['target_aligned'].replace('-', ''), self.\n repre_sq).start(0)\n x = self.profile\n insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]\n for i, index in enumerate(insertions):\n if x.shape[1] >= index:\n value = 0\n else:\n value = x[:, index].sum()\n x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)\n self.seq_alignments.insert(loc=int(index + offset), column=self\n .seq_align_counter, value=1)\n self.seq_align_counter -= 1\n aligned_query = np.array(list(nice['query_aligned']))\n deletions = np.where(aligned_query == '-')[0]\n for i, index in enumerate(deletions):\n value = new_counts[index]\n new_counts = np.insert(new_counts, index, value, axis=0)\n i = offset\n for base, count in zip(aligned_query, new_counts):\n x[bases[base], i] += count\n i += 1\n self.profile = x\n added_alignment = -np.ones(self.profile.shape[1])\n for i, char in enumerate(nice['target_aligned']):\n if char == '-':\n added_alignment[offset + i] = 1\n else:\n added_alignment[offset + i] = 0\n self.seq_alignments.loc[-1] = [sq_index, *added_alignment]\n self.seq_alignments.index = self.seq_alignments.index + 1\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = ''.join([rev_bases[x] for x in maxs if rev_bases[x] !=\n '-'])\n\n\n<mask token>\n\n\ndef cluster_group(df_group, l, dst=dst_func):\n sqs = df_group.reset_index()['sq']\n n = len(sqs)\n if n <= 1:\n return np.zeros(n)\n dst_matrix = np.zeros((n, n))\n for i in range(n):\n for j in range(i):\n d = dst(sqs[i], sqs[j])\n dst_matrix[i, j] = d\n dst_matrix[j, i] = d\n model = AgglomerativeClustering(distance_threshold=threshold * l,\n n_clusters=None, linkage='complete', affinity='precomputed')\n clusters = model.fit_predict(dst_matrix)\n return clusters\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass AlignmentProfile:\n\n def __init__(self, width, df, identifier):\n self.ident = identifier\n self.profile = np.zeros((5, width))\n self.repre_sq = ''\n self.seq_alignments = None\n self.seq_align_counter = -1\n self.calculate_profile(df)\n\n def calculate_profile(self, df):\n self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.\n shape[1], dtype=np.int8)) for index in df.index])\n unwrapped_sq = df['sq'].str.split('', expand=True)\n unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0],\n unwrapped_sq.columns[-1]])\n counts = np.stack(df['count'].values)\n for base in bases:\n a = unwrapped_sq != base\n newX = np.ma.array(counts, mask=a)\n new_counts = 
newX.sum(axis=0)\n self.profile[bases[base], :] += new_counts\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = ''.join([rev_bases[x] for x in maxs])\n\n def add_sequence(self, new_sq, new_counts, nice, sq_index):\n offset = re.search(nice['target_aligned'].replace('-', ''), self.\n repre_sq).start(0)\n x = self.profile\n insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]\n for i, index in enumerate(insertions):\n if x.shape[1] >= index:\n value = 0\n else:\n value = x[:, index].sum()\n x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)\n self.seq_alignments.insert(loc=int(index + offset), column=self\n .seq_align_counter, value=1)\n self.seq_align_counter -= 1\n aligned_query = np.array(list(nice['query_aligned']))\n deletions = np.where(aligned_query == '-')[0]\n for i, index in enumerate(deletions):\n value = new_counts[index]\n new_counts = np.insert(new_counts, index, value, axis=0)\n i = offset\n for base, count in zip(aligned_query, new_counts):\n x[bases[base], i] += count\n i += 1\n self.profile = x\n added_alignment = -np.ones(self.profile.shape[1])\n for i, char in enumerate(nice['target_aligned']):\n if char == '-':\n added_alignment[offset + i] = 1\n else:\n added_alignment[offset + i] = 0\n self.seq_alignments.loc[-1] = [sq_index, *added_alignment]\n self.seq_alignments.index = self.seq_alignments.index + 1\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = ''.join([rev_bases[x] for x in maxs if rev_bases[x] !=\n '-'])\n\n\ndef dst_func(x, y):\n return (np.array(x) != np.array(y)).sum()\n\n\ndef read_alignment(filename):\n for line in open(filename):\n sq, count = line.strip('\\n').split(';')\n yield sq, np.array([int(x) for x in count.split(',')]), count\n\n\ndef cluster_group(df_group, l, dst=dst_func):\n sqs = df_group.reset_index()['sq']\n n = len(sqs)\n if n <= 1:\n return np.zeros(n)\n dst_matrix = np.zeros((n, n))\n for i in range(n):\n for j in range(i):\n d = dst(sqs[i], sqs[j])\n dst_matrix[i, j] = d\n dst_matrix[j, i] = d\n model = AgglomerativeClustering(distance_threshold=threshold * l,\n n_clusters=None, linkage='complete', affinity='precomputed')\n clusters = model.fit_predict(dst_matrix)\n return clusters\n\n\n<mask token>\n", "step-3": "<mask token>\nparser.add_argument('--pools', default=4, type=int, help=\n 'Number of threads to use in aligning. Default 4. Optional.')\nparser.add_argument('--misses', default=5, type=float, help=\n 'Number of allowed substitutions/insertions/deletions in aligning a sequence of length k. For longer sequences, this is scaled. '\n )\nparser.add_argument('--aligned', default=None, type=str, help=\n 'Path to the output aligned directory. Required.')\nparser.add_argument('--overview', default=None, type=str, help=\n 'Path to the output description csv. Required. Pairs with <--aligned> directory.'\n )\nparser.add_argument('--k', default=-1, type=int, help=\n 'Size of the k-mer created by BCALM. 
Required.')\nparser.add_argument('--input', default=None, type=str, help=\n 'Path to the input file.')\nparser.set_defaults(all_sqs_result=False)\n<mask token>\n\n\nclass AlignmentProfile:\n\n def __init__(self, width, df, identifier):\n self.ident = identifier\n self.profile = np.zeros((5, width))\n self.repre_sq = ''\n self.seq_alignments = None\n self.seq_align_counter = -1\n self.calculate_profile(df)\n\n def calculate_profile(self, df):\n self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.\n shape[1], dtype=np.int8)) for index in df.index])\n unwrapped_sq = df['sq'].str.split('', expand=True)\n unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0],\n unwrapped_sq.columns[-1]])\n counts = np.stack(df['count'].values)\n for base in bases:\n a = unwrapped_sq != base\n newX = np.ma.array(counts, mask=a)\n new_counts = newX.sum(axis=0)\n self.profile[bases[base], :] += new_counts\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = ''.join([rev_bases[x] for x in maxs])\n\n def add_sequence(self, new_sq, new_counts, nice, sq_index):\n offset = re.search(nice['target_aligned'].replace('-', ''), self.\n repre_sq).start(0)\n x = self.profile\n insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]\n for i, index in enumerate(insertions):\n if x.shape[1] >= index:\n value = 0\n else:\n value = x[:, index].sum()\n x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)\n self.seq_alignments.insert(loc=int(index + offset), column=self\n .seq_align_counter, value=1)\n self.seq_align_counter -= 1\n aligned_query = np.array(list(nice['query_aligned']))\n deletions = np.where(aligned_query == '-')[0]\n for i, index in enumerate(deletions):\n value = new_counts[index]\n new_counts = np.insert(new_counts, index, value, axis=0)\n i = offset\n for base, count in zip(aligned_query, new_counts):\n x[bases[base], i] += count\n i += 1\n self.profile = x\n added_alignment = -np.ones(self.profile.shape[1])\n for i, char in enumerate(nice['target_aligned']):\n if char == '-':\n added_alignment[offset + i] = 1\n else:\n added_alignment[offset + i] = 0\n self.seq_alignments.loc[-1] = [sq_index, *added_alignment]\n self.seq_alignments.index = self.seq_alignments.index + 1\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = ''.join([rev_bases[x] for x in maxs if rev_bases[x] !=\n '-'])\n\n\ndef dst_func(x, y):\n return (np.array(x) != np.array(y)).sum()\n\n\ndef read_alignment(filename):\n for line in open(filename):\n sq, count = line.strip('\\n').split(';')\n yield sq, np.array([int(x) for x in count.split(',')]), count\n\n\ndef cluster_group(df_group, l, dst=dst_func):\n sqs = df_group.reset_index()['sq']\n n = len(sqs)\n if n <= 1:\n return np.zeros(n)\n dst_matrix = np.zeros((n, n))\n for i in range(n):\n for j in range(i):\n d = dst(sqs[i], sqs[j])\n dst_matrix[i, j] = d\n dst_matrix[j, i] = d\n model = AgglomerativeClustering(distance_threshold=threshold * l,\n n_clusters=None, linkage='complete', affinity='precomputed')\n clusters = model.fit_predict(dst_matrix)\n return clusters\n\n\n<mask token>\nif args.aligned is None:\n output_profile_dir = aligned_sqs_file + '_profiles'\nelse:\n output_profile_dir = args.aligned\nif args.overview is None:\n output_csv_file = aligned_sqs_file + '_overview.csv'\nelse:\n output_csv_file = args.overview\n<mask token>\nfor cluster, cluster_df in df_group.groupby(by='cluster'):\n alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no\n )\n alignments[global_alignment_ident_no] = alignment\n 
global_alignment_ident_no += 1\n against.append(alignment)\n<mask token>\nwith Bar('Processing length groups...', max=len(unique_lengths) - 1) as bar:\n for length in unique_lengths[1:]:\n bar.next()\n df_group = groups.get_group(length).copy()\n\n def getDistanceAndAlignment(sq):\n maxval = np.floor(threshold * len(sq))\n min = np.inf\n min_target = None\n if maxval < 1:\n return min, min_target\n for target in against:\n align_res = edlib.align(sq, target.repre_sq, mode='HW',\n task='distance', k=maxval)\n if align_res['editDistance'] != -1:\n if min > align_res['editDistance']:\n if align_res['editDistance'] == 0:\n return align_res['editDistance'], target.ident\n min = align_res['editDistance']\n min_target = target\n if min_target is not None:\n min_target = min_target.ident\n return min, min_target\n x = length * threshold\n if length * threshold >= 1:\n with Pool(pools) as pool:\n result = pool.map(getDistanceAndAlignment, df_group['sq'])\n df_group['aligned'] = result\n aligned = df_group[df_group['aligned'] != (np.inf, None)]\n for index, row in aligned.iterrows():\n to = alignments[row['aligned'][1]]\n align_res = edlib.align(row.sq, to.repre_sq, mode='HW',\n task='path')\n nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)\n to.add_sequence(row.sq, row['count'], nice, index)\n unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()\n clusters = cluster_group(unaligned, length)\n unaligned['cluster'] = clusters\n for cluster, cluster_df in unaligned.groupby(by='cluster'):\n alignment = AlignmentProfile(length, cluster_df,\n global_alignment_ident_no)\n alignments[global_alignment_ident_no] = alignment\n global_alignment_ident_no += 1\n against.append(alignment)\n else:\n df_group['aligned'] = [(np.inf, None) for _ in range(len(df_group))\n ]\n unaligned = df_group.copy()\n unaligned['cluster'] = list(range(len(unaligned)))\n s = time.time()\n for i, row in unaligned.iterrows():\n cluster_df = pd.DataFrame(row).T\n alignment = AlignmentProfile(length, cluster_df,\n global_alignment_ident_no)\n alignments[global_alignment_ident_no] = alignment\n global_alignment_ident_no += 1\n against.append(alignment)\nprint(f'{aligned_sqs_file} elapsed: {time.time() - start}')\nprint(f'{aligned_sqs_file} writing...')\nos.makedirs(output_profile_dir, exist_ok=True)\nfor alignment in against:\n filename = f'{output_profile_dir}/{alignment.ident}.prf'\n np.save(filename, alignment.profile)\n<mask token>\nfor alignment in against:\n itemized = alignment.seq_alignments\n num_cols = itemized.columns[1:]\n for col in num_cols:\n itemized[col] = itemized[col].astype(int).apply(str)\n itemized['alignment_actual'] = itemized[num_cols].agg(','.join, axis=1)\n itemized = itemized.drop(columns=num_cols)\n itemized.columns = ['index_df', 'alignment_actual']\n itemized['alignment'] = alignment.ident\n all_alignments.append(itemized)\n<mask token>\nmerged.drop(columns=['count', 'index_df']).to_csv(output_csv_file, index=False)\nprint(f'{aligned_sqs_file} done')\n", "step-4": "<mask token>\nparser = argparse.ArgumentParser()\nparser.add_argument('--pools', default=4, type=int, help=\n 'Number of threads to use in aligning. Default 4. Optional.')\nparser.add_argument('--misses', default=5, type=float, help=\n 'Number of allowed substitutions/insertions/deletions in aligning a sequence of length k. For longer sequences, this is scaled. '\n )\nparser.add_argument('--aligned', default=None, type=str, help=\n 'Path to the output aligned directory. 
Required.')\nparser.add_argument('--overview', default=None, type=str, help=\n 'Path to the output description csv. Required. Pairs with <--aligned> directory.'\n )\nparser.add_argument('--k', default=-1, type=int, help=\n 'Size of the k-mer created by BCALM. Required.')\nparser.add_argument('--input', default=None, type=str, help=\n 'Path to the input file.')\nparser.set_defaults(all_sqs_result=False)\nargs = parser.parse_args([] if '__file__' not in globals() else None)\nbases = dict(A=0, C=1, G=2, T=3)\nbases['-'] = 4\nrev_bases = {v: k for k, v in bases.items()}\nglobal_alignment_ident_no = 0\noperations = {'.': 0, '-': 1, '|': 0}\n\n\nclass AlignmentProfile:\n\n def __init__(self, width, df, identifier):\n self.ident = identifier\n self.profile = np.zeros((5, width))\n self.repre_sq = ''\n self.seq_alignments = None\n self.seq_align_counter = -1\n self.calculate_profile(df)\n\n def calculate_profile(self, df):\n self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.\n shape[1], dtype=np.int8)) for index in df.index])\n unwrapped_sq = df['sq'].str.split('', expand=True)\n unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0],\n unwrapped_sq.columns[-1]])\n counts = np.stack(df['count'].values)\n for base in bases:\n a = unwrapped_sq != base\n newX = np.ma.array(counts, mask=a)\n new_counts = newX.sum(axis=0)\n self.profile[bases[base], :] += new_counts\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = ''.join([rev_bases[x] for x in maxs])\n\n def add_sequence(self, new_sq, new_counts, nice, sq_index):\n offset = re.search(nice['target_aligned'].replace('-', ''), self.\n repre_sq).start(0)\n x = self.profile\n insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]\n for i, index in enumerate(insertions):\n if x.shape[1] >= index:\n value = 0\n else:\n value = x[:, index].sum()\n x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)\n self.seq_alignments.insert(loc=int(index + offset), column=self\n .seq_align_counter, value=1)\n self.seq_align_counter -= 1\n aligned_query = np.array(list(nice['query_aligned']))\n deletions = np.where(aligned_query == '-')[0]\n for i, index in enumerate(deletions):\n value = new_counts[index]\n new_counts = np.insert(new_counts, index, value, axis=0)\n i = offset\n for base, count in zip(aligned_query, new_counts):\n x[bases[base], i] += count\n i += 1\n self.profile = x\n added_alignment = -np.ones(self.profile.shape[1])\n for i, char in enumerate(nice['target_aligned']):\n if char == '-':\n added_alignment[offset + i] = 1\n else:\n added_alignment[offset + i] = 0\n self.seq_alignments.loc[-1] = [sq_index, *added_alignment]\n self.seq_alignments.index = self.seq_alignments.index + 1\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = ''.join([rev_bases[x] for x in maxs if rev_bases[x] !=\n '-'])\n\n\ndef dst_func(x, y):\n return (np.array(x) != np.array(y)).sum()\n\n\ndef read_alignment(filename):\n for line in open(filename):\n sq, count = line.strip('\\n').split(';')\n yield sq, np.array([int(x) for x in count.split(',')]), count\n\n\ndef cluster_group(df_group, l, dst=dst_func):\n sqs = df_group.reset_index()['sq']\n n = len(sqs)\n if n <= 1:\n return np.zeros(n)\n dst_matrix = np.zeros((n, n))\n for i in range(n):\n for j in range(i):\n d = dst(sqs[i], sqs[j])\n dst_matrix[i, j] = d\n dst_matrix[j, i] = d\n model = AgglomerativeClustering(distance_threshold=threshold * l,\n n_clusters=None, linkage='complete', affinity='precomputed')\n clusters = model.fit_predict(dst_matrix)\n return 
clusters\n\n\naligned_sqs_file = args.input\nk = args.k\nmisses = args.misses\npools = args.pools\nthreshold = misses / k\nif args.aligned is None:\n output_profile_dir = aligned_sqs_file + '_profiles'\nelse:\n output_profile_dir = args.aligned\nif args.overview is None:\n output_csv_file = aligned_sqs_file + '_overview.csv'\nelse:\n output_csv_file = args.overview\ndf = pd.DataFrame(read_alignment(aligned_sqs_file))\ndf.columns = ['sq', 'count', 'str_count']\ndf['length'] = df['sq'].str.len()\ngroups = df.groupby(by='length')\nunique_lengths = df['length'].sort_values(ascending=False).unique()\nagainst = []\nlongest = unique_lengths[0]\ndf_group = groups.get_group(longest).copy()\nclusters = cluster_group(df_group, longest)\ndf_group['cluster'] = clusters\nalignments = {}\nfor cluster, cluster_df in df_group.groupby(by='cluster'):\n alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no\n )\n alignments[global_alignment_ident_no] = alignment\n global_alignment_ident_no += 1\n against.append(alignment)\nstart = time.time()\nwith Bar('Processing length groups...', max=len(unique_lengths) - 1) as bar:\n for length in unique_lengths[1:]:\n bar.next()\n df_group = groups.get_group(length).copy()\n\n def getDistanceAndAlignment(sq):\n maxval = np.floor(threshold * len(sq))\n min = np.inf\n min_target = None\n if maxval < 1:\n return min, min_target\n for target in against:\n align_res = edlib.align(sq, target.repre_sq, mode='HW',\n task='distance', k=maxval)\n if align_res['editDistance'] != -1:\n if min > align_res['editDistance']:\n if align_res['editDistance'] == 0:\n return align_res['editDistance'], target.ident\n min = align_res['editDistance']\n min_target = target\n if min_target is not None:\n min_target = min_target.ident\n return min, min_target\n x = length * threshold\n if length * threshold >= 1:\n with Pool(pools) as pool:\n result = pool.map(getDistanceAndAlignment, df_group['sq'])\n df_group['aligned'] = result\n aligned = df_group[df_group['aligned'] != (np.inf, None)]\n for index, row in aligned.iterrows():\n to = alignments[row['aligned'][1]]\n align_res = edlib.align(row.sq, to.repre_sq, mode='HW',\n task='path')\n nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)\n to.add_sequence(row.sq, row['count'], nice, index)\n unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()\n clusters = cluster_group(unaligned, length)\n unaligned['cluster'] = clusters\n for cluster, cluster_df in unaligned.groupby(by='cluster'):\n alignment = AlignmentProfile(length, cluster_df,\n global_alignment_ident_no)\n alignments[global_alignment_ident_no] = alignment\n global_alignment_ident_no += 1\n against.append(alignment)\n else:\n df_group['aligned'] = [(np.inf, None) for _ in range(len(df_group))\n ]\n unaligned = df_group.copy()\n unaligned['cluster'] = list(range(len(unaligned)))\n s = time.time()\n for i, row in unaligned.iterrows():\n cluster_df = pd.DataFrame(row).T\n alignment = AlignmentProfile(length, cluster_df,\n global_alignment_ident_no)\n alignments[global_alignment_ident_no] = alignment\n global_alignment_ident_no += 1\n against.append(alignment)\nprint(f'{aligned_sqs_file} elapsed: {time.time() - start}')\nprint(f'{aligned_sqs_file} writing...')\nos.makedirs(output_profile_dir, exist_ok=True)\nfor alignment in against:\n filename = f'{output_profile_dir}/{alignment.ident}.prf'\n np.save(filename, alignment.profile)\nall_alignments = []\nfor alignment in against:\n itemized = alignment.seq_alignments\n num_cols = itemized.columns[1:]\n 
for col in num_cols:\n itemized[col] = itemized[col].astype(int).apply(str)\n itemized['alignment_actual'] = itemized[num_cols].agg(','.join, axis=1)\n itemized = itemized.drop(columns=num_cols)\n itemized.columns = ['index_df', 'alignment_actual']\n itemized['alignment'] = alignment.ident\n all_alignments.append(itemized)\nall_alignments = pd.concat(all_alignments)\nmerged = pd.merge(all_alignments, df, left_on='index_df', right_index=True)\nmerged.drop(columns=['count', 'index_df']).to_csv(output_csv_file, index=False)\nprint(f'{aligned_sqs_file} done')\n", "step-5": "import os\nimport re\nimport time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cluster import AgglomerativeClustering\nimport math\nimport edlib\nfrom progress.bar import IncrementalBar as Bar\nfrom multiprocessing import Pool\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--pools\",\n default=4,\n type=int,\n help=\"Number of threads to use in aligning. Default 4. Optional.\"\n )\nparser.add_argument(\"--misses\",\n default=5,\n type=float,\n help=\"Number of allowed substitutions/insertions/deletions in aligning a sequence of length k. \"\n \"For longer sequences, this is scaled. \"\n )\nparser.add_argument(\"--aligned\",\n default=None,\n type=str,\n help=\"Path to the output aligned directory. Required.\"\n )\nparser.add_argument(\"--overview\",\n default=None,\n type=str,\n help=\"Path to the output description csv. Required. Pairs with <--aligned> directory.\"\n )\nparser.add_argument(\"--k\",\n default=-1,\n type=int,\n help=\"Size of the k-mer created by BCALM. Required.\"\n )\nparser.add_argument(\"--input\",\n default=None,\n type=str,\n help=\"Path to the input file.\"\n )\nparser.set_defaults(all_sqs_result=False)\n\nargs = parser.parse_args([] if \"__file__\" not in globals() else None)\n\nbases = dict(A=0, C=1, G=2, T=3)\nbases['-'] = 4\nrev_bases = {v: k for k, v in bases.items()}\nglobal_alignment_ident_no = 0\n\n\noperations = {\n '.' 
: 0,\n '-' : 1,\n '|' : 0\n}\n\n\nclass AlignmentProfile:\n def __init__(self, width, df, identifier):\n self.ident = identifier\n\n self.profile = np.zeros((5, width))\n self.repre_sq = \"\"\n self.seq_alignments = None # this will be a pandas df\n self.seq_align_counter = -1\n\n self.calculate_profile(df)\n\n def calculate_profile(self, df):\n self.seq_alignments = pd.DataFrame([(index, *np.zeros(self.profile.shape[1], dtype=np.int8)) for index in df.index])\n\n unwrapped_sq = df['sq'].str.split('', expand=True)\n unwrapped_sq = unwrapped_sq.drop(columns=[unwrapped_sq.columns[0], unwrapped_sq.columns[-1]])\n\n counts = np.stack(df['count'].values)\n\n for base in bases:\n a = unwrapped_sq != base\n newX = np.ma.array(counts, mask=a)\n new_counts = newX.sum(axis=0)\n self.profile[bases[base], :] += new_counts\n\n # repre_sq\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = \"\".join([rev_bases[x] for x in maxs])\n\n def add_sequence(self, new_sq, new_counts, nice, sq_index):\n offset = re.search(nice['target_aligned'].replace('-', ''), self.repre_sq).start(0)\n x = self.profile\n # padding with the following number of observed positions (sum of all bases)\n\n # pad profile with insertions\n insertions = np.where(np.array(list(nice['target_aligned'])) == '-')[0]\n for i, index in enumerate(insertions):\n if x.shape[1] >= index:\n value = 0\n else:\n value = x[:, index].sum()\n x = np.insert(x, index + offset, [0, 0, 0, 0, value], axis=1)\n self.seq_alignments.insert(loc=int(index+offset), column=self.seq_align_counter, value=1)\n self.seq_align_counter -= 1\n\n # pad new counts with deletions\n aligned_query = np.array(list(nice['query_aligned']))\n deletions = np.where(aligned_query == '-')[0]\n for i, index in enumerate(deletions):\n value = new_counts[index]\n new_counts = np.insert(new_counts, index, value, axis=0)\n\n i = offset\n for base, count in zip(aligned_query, new_counts):\n x[bases[base], i] += count\n i += 1\n\n self.profile = x\n\n # store new sequence alignment\n added_alignment = -np.ones(self.profile.shape[1])\n for i, char in enumerate(nice['target_aligned']):\n if char == '-':\n added_alignment[offset + i] = 1\n else:\n added_alignment[offset + i] = 0\n self.seq_alignments.loc[-1] = [sq_index, *added_alignment] # adding a row\n self.seq_alignments.index = self.seq_alignments.index + 1 # shifting index\n\n # recalculate repre_sq -- the most probable one\n maxs = np.argmax(self.profile, axis=0)\n self.repre_sq = \"\".join([rev_bases[x] for x in maxs if rev_bases[x] != '-']) # '-' is removed from the sq\n\n\ndef dst_func(x, y):\n return (np.array(x) != np.array(y)).sum()\n\n\ndef read_alignment(filename):\n for line in open(filename):\n sq, count = line.strip('\\n').split(';')\n yield sq, np.array([int(x) for x in count.split(',')]), count\n\n\ndef cluster_group(df_group, l, dst=dst_func):\n sqs = df_group.reset_index()['sq']\n n = len(sqs)\n\n if n <= 1:\n return np.zeros(n)\n\n dst_matrix = np.zeros((n, n))\n\n for i in range(n):\n for j in range(i):\n d = dst(sqs[i], sqs[j])\n dst_matrix[i, j] = d\n dst_matrix[j, i] = d\n\n model = AgglomerativeClustering(distance_threshold=threshold * l,\n n_clusters=None,\n linkage='complete',\n affinity='precomputed')\n clusters = model.fit_predict(dst_matrix)\n return clusters\n\n\naligned_sqs_file = args.input\nk = args.k\nmisses = args.misses\npools = args.pools\n\nthreshold = misses / k\nif args.aligned is None:\n output_profile_dir = aligned_sqs_file + \"_profiles\"\nelse:\n output_profile_dir = args.aligned\n\nif 
args.overview is None:\n output_csv_file = aligned_sqs_file + \"_overview.csv\"\nelse:\n output_csv_file = args.overview\n\n# read\ndf = pd.DataFrame(read_alignment(aligned_sqs_file))\ndf.columns = ['sq', 'count', 'str_count']\ndf['length'] = df['sq'].str.len()\n# df['alignment'] = -1 # every aligned sq has an alignment identification\ngroups = df.groupby(by='length')\n\nunique_lengths = df['length'].sort_values(ascending=False).unique()\n\nagainst = []\n\nlongest = unique_lengths[0]\ndf_group = groups.get_group(longest).copy()\n\nclusters = cluster_group(df_group, longest)\ndf_group['cluster'] = clusters\n\nalignments = {\n}\n\nfor cluster, cluster_df in df_group.groupby(by='cluster'):\n alignment = AlignmentProfile(longest, cluster_df, global_alignment_ident_no)\n alignments[global_alignment_ident_no] = alignment\n\n global_alignment_ident_no += 1\n against.append(alignment)\n\n # df.loc[df['sq'].isin(cluster_df['sq']), 'alignment'] = alignment.ident\n\n # to each sequence\n\n\nstart = time.time()\n\n# print(df.groupby(by='length').get_group(longest))\n# print(\"running on shorter\")\n\nwith Bar(\"Processing length groups...\", max=len(unique_lengths) - 1) as bar:\n for length in unique_lengths[1:]:\n bar.next()\n df_group = groups.get_group(length).copy()\n\n def getDistanceAndAlignment(sq):\n # this is a fallback, it should not happen\n maxval = np.floor(threshold * len(sq))\n\n min = np.inf\n min_target = None\n\n if maxval < 1:\n return min,min_target\n\n for target in against:\n align_res = edlib.align(sq, target.repre_sq, mode='HW', task='distance', k=maxval)\n if align_res['editDistance'] != -1:\n if min > align_res['editDistance']:\n if align_res['editDistance'] == 0:\n return align_res['editDistance'], target.ident\n\n min = align_res['editDistance']\n min_target = target\n\n if min_target is not None:\n min_target = min_target.ident\n\n return min, min_target\n\n x = length * threshold\n if length * threshold >= 1:\n # try align\n with Pool(pools) as pool:\n result = pool.map(getDistanceAndAlignment, df_group['sq'])\n df_group['aligned'] = result\n\n # add aligned to profiles\n aligned = df_group[df_group['aligned'] != (np.inf, None)]\n for index, row in aligned.iterrows():\n to = alignments[row['aligned'][1]]\n align_res = edlib.align(row.sq, to.repre_sq, mode='HW', task='path')\n nice = edlib.getNiceAlignment(align_res, row.sq, to.repre_sq)\n to.add_sequence(row.sq, row['count'], nice, index)\n # df.loc[df['sq'] == row.sq, 'alignment'] = to.ident\n\n # cluster unaligned, add to against\n unaligned = df_group[df_group['aligned'] == (np.inf, None)].copy()\n clusters = cluster_group(unaligned, length)\n unaligned['cluster'] = clusters\n\n for cluster, cluster_df in unaligned.groupby(by='cluster'):\n alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)\n alignments[global_alignment_ident_no] = alignment\n global_alignment_ident_no += 1\n against.append(alignment)\n else:\n # threshold is less than one, no clustering nor alignment takes place\n df_group[\"aligned\"] = [(np.inf, None) for _ in range(len(df_group))]\n unaligned = df_group.copy()\n unaligned[\"cluster\"] = list(range(len(unaligned)))\n # print(f\"pseudoclustering elapsed: {time.time() - s}\")\n\n s = time.time()\n for i, row in unaligned.iterrows():\n cluster_df = pd.DataFrame(row).T\n alignment = AlignmentProfile(length, cluster_df, global_alignment_ident_no)\n alignments[global_alignment_ident_no] = alignment\n global_alignment_ident_no += 1\n against.append(alignment)\n # print(f\"alignment 
elapsed: {time.time() - s}\")\n\n\nprint(f\"{aligned_sqs_file} elapsed: {time.time() - start}\")\nprint(f\"{aligned_sqs_file} writing...\")\n\n\nos.makedirs(output_profile_dir, exist_ok=True)\nfor alignment in against:\n filename = f\"{output_profile_dir}/{alignment.ident}.prf\"\n np.save(filename, alignment.profile)\n\n# get actual alignment for each sq\nall_alignments = []\nfor alignment in against:\n itemized = alignment.seq_alignments\n num_cols = itemized.columns[1:]\n # index_col = itemized.columns[0]\n # translate to sth readable\n for col in num_cols:\n itemized[col] = itemized[col].astype(int).apply(str)\n\n itemized['alignment_actual'] = itemized[num_cols].agg(','.join, axis=1) # todo maybe cigar?\n itemized = itemized.drop(columns=num_cols)\n itemized.columns = ['index_df', 'alignment_actual']\n itemized['alignment'] = alignment.ident\n all_alignments.append(itemized)\n\nall_alignments = pd.concat(all_alignments)\nmerged = pd.merge(all_alignments, df, left_on='index_df', right_index=True)\n\n\n# write sequences in df\nmerged.drop(columns=['count', 'index_df']).to_csv(output_csv_file, index=False)\nprint(f\"{aligned_sqs_file} done\")\n", "step-ids": [ 5, 7, 8, 9, 11 ] }
[ 5, 7, 8, 9, 11 ]
# Insert a new number into a list sorted in descending order, keeping the order intact.
my_list = [9, 9, 9, 8, 8, 7, 7, 6, 6, 5, 4, 4, 4, 2, 2, 1]
new_num = int(input('Enter a new number - '))

# Count how many existing values the new number must go after.
i = 0
for n in my_list:
    if new_num <= n:
        i += 1
# Insert the new value (stored as a float) at the computed position.
my_list.insert(i, float(new_num))
print(my_list)
normal
{ "blob_id": "be16e13c0e03952e45f98b175975795bba19cf9a", "index": 2775, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor n in my_list:\n if new_num <= n:\n i += 1\nmy_list.insert(i, float(new_num))\nprint(my_list)\n", "step-3": "my_list = [9, 9, 9, 8, 8, 7, 7, 6, 6, 5, 4, 4, 4, 2, 2, 1]\nnew_num = int(input('Enter a new number - '))\ni = 0\nfor n in my_list:\n if new_num <= n:\n i += 1\nmy_list.insert(i, float(new_num))\nprint(my_list)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
# -*- coding: utf-8 -*-
import os
import sys
import paho.mqtt.client as mqtt
import random
import _thread
import time
import json

HOST = '0.0.0.0'
PORT = 9090
CONFIG_PATH = 'config/config.cfg'
ITEMS_PATH = 'config/items.cfg'
MILISECOND = 0.001


class Item(object):
    """One simulated sensor, parsed from a line of the items config file."""

    def __init__(self, string):
        self.convert_string_to_item(string)

    def convert_string_to_item(self, string):
        # Expected line format: platform_type,sensor_name,topic,frequent
        tokens = str(string).split(',')
        self._platform_type = tokens[0]
        self._sensor_name = tokens[1]
        self._topic = tokens[2]
        self._frequent = int(tokens[3])

    def get_sensor_name(self):
        return self._sensor_name

    def get_topic(self):
        return self._topic

    def get_frequent(self):
        return self._frequent

    def increase_frequent(self):
        self._frequent += 10
        print(self._frequent)
        return self._frequent


class SimulatorEngine(object):
    _bStop = 1

    def __init__(self):
        # Read the broker configuration: ip, port, client name, two boolean
        # flags and the test duration, one value per line.
        items = [line.rstrip('\n') for line in open(CONFIG_PATH)]
        self._ip_broker = items[0]
        self._port_broker = items[1]
        self._client_name = items[2]
        self._num_of_sensor = 0
        if items[3] and items[3] == 'True':
            self._is_increase_freq = True
        else:
            self._is_increase_freq = False
        if items[4] and items[4] == 'True':
            self._is_increase_instance = True
        else:
            self._is_increase_instance = False
        self.test_time = float(items[5])
        # Read the sensor definitions.
        self._items = [Item(string=line.rstrip('\n')) for line in open(ITEMS_PATH)]
        self._mqttc = mqtt.Client(self._client_name)
        self._mqttc.connect(self._ip_broker, int(self._port_broker))

    def send_data(self, item):
        """Publish random values for one sensor forever, at its configured rate."""
        start_time = time.time()
        time_data_change_period = random.randint(60, 3600)
        time_data_change = time.time()
        data_value = random.randint(0, 100)
        print('Change data value. Period {} Value {}'.format(time_data_change_period, data_value))
        while 1:
            next_time = time.time()
            # Pick a new random value once the current change period expires.
            if next_time - time_data_change >= time_data_change_period:
                time_data_change = next_time
                time_data_change_period = random.randint(60, 3600)
                data_value = random.randint(0, 100)
                print('Change data value. Period {} Value {}'.format(time_data_change_period, data_value))

            if item._platform_type == 'onem2m':
                data = {}
                data['value'] = str(data_value)
                data['timestamp'] = "{0:.3f}".format(time.time())
                data['num_of_sensor'] = str(self._num_of_sensor)
                message = json.dumps(data)
            else:
                message = '''
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <obj>
                    <int val="{value}" name="data"/>
                </obj>
                '''.format(value=data_value)
            print(message)
            self._mqttc.publish(topic=item.get_topic(), payload=message)
            time.sleep(60 / item.get_frequent())
            print('Topic {} -- Data {}'.format(item.get_topic(), data_value))
            if self._is_increase_freq:
                # Speed this sensor up once per hour when the flag is set.
                if next_time - start_time >= 3600:
                    start_time = next_time
                    item.increase_frequent()

    def register_sensor_with_ordinator(self):
        os.system(
            'sensor_detail="$(/bin/hostname -i),$(hostname)" && curl -F "sensor_detail=${sensor_detail}" -F "defined_file=@openhab/demo.items" ${CO_ORDINATOR_DOMAIN}/sensor/define')

    def execute(self, num_of_item_start):
        # Launch one publishing thread per sensor in the next batch of five.
        try:
            for item in self._items[num_of_item_start:num_of_item_start + 5]:
                _thread.start_new_thread(self.send_data, (item,))
        except Exception as e:
            print(e)

    @property
    def is_increase_instance(self):
        return self._is_increase_instance

    @property
    def items(self):
        return self._items


def main(argv):
    engine = SimulatorEngine()
    start_time = time.time()
    item_start = 0
    engine._num_of_sensor = 5
    engine.execute(item_start)
    # Keep the main thread alive; when configured, start five more sensors
    # every test_time seconds while there are sensors left.
    while 1:
        time.sleep(MILISECOND)  # avoid a tight busy-wait loop
        if item_start + 10 <= len(engine.items):
            next_time = time.time()
            if engine.is_increase_instance:
                if next_time - start_time >= engine.test_time:
                    start_time = next_time
                    item_start += 5
                    engine._num_of_sensor = item_start + 5
                    engine.execute(item_start)


if __name__ == '__main__':
    main(sys.argv[1:])
normal
{ "blob_id": "3375bc94d214b0b1c67986d35b0587714dd63bcd", "index": 7723, "step-1": "<mask token>\n\n\nclass Item(object):\n <mask token>\n\n def convert_string_to_item(self, string):\n tokens = str(string).split(',')\n self._platform_type = tokens[0]\n self._sensor_name = tokens[1]\n self._topic = tokens[2]\n self._frequent = int(tokens[3])\n\n def get_sensor_name(self):\n return self._sensor_name\n <mask token>\n\n def get_frequent(self):\n return self._frequent\n\n def increase_frequent(self):\n self._frequent += 10\n print(self._frequent)\n return self._frequent\n\n\nclass SimulatorEngine(object):\n _bStop = 1\n\n def __init__(self):\n items = [line.rstrip('\\n') for line in open(CONFIG_PATH)]\n self._ip_broker = items[0]\n self._port_broker = items[1]\n self._client_name = items[2]\n self._num_of_sensor = 0\n if items[3] and items[3] == 'True':\n self._is_increase_freq = True\n else:\n self._is_increase_freq = False\n if items[4] and items[4] == 'True':\n self._is_increase_instance = True\n else:\n self._is_increase_instance = False\n self.test_time = float(items[5])\n items = [Item(string=line.rstrip('\\n')) for line in open(ITEMS_PATH)]\n self._items = items\n self._mqttc = mqtt.Client(self._client_name)\n self._mqttc.connect(self._ip_broker, int(self._port_broker))\n\n def send_data(self, item):\n start_time = time.time()\n time_data_change_period = random.randint(60, 3600)\n time_data_change = time.time()\n data_value = random.randint(0, 100)\n print('Change data value. Period {} Value {}'.format(\n time_data_change_period, data_value))\n while 1:\n next_time = time.time()\n if next_time - time_data_change >= time_data_change_period:\n time_data_change = next_time\n time_data_change_period = random.randint(60, 3600)\n data_value = random.randint(0, 100)\n print('Change data value. 
Period {} Value {}'.format(\n time_data_change_period, data_value))\n if item._platform_type == 'onem2m':\n data = {}\n data['value'] = str(data_value)\n data['timestamp'] = '{0:.3f}'.format(time.time())\n data['num_of_sensor'] = str(self._num_of_sensor)\n message = json.dumps(data)\n else:\n message = (\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <obj>\n <int val=\"{value}\" name=\"data\"/>\n </obj>\n \"\"\"\n .format(value=data_value))\n print(message)\n self._mqttc.publish(topic=item.get_topic(), payload=message)\n time.sleep(60 / item.get_frequent())\n print('Topic {} -- Data {}'.format(item.get_topic(), data_value))\n if self._is_increase_freq:\n if next_time - start_time >= 3600:\n start_time = next_time\n item.increase_frequent()\n\n def register_sensor_with_ordinator(self):\n os.system(\n 'sensor_detail=\"$(/bin/hostname -i),$(hostname)\" && curl -F \"sensor_detail=${sensor_detail}\" -F \"defined_file=@openhab/demo.items\" ${CO_ORDINATOR_DOMAIN}/sensor/define'\n )\n\n def execute(self, num_of_item_start):\n try:\n for item in self._items[num_of_item_start:num_of_item_start + 5]:\n _thread.start_new_thread(self.send_data, (item,))\n except Exception as e:\n print(e)\n\n @property\n def is_increase_instance(self):\n return self._is_increase_instance\n\n @property\n def items(self):\n return self._items\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Item(object):\n\n def __init__(self, string):\n self.convert_string_to_item(string)\n\n def convert_string_to_item(self, string):\n tokens = str(string).split(',')\n self._platform_type = tokens[0]\n self._sensor_name = tokens[1]\n self._topic = tokens[2]\n self._frequent = int(tokens[3])\n\n def get_sensor_name(self):\n return self._sensor_name\n\n def get_topic(self):\n return self._topic\n\n def get_frequent(self):\n return self._frequent\n\n def increase_frequent(self):\n self._frequent += 10\n print(self._frequent)\n return self._frequent\n\n\nclass SimulatorEngine(object):\n _bStop = 1\n\n def __init__(self):\n items = [line.rstrip('\\n') for line in open(CONFIG_PATH)]\n self._ip_broker = items[0]\n self._port_broker = items[1]\n self._client_name = items[2]\n self._num_of_sensor = 0\n if items[3] and items[3] == 'True':\n self._is_increase_freq = True\n else:\n self._is_increase_freq = False\n if items[4] and items[4] == 'True':\n self._is_increase_instance = True\n else:\n self._is_increase_instance = False\n self.test_time = float(items[5])\n items = [Item(string=line.rstrip('\\n')) for line in open(ITEMS_PATH)]\n self._items = items\n self._mqttc = mqtt.Client(self._client_name)\n self._mqttc.connect(self._ip_broker, int(self._port_broker))\n\n def send_data(self, item):\n start_time = time.time()\n time_data_change_period = random.randint(60, 3600)\n time_data_change = time.time()\n data_value = random.randint(0, 100)\n print('Change data value. Period {} Value {}'.format(\n time_data_change_period, data_value))\n while 1:\n next_time = time.time()\n if next_time - time_data_change >= time_data_change_period:\n time_data_change = next_time\n time_data_change_period = random.randint(60, 3600)\n data_value = random.randint(0, 100)\n print('Change data value. 
Period {} Value {}'.format(\n time_data_change_period, data_value))\n if item._platform_type == 'onem2m':\n data = {}\n data['value'] = str(data_value)\n data['timestamp'] = '{0:.3f}'.format(time.time())\n data['num_of_sensor'] = str(self._num_of_sensor)\n message = json.dumps(data)\n else:\n message = (\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <obj>\n <int val=\"{value}\" name=\"data\"/>\n </obj>\n \"\"\"\n .format(value=data_value))\n print(message)\n self._mqttc.publish(topic=item.get_topic(), payload=message)\n time.sleep(60 / item.get_frequent())\n print('Topic {} -- Data {}'.format(item.get_topic(), data_value))\n if self._is_increase_freq:\n if next_time - start_time >= 3600:\n start_time = next_time\n item.increase_frequent()\n\n def register_sensor_with_ordinator(self):\n os.system(\n 'sensor_detail=\"$(/bin/hostname -i),$(hostname)\" && curl -F \"sensor_detail=${sensor_detail}\" -F \"defined_file=@openhab/demo.items\" ${CO_ORDINATOR_DOMAIN}/sensor/define'\n )\n\n def execute(self, num_of_item_start):\n try:\n for item in self._items[num_of_item_start:num_of_item_start + 5]:\n _thread.start_new_thread(self.send_data, (item,))\n except Exception as e:\n print(e)\n\n @property\n def is_increase_instance(self):\n return self._is_increase_instance\n\n @property\n def items(self):\n return self._items\n\n\ndef main(argv):\n engine = SimulatorEngine()\n start_time = time.time()\n item_start = 0\n engine._num_of_sensor = 5\n engine.execute(item_start)\n while 1:\n if item_start + 10 <= len(engine.items):\n next_time = time.time()\n if engine.is_increase_instance:\n if next_time - start_time >= engine.test_time:\n start_time = next_time\n item_start += 5\n engine._num_of_sensor = item_start + 5\n engine.execute(item_start)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "step-3": "<mask token>\nHOST = '0.0.0.0'\nPORT = 9090\nCONFIG_PATH = 'config/config.cfg'\nITEMS_PATH = 'config/items.cfg'\nMILISECOND = 0.001\n\n\nclass Item(object):\n\n def __init__(self, string):\n self.convert_string_to_item(string)\n\n def convert_string_to_item(self, string):\n tokens = str(string).split(',')\n self._platform_type = tokens[0]\n self._sensor_name = tokens[1]\n self._topic = tokens[2]\n self._frequent = int(tokens[3])\n\n def get_sensor_name(self):\n return self._sensor_name\n\n def get_topic(self):\n return self._topic\n\n def get_frequent(self):\n return self._frequent\n\n def increase_frequent(self):\n self._frequent += 10\n print(self._frequent)\n return self._frequent\n\n\nclass SimulatorEngine(object):\n _bStop = 1\n\n def __init__(self):\n items = [line.rstrip('\\n') for line in open(CONFIG_PATH)]\n self._ip_broker = items[0]\n self._port_broker = items[1]\n self._client_name = items[2]\n self._num_of_sensor = 0\n if items[3] and items[3] == 'True':\n self._is_increase_freq = True\n else:\n self._is_increase_freq = False\n if items[4] and items[4] == 'True':\n self._is_increase_instance = True\n else:\n self._is_increase_instance = False\n self.test_time = float(items[5])\n items = [Item(string=line.rstrip('\\n')) for line in open(ITEMS_PATH)]\n self._items = items\n self._mqttc = mqtt.Client(self._client_name)\n self._mqttc.connect(self._ip_broker, int(self._port_broker))\n\n def send_data(self, item):\n start_time = time.time()\n time_data_change_period = random.randint(60, 3600)\n time_data_change = time.time()\n data_value = random.randint(0, 100)\n print('Change data value. 
Period {} Value {}'.format(\n time_data_change_period, data_value))\n while 1:\n next_time = time.time()\n if next_time - time_data_change >= time_data_change_period:\n time_data_change = next_time\n time_data_change_period = random.randint(60, 3600)\n data_value = random.randint(0, 100)\n print('Change data value. Period {} Value {}'.format(\n time_data_change_period, data_value))\n if item._platform_type == 'onem2m':\n data = {}\n data['value'] = str(data_value)\n data['timestamp'] = '{0:.3f}'.format(time.time())\n data['num_of_sensor'] = str(self._num_of_sensor)\n message = json.dumps(data)\n else:\n message = (\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <obj>\n <int val=\"{value}\" name=\"data\"/>\n </obj>\n \"\"\"\n .format(value=data_value))\n print(message)\n self._mqttc.publish(topic=item.get_topic(), payload=message)\n time.sleep(60 / item.get_frequent())\n print('Topic {} -- Data {}'.format(item.get_topic(), data_value))\n if self._is_increase_freq:\n if next_time - start_time >= 3600:\n start_time = next_time\n item.increase_frequent()\n\n def register_sensor_with_ordinator(self):\n os.system(\n 'sensor_detail=\"$(/bin/hostname -i),$(hostname)\" && curl -F \"sensor_detail=${sensor_detail}\" -F \"defined_file=@openhab/demo.items\" ${CO_ORDINATOR_DOMAIN}/sensor/define'\n )\n\n def execute(self, num_of_item_start):\n try:\n for item in self._items[num_of_item_start:num_of_item_start + 5]:\n _thread.start_new_thread(self.send_data, (item,))\n except Exception as e:\n print(e)\n\n @property\n def is_increase_instance(self):\n return self._is_increase_instance\n\n @property\n def items(self):\n return self._items\n\n\ndef main(argv):\n engine = SimulatorEngine()\n start_time = time.time()\n item_start = 0\n engine._num_of_sensor = 5\n engine.execute(item_start)\n while 1:\n if item_start + 10 <= len(engine.items):\n next_time = time.time()\n if engine.is_increase_instance:\n if next_time - start_time >= engine.test_time:\n start_time = next_time\n item_start += 5\n engine._num_of_sensor = item_start + 5\n engine.execute(item_start)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "step-4": "import os\nimport sys, getopt\nimport paho.mqtt.client as mqtt\nimport random\nimport _thread\nimport time\nimport json\nHOST = '0.0.0.0'\nPORT = 9090\nCONFIG_PATH = 'config/config.cfg'\nITEMS_PATH = 'config/items.cfg'\nMILISECOND = 0.001\n\n\nclass Item(object):\n\n def __init__(self, string):\n self.convert_string_to_item(string)\n\n def convert_string_to_item(self, string):\n tokens = str(string).split(',')\n self._platform_type = tokens[0]\n self._sensor_name = tokens[1]\n self._topic = tokens[2]\n self._frequent = int(tokens[3])\n\n def get_sensor_name(self):\n return self._sensor_name\n\n def get_topic(self):\n return self._topic\n\n def get_frequent(self):\n return self._frequent\n\n def increase_frequent(self):\n self._frequent += 10\n print(self._frequent)\n return self._frequent\n\n\nclass SimulatorEngine(object):\n _bStop = 1\n\n def __init__(self):\n items = [line.rstrip('\\n') for line in open(CONFIG_PATH)]\n self._ip_broker = items[0]\n self._port_broker = items[1]\n self._client_name = items[2]\n self._num_of_sensor = 0\n if items[3] and items[3] == 'True':\n self._is_increase_freq = True\n else:\n self._is_increase_freq = False\n if items[4] and items[4] == 'True':\n self._is_increase_instance = True\n else:\n self._is_increase_instance = False\n self.test_time = float(items[5])\n items = [Item(string=line.rstrip('\\n')) for line in 
open(ITEMS_PATH)]\n self._items = items\n self._mqttc = mqtt.Client(self._client_name)\n self._mqttc.connect(self._ip_broker, int(self._port_broker))\n\n def send_data(self, item):\n start_time = time.time()\n time_data_change_period = random.randint(60, 3600)\n time_data_change = time.time()\n data_value = random.randint(0, 100)\n print('Change data value. Period {} Value {}'.format(\n time_data_change_period, data_value))\n while 1:\n next_time = time.time()\n if next_time - time_data_change >= time_data_change_period:\n time_data_change = next_time\n time_data_change_period = random.randint(60, 3600)\n data_value = random.randint(0, 100)\n print('Change data value. Period {} Value {}'.format(\n time_data_change_period, data_value))\n if item._platform_type == 'onem2m':\n data = {}\n data['value'] = str(data_value)\n data['timestamp'] = '{0:.3f}'.format(time.time())\n data['num_of_sensor'] = str(self._num_of_sensor)\n message = json.dumps(data)\n else:\n message = (\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <obj>\n <int val=\"{value}\" name=\"data\"/>\n </obj>\n \"\"\"\n .format(value=data_value))\n print(message)\n self._mqttc.publish(topic=item.get_topic(), payload=message)\n time.sleep(60 / item.get_frequent())\n print('Topic {} -- Data {}'.format(item.get_topic(), data_value))\n if self._is_increase_freq:\n if next_time - start_time >= 3600:\n start_time = next_time\n item.increase_frequent()\n\n def register_sensor_with_ordinator(self):\n os.system(\n 'sensor_detail=\"$(/bin/hostname -i),$(hostname)\" && curl -F \"sensor_detail=${sensor_detail}\" -F \"defined_file=@openhab/demo.items\" ${CO_ORDINATOR_DOMAIN}/sensor/define'\n )\n\n def execute(self, num_of_item_start):\n try:\n for item in self._items[num_of_item_start:num_of_item_start + 5]:\n _thread.start_new_thread(self.send_data, (item,))\n except Exception as e:\n print(e)\n\n @property\n def is_increase_instance(self):\n return self._is_increase_instance\n\n @property\n def items(self):\n return self._items\n\n\ndef main(argv):\n engine = SimulatorEngine()\n start_time = time.time()\n item_start = 0\n engine._num_of_sensor = 5\n engine.execute(item_start)\n while 1:\n if item_start + 10 <= len(engine.items):\n next_time = time.time()\n if engine.is_increase_instance:\n if next_time - start_time >= engine.test_time:\n start_time = next_time\n item_start += 5\n engine._num_of_sensor = item_start + 5\n engine.execute(item_start)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "step-5": "# -*- coding: utf-8 -*-\nimport os\nimport sys, getopt\nimport paho.mqtt.client as mqtt\nimport random\nimport _thread\nimport time\nimport json\n\nHOST = '0.0.0.0'\nPORT = 9090\n# gb_freq = 0\nCONFIG_PATH = 'config/config.cfg'\nITEMS_PATH = 'config/items.cfg'\nMILISECOND = 0.001\n\nclass Item(object):\n def __init__(self, string):\n self.convert_string_to_item(string)\n\n def convert_string_to_item(self, string):\n # sensor_name, topic_in,topic_out,frequent\n tokens = str(string).split(',')\n self._platform_type = tokens[0]\n self._sensor_name = tokens[1]\n self._topic = tokens[2]\n self._frequent = int(tokens[3])\n\n def get_sensor_name(self):\n return self._sensor_name\n\n def get_topic(self):\n return self._topic\n\n def get_frequent(self):\n return self._frequent\n\n def increase_frequent(self):\n self._frequent += 10\n print(self._frequent)\n return self._frequent\n\n\nclass SimulatorEngine(object):\n _bStop = 1\n\n\n def __init__(self):\n # read config\n items = [line.rstrip('\\n') for line in 
open(CONFIG_PATH)]\n self._ip_broker = items[0]\n self._port_broker = items[1]\n self._client_name = items[2]\n self._num_of_sensor = 0\n if items[3] and items[3] == 'True':\n self._is_increase_freq = True\n else:\n self._is_increase_freq = False\n if items[4] and items[4] == 'True':\n self._is_increase_instance = True\n else:\n self._is_increase_instance = False\n self.test_time = float(items[5])\n items = [Item(string=line.rstrip('\\n')) for line in open(ITEMS_PATH)]\n self._items = items\n self._mqttc = mqtt.Client(self._client_name)\n self._mqttc.connect(self._ip_broker, int(self._port_broker))\n # hostname = socket.gethostname()\n\n def send_data(self, item):\n start_time = time.time()\n time_data_change_period = random.randint(60, 3600)\n time_data_change = time.time()\n data_value = random.randint(0, 100)\n print('Change data value. Period {} Value {}'.format(time_data_change_period, data_value))\n while 1:\n next_time = time.time()\n if next_time - time_data_change >= time_data_change_period:\n time_data_change = next_time\n time_data_change_period = random.randint(60, 3600)\n data_value = random.randint(0, 100)\n print('Change data value. Period {} Value {}'.format(time_data_change_period, data_value))\n\n if item._platform_type == 'onem2m':\n # message = data_value\n data = {}\n data['value'] = str(data_value)\n data['timestamp'] = \"{0:.3f}\".format(time.time())\n data['num_of_sensor'] = str(self._num_of_sensor)\n message = json.dumps(data)\n else:\n message = '''\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <obj>\n <int val=\"{value}\" name=\"data\"/>\n </obj>\n '''.format(value=data_value)\n print(message)\n self._mqttc.publish(topic=item.get_topic(), payload=message)\n time.sleep(60 / item.get_frequent())\n print('Topic {} -- Data {}'.format(item.get_topic(), data_value))\n if self._is_increase_freq:\n if next_time - start_time >= 3600:\n start_time = next_time\n item.increase_frequent()\n\n # if self._is_increase_instance:\n # if next_time - start_time >= 3600:\n # start_time = next_time\n # item.increase_frequent()\n\n def register_sensor_with_ordinator(self):\n os.system(\n 'sensor_detail=\"$(/bin/hostname -i),$(hostname)\" && curl -F \"sensor_detail=${sensor_detail}\" -F \"defined_file=@openhab/demo.items\" ${CO_ORDINATOR_DOMAIN}/sensor/define')\n\n def execute(self, num_of_item_start):\n try:\n for item in self._items[num_of_item_start:num_of_item_start+5]:\n _thread.start_new_thread(self.send_data, (item,))\n # print(item.get_topic())\n except Exception as e:\n print(e)\n #\n # while self._bStop:\n # time.sleep(1)\n\n @property\n def is_increase_instance(self):\n return self._is_increase_instance\n\n @property\n def items(self):\n return self._items\n\n\ndef main(argv):\n engine = SimulatorEngine()\n start_time = time.time()\n item_start = 0\n engine._num_of_sensor = 5\n engine.execute(item_start)\n while 1:\n if item_start+10 <= len(engine.items):\n next_time = time.time()\n if engine.is_increase_instance:\n if next_time - start_time >= engine.test_time:\n start_time = next_time\n item_start += 5\n engine._num_of_sensor = item_start + 5\n engine.execute(item_start)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n", "step-ids": [ 13, 17, 18, 19, 20 ] }
[ 13, 17, 18, 19, 20 ]
# Key of the Google Doc that holds the copy text for this project.
COPY_GOOGLE_DOC_KEY = '1CdafeVmmtNa_PMV99TapPHvLUVzYz0xkvHcpINQtQ6c'
# Slug used in the deploy URL.
DEPLOY_SLUG = 'al-qassemi'
NUM_SLIDES_AFTER_CONTENT = 2

# Configuration: feature flags
AUDIO = True
VIDEO = False
FILMSTRIP = False
PROGRESS_BAR = False
normal
{ "blob_id": "f398b724fc28bc25ddb8baf492f34075db0c1f61", "index": 7703, "step-1": "<mask token>\n", "step-2": "COPY_GOOGLE_DOC_KEY = '1CdafeVmmtNa_PMV99TapPHvLUVzYz0xkvHcpINQtQ6c'\nDEPLOY_SLUG = 'al-qassemi'\nNUM_SLIDES_AFTER_CONTENT = 2\nAUDIO = True\nVIDEO = False\nFILMSTRIP = False\nPROGRESS_BAR = False\n", "step-3": "COPY_GOOGLE_DOC_KEY = '1CdafeVmmtNa_PMV99TapPHvLUVzYz0xkvHcpINQtQ6c'\nDEPLOY_SLUG = 'al-qassemi'\nNUM_SLIDES_AFTER_CONTENT = 2\n\n# Configuration\nAUDIO = True\nVIDEO = False\nFILMSTRIP = False\nPROGRESS_BAR = False", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#-*- coding:utf-8 -*-
'''
Small Flask service that runs regexp searches against an Elasticsearch
backend (the "bugzilla" and "ikb" indices) and renders the hits as HTML.
Written for Python 2: it relies on basestring and the 'string-escape' codec.
'''
from datetime import timedelta
from functools import update_wrapper
import json
from subprocess import Popen, PIPE

from flask import Flask, make_response, request, current_app, render_template

app = Flask(__name__)
app.debug = True


def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator that adds CORS headers to the wrapped Flask view."""
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            h['Access-Control-Allow-Credentials'] = 'true'
            h['Access-Control-Allow-Headers'] = \
                "Origin, X-Requested-With, Content-Type, Accept, Authorization"
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator


def getBzResult(search_str):
    # Bugzilla hits expose 'bug_id' instead of the generic 'id' key.
    ans_list = get_search_res("bugzilla", "text", search_str)
    for i in ans_list:
        i['bug_id'] = i.pop('id')
    return ans_list


def getIkbResult(search_str):
    # Knowledge-base hits expose 'kb_id' instead of the generic 'id' key.
    ans_list = get_search_res("ikb", "kb", search_str)
    for i in ans_list:
        i['kb_id'] = i.pop('id')
    return ans_list


def get_search_res(index, doc_type, query):
    # Build a regexp query and POST it to Elasticsearch through curl.
    search_dsl = '{"query":{"regexp":{"text":\".*%s.*\"}}}' % (query)
    es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (index, doc_type)
    child = Popen(["curl", es_url, "-d",
                   str(search_dsl).lower().encode('string-escape')], stdout=PIPE)
    json_res = child.communicate(None)[0]
    jres = json.loads(json_res)
    ans_list = []
    for item in jres['hits']['hits']:
        cur = {}
        cur['id'] = item['_id']
        cur['summary'] = item['_source']['summary']
        ans_list.append(cur)
    return ans_list


@app.route("/regexSearch")
@crossdomain(origin='*')
def regexSearch():
    # 'data' is a JSON-encoded list of regular expressions to look up.
    data = json.loads(request.args.get('data', '').strip())
    results = list()
    for regexItem in data:
        bzResult = getBzResult(regexItem)
        ikbResult = getIkbResult(regexItem)
        results.append([regexItem, bzResult, ikbResult])
    return render_template('search_result.html', results=results)


@app.route("/DefaultError")
@crossdomain(origin='*')
def defaultError():
    return render_template('stop_sign.html')


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5555)
normal
{ "blob_id": "70c78021a2544ea372545b037ed55298c26391d1", "index": 1182, "step-1": "<mask token>\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\[email protected]('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\[email protected]('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\[email protected]('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\[email protected]('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef crossdomain(origin=None, methods=None, headers=None, max_age=21600,\n attach_to_all=True, automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = 
max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'\n ] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\[email protected]('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\[email protected]('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5555)\n", "step-4": "<mask token>\napp = Flask(__name__)\napp.debug = True\n<mask token>\n\n\ndef crossdomain(origin=None, methods=None, headers=None, max_age=21600,\n attach_to_all=True, automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not 
attach_to_all and request.method != 'OPTIONS':\n return resp\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'\n ] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\[email protected]('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\[email protected]('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5555)\n", "step-5": "#-*- coding:utf-8 -*-\n'''\n'''\nfrom flask import Flask, jsonify\napp = Flask(__name__)\napp.debug = True\nfrom datetime import timedelta\nfrom flask import make_response, request, current_app, render_template\nfrom functools import update_wrapper\nimport json\n\nfrom subprocess import * \n\ndef crossdomain(origin=None, methods=None, headers=None,\n max_age=21600, attach_to_all=True,\n automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = 
str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'] = \\\n \"Origin, X-Requested-With, Content-Type, Accept, Authorization\"\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\ndef getBzResult(search_str):\n ans_list = get_search_res(\"bugzilla\", \"text\", search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n #raise Exception('xyz')\n return ans_list\n\ndef getIkbResult(search_str):\n ans_list = get_search_res(\"ikb\", \"kb\", search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\\\".*%s.*\\\"}}}' %(query)\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' %(index, doc_type)\n child = Popen([\"curl\", es_url, \"-d\", str(search_dsl).lower().encode('string-escape')], stdout=PIPE) \n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {} \n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n #sorted to get the latest item\n #newlist = list(reversed(sorted(ans_list, key=lambda k: k['id'])))\n \n return ans_list\n\[email protected](\"/regexSearch\")\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n #raise Exception('xyz')\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results = results)\n\n return render_template('search_result.html', results = results)\n\[email protected](\"/DefaultError\")\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5555)\n", "step-ids": [ 4, 5, 7, 8, 10 ] }
[ 4, 5, 7, 8, 10 ]
from datetime import date import config import datetime import numpy import pandas import data_sources from data_sources import POPULATION, convert_to_ccaa_iso import material_line_chart import ministry_datasources HEADER = '''<html> <head> <title>{}</title> <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script> <script type="text/javascript"> ''' HEADER2 = ''' google.charts.load('current', {'packages':['line', 'corechart', 'controls']}); ''' DESCRIPTIONS_CCAA = { 'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.', 'hospitalized': 'Número medio de hospitalizaciones por cien mil habitantes (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.', 'deceased': 'Número medio de fallecidos por cien mil habitantes (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.', } DESCRIPTIONS_SPA = { 'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.', 'hospitalized': 'Número medio de hospitalizaciones (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.', 'deceased': 'Número medio de fallecidos (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.', } DESCRIPTIONS = {True: DESCRIPTIONS_SPA, False: DESCRIPTIONS_CCAA} def calc_accumulated_indicende_per_ccaa(report, num_days=15): ccaas = data_sources.get_ccaas_in_dset(report) dframe = report['dframe'] num_cases = dframe['num_casos'] ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index) index = num_cases.index.to_frame(index=False) time_delta = numpy.timedelta64(num_days, 'D') accumulated_cases_by_ccaa = {} for ccaa in ccaas: mask = index[ccaa_column] == ccaa mask = mask.values num_cases_for_this_ccaa = num_cases[mask] this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False) this_ccaa_dates = this_ccaa_index['fecha'] num_accumulated_cases = [] valid_dates = [] for date in this_ccaa_dates: date0 = date - time_delta mask = numpy.logical_and(this_ccaa_dates > date0, this_ccaa_dates <= date) mask = mask.values if numpy.sum(mask) < num_days: continue num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[mask])) valid_dates.append(date) num_accumulated_cases = pandas.Series(num_accumulated_cases, index=valid_dates) num_accumulated_cases = num_accumulated_cases / data_sources.POPULATION[ccaa] * 1e5 accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases return accumulated_cases_by_ccaa def _create_js_chart(dframe, date_range, js_function_name, div_id, title, width, height): table = [] ccaas = sorted(dframe.index) dates = list(dframe.columns) if date_range is not None: dates = [date for date in dates if date > date_range[0] and date <= date_range[1]] columns = [('date', 'fecha')] columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas]) for date in dates: row = [date.date()] for ccaa in ccaas: value = dframe.loc[ccaa, date] row.append(value) table.append(row) js_function_name = js_function_name html = material_line_chart.create_chart_js(js_function_name, div_id, title, columns, table, width=width, height=height) return html def _write_table_from_series(series): html = '<table>' for index, value in zip(series.index, series.values): html += 
f'<tr><td>{index}</td><td>{value}</td></tr>\n' html += '</table>' return html def is_desired_ccaa(ccaa, desired_ccaas): return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa) in desired_ccaas def _create_table_for_chart_from_dict(dict_data, desired_ccaas): one_data = list(dict_data.values())[0] ccaas = sorted(dict_data.keys()) ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)] dates = list(one_data.index) table = [] for date in dates: row = [date.date()] for ccaa in ccaas: row.append(dict_data[ccaa][date]) table.append(row) return table, ccaas, dates def _create_accumulate_indicence_table_for_spa_chart_from_report(report, num_days): dframe = report['dframe'] time_delta = numpy.timedelta64(num_days, 'D') num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos'] tot_pop = sum(data_sources.POPULATION.values()) dates = numpy.array(num_cases.index) num_accumulated_cases = [] valid_dates = [] for date in dates: date0 = date - time_delta mask = numpy.logical_and(dates > date0, dates <= date) if numpy.sum(mask) < num_days: continue num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop * 1e5) date = datetime.datetime.fromtimestamp(date.astype('O') / 1e9) valid_dates.append(date) table = [(date.date(), cases) for date, cases in zip(valid_dates, num_accumulated_cases)] dates = valid_dates return table, dates def _create_table_for_chart_from_dframe(dframe, desired_ccaas): ccaas = sorted(dframe.index) ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)] dates = list(dframe.columns) table = [] for date in dates: row = [date.date()] for ccaa in ccaas: row.append(dframe.loc[ccaa, date]) table.append(row) return table, ccaas, dates def _create_table_for_chart_from_series(series): table = [(date.date(), value) for date, value in zip(series.index, series.values)] return table def write_html_report(out_path, date_range=None, desired_ccaas=None, spa_report=False): if spa_report and desired_ccaas: raise ValueError('choose one, either spa or ccaa report') if desired_ccaas and len(desired_ccaas) == 1: only_one_ccaa = True ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0]) else: only_one_ccaa = False ccaa_info = data_sources.get_sorted_downloaded_ccaa_info() report = ccaa_info[-1] accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report) deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files(), key=lambda x: x['max_date'])[-1] if spa_report: accumulated_incidence_table, dates = _create_accumulate_indicence_table_for_spa_chart_from_report(report, 15) else: accumulated_incidence_table, ccaas, dates = _create_table_for_chart_from_dict(accumulaed_incidence, desired_ccaas) title = 'Resumen situación Covid-19' if spa_report: title += ' España' elif only_one_ccaa: title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso) else: title += ' por comunidad autónoma' html = HEADER.format(title) html += HEADER2 js_function_name = 'drawAccumulatedCasesIncidence' columns = [('date', 'fecha')] if spa_report: columns.extend([('number', 'España')]) else: columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]) title = 'Incidencia acumulada por 100.000 hab. 


def write_html_report(out_path, date_range=None, desired_ccaas=None, spa_report=False):

    if spa_report and desired_ccaas:
        raise ValueError('choose one, either spa or ccaa report')

    if desired_ccaas and len(desired_ccaas) == 1:
        only_one_ccaa = True
        ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])
    else:
        only_one_ccaa = False

    ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()
    report = ccaa_info[-1]
    accumulated_incidence = calc_accumulated_incidence_per_ccaa(report)

    deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files(),
                    key=lambda x: x['max_date'])[-1]

    if spa_report:
        accumulated_incidence_table, dates = (
            _create_accumulated_incidence_table_for_spa_chart_from_report(report, 15))
    else:
        accumulated_incidence_table, ccaas, dates = (
            _create_table_for_chart_from_dict(accumulated_incidence, desired_ccaas))

    if date_range is None:
        # Without an explicit range, let the sliders start covering every date.
        date_range = [dates[0], dates[-1]]

    title = 'Resumen situación Covid-19'
    if spa_report:
        title += ' España'
    elif only_one_ccaa:
        title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)
    else:
        title += ' por comunidad autónoma'
    html = HEADER.format(title)
    html += HEADER2

    js_function_name = 'drawAccumulatedCasesIncidence'
    columns = [('date', 'fecha')]
    if spa_report:
        columns.append(('number', 'España'))
    else:
        columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa))
                        for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])
    title = 'Incidencia acumulada por 100.000 hab. (15 días)'

    width = 900
    height = 800
    rangeslider_height = 50
    js_sizes = {'dashboard': {'height': height + rangeslider_height, 'width': width},
                'chart': {'height': height, 'width': width},
                'rangeslider': {'height': rangeslider_height, 'width': 600},
                }
    div_sizes = {}
    for html_element in js_sizes:
        div_sizes[html_element] = {'height': f"{js_sizes[html_element]['height']}px",
                                   'width': f"{js_sizes[html_element]['width']}px"}

    slider_config = {'column_controlled': 'fecha',
                     'min_value': dates[0],
                     'max_value': dates[-1],
                     'min_init_value': date_range[0],
                     'max_init_value': date_range[-1]}
    div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',
                                 'chart': 'accumulated_cases_chart',
                                 'rangeslider': 'accumulated_cases_rangeslider'}

    html += material_line_chart.create_chart_js_with_slider(js_function_name,
                                                            slider_config,
                                                            div_ids_accumulated_cases,
                                                            title,
                                                            columns,
                                                            accumulated_incidence_table,
                                                            sizes=js_sizes)
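    # slider_config ties a date range slider to the 'fecha' column: min/max_value
    # bound the slider to every available date, while the *_init_value entries
    # preselect the requested date_range. How this maps onto the Google Charts
    # controls is up to the local material_line_chart module, which is not shown
    # here.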
    js_function_names = {'hospitalized': 'drawHospitalized',
                         'icu': 'drawICU',
                         'deceased': 'drawDeceased'}
    titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',
              'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',
              'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'
              }

    # The ministry hospitalization/ICU charts are disabled for now; the code
    # is kept for reference.
    if False:
        if spa_report:
            rolling_means = ministry_datasources.get_ministry_rolling_mean_spa()
            titles = {'hospitalized': 'Num. hospitalizaciones. (media 7 días)',
                      'icu': 'Num. ingresos UCI. (media 7 días)',
                      'deceased': 'Num. fallecidos. (media 7 días)'
                      }
        else:
            rolling_means = ministry_datasources.get_ministry_rolling_mean()
            titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',
                      'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',
                      'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'
                      }

    div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard',
                            'chart': 'hospitalized_chart',
                            'rangeslider': 'hospitalized_rangeslider'}
    div_ids_deceased = {'dashboard': 'deceased_dashboard',
                        'chart': 'deceased_chart',
                        'rangeslider': 'deceased_rangeslider'}
    div_ids = {'hospitalized': div_ids_hospitalized,
               'deceased': div_ids_deceased,
               }

    if False:
        dframe = rolling_means['hospitalized']
        if spa_report:
            columns = [('date', 'fecha'), ('number', 'España')]
            table = _create_table_for_chart_from_series(dframe)
        else:
            populations = [data_sources.get_population(ccaa) for ccaa in dframe.index]
            dframe = dframe.divide(populations, axis=0) * 1e5
            table, ccaas, _ = _create_table_for_chart_from_dframe(dframe, desired_ccaas)
            columns = [('date', 'fecha')]
            columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa))
                            for ccaa in ccaas])

        key = 'hospitalized'
        hospitalized_slider_config = {'column_controlled': 'fecha',
                                      'min_value': dates[0],
                                      'max_value': dates[-1],
                                      'min_init_value': date_range[0],
                                      'max_init_value': datetime.datetime.now()}
        html += material_line_chart.create_chart_js_with_slider(js_function_names[key],
                                                                hospitalized_slider_config,
                                                                div_ids[key],
                                                                title=titles[key],
                                                                columns=columns,
                                                                data_table=table,
                                                                sizes=js_sizes)

    num_days = 7
    key = 'deceased'
    deaths_dframe = deaths['dframe']
    if spa_report:
        spa_deaths = deaths_dframe.sum(axis=0)
        deaths_rolling_mean = spa_deaths.rolling(num_days, center=True,
                                                 min_periods=num_days).mean().dropna()
        table = _create_table_for_chart_from_series(deaths_rolling_mean)
        columns = [('date', 'fecha'), ('number', 'España')]
    else:
        # Centered 7-day rolling mean of the daily deaths of each CCAA,
        # normalized per 100,000 inhabitants.
        deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True,
                                                    min_periods=num_days, axis=1).mean()
        deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')
        populations = [data_sources.get_population(ccaa) for ccaa in deaths_rolling_mean.index]
        deaths_rolling_mean = deaths_rolling_mean.divide(populations, axis=0) * 1e5

        table, ccaas, _ = _create_table_for_chart_from_dframe(deaths_rolling_mean, desired_ccaas)
        columns = [('date', 'fecha')]
        columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])

    html += material_line_chart.create_chart_js_with_slider(js_function_names[key],
                                                            slider_config,
                                                            div_ids[key],
                                                            title=titles[key],
                                                            columns=columns,
                                                            data_table=table,
                                                            sizes=js_sizes)

    html += '    </script>\n  </head>\n  <body>\n'
    today = datetime.datetime.now()
    html += '<p><a href="../">Menu</a></p>'
    html += f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'

    html += ('<p>Este informe está generado para uso personal por '
             '<a href="https://twitter.com/jblanca42">@jblanca42</a>, pero lo sube '
             'a la web por si le pudiese ser de utilidad a alguien más.</p>')
    html += ('<p>El código utilizado para generarlo se encuentra en '
             '<a href="https://github.com/JoseBlanca/seguimiento_covid">github</a>, '
             'si encuentras algún fallo o quieres mejorar algo envía un mensaje '
             'o haz un pull request.</p>')

    if desired_ccaas:
        index = [ccaa for ccaa in deaths['dframe'].index
                 if is_desired_ccaa(ccaa, desired_ccaas)]
        tot_deaths = deaths['dframe'].loc[index, :].values.sum()
    else:
        # 'unassinged_deaths' [sic] is the key produced by ministry_datasources.
        tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths']
    html += f'<p>Número total de fallecidos: {tot_deaths}</p>'

    if spa_report:
        death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)
        html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'
    elif desired_ccaas and len(desired_ccaas) == 1:
        death_rate = round(data_sources.get_population(desired_ccaas[0]) / tot_deaths)
        html += f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'
    else:
        deaths_per_ccaa = deaths['dframe'].sum(axis=1)
        populations = [data_sources.get_population(ccaa) for ccaa in deaths_per_ccaa.index]
        populations = pandas.Series(populations, index=deaths_per_ccaa.index)
        death_rate = (populations / deaths_per_ccaa).round().sort_values().astype(int)
        html += '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'
        html += _write_table_from_series(death_rate)

    if False:
        for key in ['hospitalized']:
            html += f"<p>{DESCRIPTIONS[spa_report][key]}</p>\n"
            html += material_line_chart.create_chart_with_slider_divs(div_ids[key],
                                                                      sizes=div_sizes)

    html += f"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\n"
    html += material_line_chart.create_chart_with_slider_divs(div_ids_accumulated_cases,
                                                              sizes=div_sizes)
    for key in ['deceased']:
        html += f"<p>{DESCRIPTIONS[spa_report][key]}</p>\n"
        html += material_line_chart.create_chart_with_slider_divs(div_ids[key],
                                                                  sizes=div_sizes)

    html += '  </body>\n</html>'

    with out_path.open('wt') as out_fhand:
        out_fhand.write(html)
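
# Usage sketch (illustrative only): a report restricted to a single CCAA.
# The ISO code 'MD' is an assumption made for the example; the accepted
# values are whatever data_sources.convert_to_ccaa_iso() understands.
#
#     from pathlib import Path
#     now = datetime.datetime.now()
#     write_html_report(Path('madrid.html'),
#                       date_range=[now - datetime.timedelta(days=40), now],
#                       desired_ccaas=['MD'])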


if __name__ == '__main__':

    ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)
    forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)

    out_dir = config.HTML_REPORTS_DIR
    out_dir.mkdir(exist_ok=True)
    out_path = out_dir / 'situacion_covid_por_ca.html'
    write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])
import os from pathlib import Path from sphinx_testing import with_app @with_app(buildername="html", srcdir="doc_test/doc_role_need_max_title_length_unlimited") def test_max_title_length_unlimited(app, status, warning): os.environ["MAX_TITLE_LENGTH"] = "-1" app.build() html = Path(app.outdir, "index.html").read_text() assert "ROLE NEED TEMPLATE" in html assert ( "[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - " "The Tool awesome shall have a command line interface." in html ) @with_app(buildername="html", srcdir="doc_test/doc_role_need_max_title_length") def test_max_title_length_10(app, status, warning): os.environ["MAX_TITLE_LENGTH"] = "10" app.build() html = Path(app.outdir, "index.html").read_text() assert "ROLE NEED TEMPLATE" in html assert ( "[SP_TOO_001] Command... (implemented) Specification/spec - test;test2 - SP_TOO_002 - - " "The Tool awesome shall have a command line interface." in html )
normal
{ "blob_id": "3346ca7cdcfe9d9627bfe08be2b282897b3c319c", "index": 6943, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@with_app(buildername='html', srcdir=\n 'doc_test/doc_role_need_max_title_length_unlimited')\ndef test_max_title_length_unlimited(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '-1'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\n@with_app(buildername='html', srcdir=\n 'doc_test/doc_role_need_max_title_length_unlimited')\ndef test_max_title_length_unlimited(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '-1'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n\n\n@with_app(buildername='html', srcdir='doc_test/doc_role_need_max_title_length')\ndef test_max_title_length_10(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '10'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command... (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n", "step-4": "import os\nfrom pathlib import Path\nfrom sphinx_testing import with_app\n\n\n@with_app(buildername='html', srcdir=\n 'doc_test/doc_role_need_max_title_length_unlimited')\ndef test_max_title_length_unlimited(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '-1'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n\n\n@with_app(buildername='html', srcdir='doc_test/doc_role_need_max_title_length')\ndef test_max_title_length_10(app, status, warning):\n os.environ['MAX_TITLE_LENGTH'] = '10'\n app.build()\n html = Path(app.outdir, 'index.html').read_text()\n assert 'ROLE NEED TEMPLATE' in html\n assert '[SP_TOO_001] Command... (implemented) Specification/spec - test;test2 - SP_TOO_002 - - The Tool awesome shall have a command line interface.' in html\n", "step-5": "import os\nfrom pathlib import Path\n\nfrom sphinx_testing import with_app\n\n\n@with_app(buildername=\"html\", srcdir=\"doc_test/doc_role_need_max_title_length_unlimited\")\ndef test_max_title_length_unlimited(app, status, warning):\n\n os.environ[\"MAX_TITLE_LENGTH\"] = \"-1\"\n app.build()\n html = Path(app.outdir, \"index.html\").read_text()\n assert \"ROLE NEED TEMPLATE\" in html\n assert (\n \"[SP_TOO_001] Command line interface (implemented) Specification/spec - test;test2 - SP_TOO_002 - - \"\n \"The Tool awesome shall have a command line interface.\" in html\n )\n\n\n@with_app(buildername=\"html\", srcdir=\"doc_test/doc_role_need_max_title_length\")\ndef test_max_title_length_10(app, status, warning):\n\n os.environ[\"MAX_TITLE_LENGTH\"] = \"10\"\n app.build()\n html = Path(app.outdir, \"index.html\").read_text()\n assert \"ROLE NEED TEMPLATE\" in html\n assert (\n \"[SP_TOO_001] Command... 
(implemented) Specification/spec - test;test2 - SP_TOO_002 - - \"\n \"The Tool awesome shall have a command line interface.\" in html\n )\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
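A worked note on what the two tests above pin down: MAX_TITLE_LENGTH=-1 renders the full title, while MAX_TITLE_LENGTH=10 collapses "Command line interface" to the 10-character "Command...". One truncation rule consistent with both assertions - a hypothetical reconstruction, not code taken from the extension under test - is:

def truncate_title(title: str, max_length: int) -> str:
    # assumed rule: -1 disables truncation; otherwise keep max_length
    # characters in total, ending in "..."
    if max_length < 0 or len(title) <= max_length:
        return title
    return title[:max_length - 3] + "..."

print(truncate_title("Command line interface", -1))  # Command line interface
print(truncate_title("Command line interface", 10))  # Command...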
class UrlPath:
    @staticmethod
    def combine(*args):
        result = ''
        for path in args:
            result += path if path.endswith('/') else '{}/'.format(path)
        #result = result[:-1]
        return result
normal
{ "blob_id": "aa579025cacd11486a101b2dc51b5ba4997bf84a", "index": 95, "step-1": "<mask token>\n", "step-2": "class UrlPath:\n <mask token>\n", "step-3": "class UrlPath:\n\n @staticmethod\n def combine(*args):\n result = ''\n for path in args:\n result += path if path.endswith('/') else '{}/'.format(path)\n return result\n", "step-4": "class UrlPath:\n @staticmethod\n def combine(*args):\n result = ''\n for path in args:\n result += path if path.endswith('/') else '{}/'.format(path)\n #result = result[:-1]\n return result", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
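A quick usage sketch for the UrlPath class above (the URL is a placeholder); note that combine always leaves a trailing slash, since the commented-out result[:-1] line never runs:

print(UrlPath.combine('https://example.com', 'api', 'v1/'))
# -> 'https://example.com/api/v1/'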
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-

import numpy as np
import pandas as pd
import pylab as pl

x = range(1, 19)
d = pd.read_csv('data.csv')

pl.clf()
pl.plot(x, d['reelection'], 'o-', label='reelection')
pl.plot(x, d['rerun'], 'o-', label='rerun')
pl.plot(x, d['ratio'], 'o-', label='incumbent ratio')
pl.fill_between(x, d['ratio'], np.zeros(len(d.index)), facecolor='red', alpha=0.1)
pl.legend(loc='upper left')
pl.xlabel('assembly_id')
pl.ylabel('rate')
pl.xlim([1, max(x)])
pl.ylim([0, 1])
pl.xticks(x)
pl.savefig('drawing.png')
normal
{ "blob_id": "156b3e09a65402d4f964c2886b8f5519168eb13a", "index": 2894, "step-1": "<mask token>\n", "step-2": "<mask token>\npl.clf()\npl.plot(x, d['reelection'], 'o-', label='reelection')\npl.plot(x, d['rerun'], 'o-', label='rerun')\npl.plot(x, d['ratio'], 'o-', label='incumbent ratio')\npl.fill_between(x, d['ratio'], np.zeros(len(d.index)), facecolor='red',\n alpha=0.1)\npl.legend(loc='upper left')\npl.xlabel('assembly_id')\npl.ylabel('rate')\npl.xlim([1, max(x)])\npl.ylim([0, 1])\npl.xticks(x)\npl.savefig('drawing.png')\n", "step-3": "<mask token>\nx = range(1, 19)\nd = pd.read_csv('data.csv')\npl.clf()\npl.plot(x, d['reelection'], 'o-', label='reelection')\npl.plot(x, d['rerun'], 'o-', label='rerun')\npl.plot(x, d['ratio'], 'o-', label='incumbent ratio')\npl.fill_between(x, d['ratio'], np.zeros(len(d.index)), facecolor='red',\n alpha=0.1)\npl.legend(loc='upper left')\npl.xlabel('assembly_id')\npl.ylabel('rate')\npl.xlim([1, max(x)])\npl.ylim([0, 1])\npl.xticks(x)\npl.savefig('drawing.png')\n", "step-4": "import numpy as np\nimport pandas as pd\nimport pylab as pl\nx = range(1, 19)\nd = pd.read_csv('data.csv')\npl.clf()\npl.plot(x, d['reelection'], 'o-', label='reelection')\npl.plot(x, d['rerun'], 'o-', label='rerun')\npl.plot(x, d['ratio'], 'o-', label='incumbent ratio')\npl.fill_between(x, d['ratio'], np.zeros(len(d.index)), facecolor='red',\n alpha=0.1)\npl.legend(loc='upper left')\npl.xlabel('assembly_id')\npl.ylabel('rate')\npl.xlim([1, max(x)])\npl.ylim([0, 1])\npl.xticks(x)\npl.savefig('drawing.png')\n", "step-5": "#! /usr/bin/python2.7\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport pylab as pl\n\nx = range(1, 19)\nd = pd.read_csv('data.csv')\n\npl.clf()\npl.plot(x, d['reelection'], 'o-', label='reelection')\npl.plot(x, d['rerun'], 'o-', label='rerun')\npl.plot(x, d['ratio'], 'o-', label='incumbent ratio')\npl.fill_between(x, d['ratio'], np.zeros(len(d.index)), facecolor='red',\\\n alpha=0.1)\npl.legend(loc='upper left')\npl.xlabel('assembly_id')\npl.ylabel('rate')\npl.xlim([1, max(x)])\npl.ylim([0, 1])\npl.xticks(x)\npl.savefig('drawing.png')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
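The plotting script above assumes a data.csv with at least the three columns it reads and one row per assembly (18 rows, matching x = range(1, 19)). A minimal sketch that writes a compatible dummy file:

import numpy as np
import pandas as pd

n = 18  # assemblies 1..18
pd.DataFrame({
    'reelection': np.random.rand(n),
    'rerun': np.random.rand(n),
    'ratio': np.random.rand(n),
}).to_csv('data.csv', index=False)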
rate = 69
dollar = int(input("enter dollars to convert: "))
inr = dollar * rate
print('INR: Rs.', inr, '/-')
normal
{ "blob_id": "62018b32bf0c66fa7ec3cc0fcbdc16e28b4ef2d6", "index": 2396, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('INR :Rs.', inr, '/-')\n", "step-3": "rate = 69\ndollar = int(input('enter an dollars to convert:'))\ninr = dollar * rate\nprint('INR :Rs.', inr, '/-')\n", "step-4": "rate=69\ndollar=int(input(\"enter an dollars to convert:\"))\ninr=dollar*rate\nprint('INR :Rs.',inr,'/-')", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
# Using the super() function

class Persona():
    def __init__(self, nombre, edad, lugar_residencia):
        self.nombre = nombre
        self.edad = edad
        self.residencia = lugar_residencia

    def descripcion(self):
        print("Nombre: ", self.nombre, " Edad: ", self.edad, " Lugar de residencia: ", self.residencia)

    def hola(self):
        print("Hola Mundo")

class Empleado(Persona):

    def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado, residencia_empleado):
        super().__init__(nombre_empleado, edad_empleado, residencia_empleado)  # calls the constructor of the parent class it inherits from
        self.salario = salario
        self.antiguedad_persona = antiguedad

        super().hola()

    def descripcion(self):
        super().descripcion()
        print("Salario: ", self.salario, "Antiguedad: ", self.antiguedad_persona)


Antonio = Persona("Alex", 23, "Merida")
Antonio.descripcion()

print(isinstance(Antonio, Empleado))


# Liskov substitution principle
# It comes down to asking the following question:
# is it always a/an ...?
# The isinstance() function reports whether an object is an instance of a given class; it returns True or False.
normal
{ "blob_id": "92a50bcdbb4c03d1a4813a93c2e0986250516f14", "index": 1117, "step-1": "class Persona:\n <mask token>\n <mask token>\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\n", "step-2": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n <mask token>\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\n", "step-3": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n\n def descripcion(self):\n print('Nombre: ', self.nombre, ' Edad: ', self.edad,\n ' Lugar de residencia: ', self.residencia)\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\n<mask token>\nAntonio.descripcion()\nprint(isinstance(Antonio, Empleado))\n", "step-4": "class Persona:\n\n def __init__(self, nombre, edad, lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n\n def descripcion(self):\n print('Nombre: ', self.nombre, ' Edad: ', self.edad,\n ' Lugar de residencia: ', self.residencia)\n\n def hola(self):\n print('Hola Mundo')\n\n\nclass Empleado(Persona):\n\n def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,\n residencia_empleado):\n super().__init__(nombre_empleado, edad_empleado, residencia_empleado)\n self.salario = salario\n self.antiguedad_persona = antiguedad\n super().hola()\n\n def descripcion(self):\n super().descripcion()\n print('Salario: ', self.salario, 'Antiguedad: ', self.\n antiguedad_persona)\n\n\nAntonio = Persona('Alex', 23, 'Merida')\nAntonio.descripcion()\nprint(isinstance(Antonio, Empleado))\n", "step-5": "\n\n#Aplicacion de la funcion super()\n\nclass Persona():\n def __init__(self,nombre,edad,lugar_residencia):\n self.nombre = nombre\n self.edad = edad\n self.residencia = lugar_residencia\n \n def descripcion(self):\n print(\"Nombre: \",self.nombre,\" Edad: \", self.edad,\" Lugar de residencia: \",self.residencia)\n \n def hola(self):\n print(\"Hola Mundo\")\n\nclass Empleado(Persona):\n\n def __init__(self,salario,antiguedad,nombre_empleado,edad_empleado,residencia_empleado):\n\n super().__init__(nombre_empleado,edad_empleado,residencia_empleado)#Hace la llamada al constructor de la clase padre que 
esta heredando\n self.salario = salario\n self.antiguedad_persona=antiguedad\n\n super().hola()\n \n def descripcion(self):\n super().descripcion()\n print(\"Salario: \" ,self.salario, \"Antiguedad: \",self.antiguedad_persona)\n\n\nAntonio = Persona(\"Alex\",23,\"Merida\")\nAntonio.descripcion()\n\nprint(isinstance(Antonio,Empleado))\n\n\n#Principio de sustitucion\n#consiste en plantearse las siguientes preguntas:\n\n#es siempre un o una\n\n#funcion isinstance()--> nos informa si un objeto es instancia de una clase determinada devuelve verdadero o falso\n\n\n\n", "step-ids": [ 5, 6, 8, 9, 10 ] }
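The example above only instantiates Persona, so the super() calls in Empleado are never exercised. A short usage sketch that does (the values are made up):

Laura = Empleado(30000, 5, "Laura", 31, "Valencia")  # prints "Hola Mundo" via super().hola()
Laura.descripcion()  # parent description via super(), then salary and seniority
print(isinstance(Laura, Persona))  # True: an Empleado is always a Persona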
from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider


class Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):
    name = "pharmacy_4_less_au"
    item_attributes = {"brand": "Pharmacy 4 Less", "brand_wikidata": "Q63367608"}
    key = "6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX"
normal
{ "blob_id": "aad3c104432a1a028d96263236133e495536ee69", "index": 6644, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = 'pharmacy_4_less_au'\n item_attributes = {'brand': 'Pharmacy 4 Less', 'brand_wikidata':\n 'Q63367608'}\n key = '6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX'\n", "step-4": "from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = 'pharmacy_4_less_au'\n item_attributes = {'brand': 'Pharmacy 4 Less', 'brand_wikidata':\n 'Q63367608'}\n key = '6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX'\n", "step-5": "from locations.storefinders.storelocatorwidgets import StoreLocatorWidgetsSpider\n\n\nclass Pharmacy4LessAUSpider(StoreLocatorWidgetsSpider):\n name = \"pharmacy_4_less_au\"\n item_attributes = {\"brand\": \"Pharmacy 4 Less\", \"brand_wikidata\": \"Q63367608\"}\n key = \"6c0hBJeL5yk8cmaKJGNjTu0JhWNaMQpX\"\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
from typing import List, Optional

from backend.domain.well import FacilityState, Well
from backend.repository.persistence.well import WellPersistenceSchema


class WellRepository:
    schema = WellPersistenceSchema()

    def __init__(self, db):
        self._db = db

    def list(self) -> List[Well]:
        return [self.schema.load(doc) for doc in self._db.wells.find({})]

    def save_many(self, wells: List[Well]):
        self._db.wells.insert_many([self.schema.dump(well) for well in wells])

    def filter_by_facility_status(self, statuses: List[FacilityState]) -> List[Well]:
        return [
            self.schema.load(doc)
            for doc in self._db.wells.find({"facility.lifecycle.name": {"$in": [status.value for status in statuses]}})
        ]

    def find_well_by_facility_id(self, identifier: str) -> Optional[Well]:
        doc = self._db.wells.find_one({"facility.id": identifier})
        if doc:
            return self.schema.load(doc)
normal
{ "blob_id": "5a181b0c22faa47c6c887daac675dd7374037f30", "index": 3056, "step-1": "<mask token>\n\n\nclass WellRepository:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass WellRepository:\n <mask token>\n\n def __init__(self, db):\n self._db = db\n\n def list(self) ->List[Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({})]\n\n def save_many(self, wells: List[Well]):\n self._db.wells.insert_many([self.schema.dump(well) for well in wells])\n\n def filter_by_facility_status(self, statuses: List[FacilityState]) ->List[\n Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({\n 'facility.lifecycle.name': {'$in': [status.value for status in\n statuses]}})]\n <mask token>\n", "step-3": "<mask token>\n\n\nclass WellRepository:\n schema = WellPersistenceSchema()\n\n def __init__(self, db):\n self._db = db\n\n def list(self) ->List[Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({})]\n\n def save_many(self, wells: List[Well]):\n self._db.wells.insert_many([self.schema.dump(well) for well in wells])\n\n def filter_by_facility_status(self, statuses: List[FacilityState]) ->List[\n Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({\n 'facility.lifecycle.name': {'$in': [status.value for status in\n statuses]}})]\n\n def find_well_by_facility_id(self, identifier: str) ->Optional[Well]:\n doc = self._db.wells.find_one({'facility.id': identifier})\n if doc:\n return self.schema.load(doc)\n", "step-4": "from typing import List, Optional\nfrom backend.domain.well import FacilityState, Well\nfrom backend.repository.persistence.well import WellPersistenceSchema\n\n\nclass WellRepository:\n schema = WellPersistenceSchema()\n\n def __init__(self, db):\n self._db = db\n\n def list(self) ->List[Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({})]\n\n def save_many(self, wells: List[Well]):\n self._db.wells.insert_many([self.schema.dump(well) for well in wells])\n\n def filter_by_facility_status(self, statuses: List[FacilityState]) ->List[\n Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({\n 'facility.lifecycle.name': {'$in': [status.value for status in\n statuses]}})]\n\n def find_well_by_facility_id(self, identifier: str) ->Optional[Well]:\n doc = self._db.wells.find_one({'facility.id': identifier})\n if doc:\n return self.schema.load(doc)\n", "step-5": "from typing import List, Optional\n\nfrom backend.domain.well import FacilityState, Well\nfrom backend.repository.persistence.well import WellPersistenceSchema\n\n\nclass WellRepository:\n schema = WellPersistenceSchema()\n\n def __init__(self, db):\n self._db = db\n\n def list(self) -> List[Well]:\n return [self.schema.load(doc) for doc in self._db.wells.find({})]\n\n def save_many(self, wells: List[Well]):\n self._db.wells.insert_many([self.schema.dump(well) for well in wells])\n\n def filter_by_facility_status(self, statuses: List[FacilityState]) -> List[Well]:\n return [\n self.schema.load(doc)\n for doc in self._db.wells.find({\"facility.lifecycle.name\": {\"$in\": [status.value for status in statuses]}})\n ]\n\n def find_well_by_facility_id(self, identifier: str) -> Optional[Well]:\n doc = self._db.wells.find_one({\"facility.id\": identifier})\n if doc:\n return self.schema.load(doc)\n", "step-ids": [ 1, 5, 7, 8, 9 ] }
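WellRepository only requires a db object exposing a wells collection with find/find_one/insert_many, which a pymongo database handle provides. A hedged usage sketch - the database name, facility id and FacilityState member are assumptions, not taken from the backend package:

from pymongo import MongoClient

db = MongoClient("mongodb://localhost:27017")["wells_db"]  # assumed database name
repo = WellRepository(db)
producing = repo.filter_by_facility_status([FacilityState.PRODUCING])  # hypothetical enum member
well = repo.find_well_by_facility_id("NO-1234")  # hypothetical facility id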
# -*- coding: utf-8 -*-
"""
======================
@author : Zhang Xu
@time : 2021/9/8:16:29
@email : [email protected]
@content : reproducing NPA with tensorflow subclassing
======================
"""
import tensorflow as tf
from tensorflow.keras import *
from tensorflow.keras.layers import *
from keras import backend as K

npratio = 4

MAX_SENT_LENGTH = 30  # number of words in one news item
MAX_SENTS = 50  # number of news items clicked by one user

# NOTE: several layer constructors below (Input, Embedding, Dense, Conv1D) are
# left without arguments in this sketch; real hyperparameters (vocab sizes,
# embedding dims, filter sizes, input shapes) must be supplied before it can run.

# news encoder
# input: user id, the information of one news item
# output: news representation
class NewsEncoder(tf.keras.Model):

    def __init__(self):
        super(NewsEncoder, self).__init__(name='NewsEncoder')

        # user_id part
        self.userid_input_layer = Input()
        self.userid_embedding_layer = Embedding()
        self.userid_dense_layer = Dense()
        self.userid_flatten_layer = Flatten()

        # news part
        self.news_input_layer = Input()
        self.news_embedding_layer = Embedding()
        self.news_conv_layer = Conv1D()
        self.news_dropout_layer_1 = Dropout(0.2)
        self.news_dropout_layer_2 = Dropout(0.2)

        # personalized attention part (axes follow the layer names: 2_1 and 1_1)
        self.pa_dense_layer = Dense()
        self.pa_2_1_dot_layer = Dot(axes=(2, 1))
        self.pa_softmax_layer = Activation('softmax')
        self.pa_1_1_dot_layer = Dot(axes=(1, 1))

    def call(self, inputs):
        '''multiple inputs: user_id and news_input -
        a single user's id and the information of one news item'''
        user_id, news_input = inputs[0], inputs[1]

        # qw: the user query vector
        x1 = self.userid_input_layer(user_id)
        x1 = self.userid_embedding_layer(x1)
        x1 = self.userid_dense_layer(x1)
        qw = self.userid_flatten_layer(x1)

        # news representation
        x2 = self.news_input_layer(news_input)
        x2 = self.news_embedding_layer(x2)
        x2 = self.news_dropout_layer_1(x2)
        x2 = self.news_conv_layer(x2)
        x2 = self.news_dropout_layer_2(x2)

        # personalized attention
        qw = self.pa_dense_layer(qw)
        attention_a = self.pa_2_1_dot_layer([x2, qw])
        attention_weight = self.pa_softmax_layer(attention_a)
        news_rep = self.pa_1_1_dot_layer([x2, attention_weight])

        return news_rep


# NPA
# input: user id, all of that user's clicked news (N items) and the candidate news (K items)
# output: a prediction for each of the K candidate news items - the probability of a click
class NPA(tf.keras.Model):

    def __init__(self):
        super(NPA, self).__init__(name='NPA')

        # user id part
        self.userid_input_layer = Input()
        self.userid_embedding_layer = Embedding()
        self.userid_dense_layer = Dense()
        self.userid_flatten_layer = Flatten()

        # clicked news part
        self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]
        self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]
        self.clickednews_dense_layer = Dense()
        self.clickednews_2_1_dot_layer = Dot((2, 1))
        self.clickednews_softmax_layer = Activation('softmax')
        self.clickednews_1_1_dot_layer = Dot((1, 1))

        # candidate news part
        self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]
        self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]

        # click prediction
        # (the original used the functional helpers dot()/concatenate() here,
        # which are not layer constructors; Dot/Concatenate layers are the
        # subclassing-API equivalents)
        self.cp_dot_layer = Dot(axes=-1)
        self.cp_concatenate = Concatenate()
        self.cp_activation_layer = Activation('softmax')

    def call(self, inputs):
        user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]

        # qd: the user query vector
        x1 = self.userid_input_layer(user_id)
        x1 = self.userid_embedding_layer(x1)
        x1 = self.userid_dense_layer(x1)
        qd = self.userid_flatten_layer(x1)

        # clicked news
        clicked_news_vec = [0] * MAX_SENTS
        for i in range(len(clicked_news)):
            xx = self.clickednews_input_layer[i](clicked_news[i])
            clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])
        clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x, axis=1))(news) for news in clicked_news_vec], axis=1)

        # personalized attention between qd and clicked_news_rep
        news_temp_dense = self.clickednews_dense_layer(qd)
        attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep, news_temp_dense])
        attention_news_weight = self.clickednews_softmax_layer(attention_news)
        user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep, attention_news_weight])

        # candidate news
        candidate_news_vec = [0] * (1 + npratio)
        for i in range(len(candidate_news)):
            xx = self.candidatenews_input_layer[i](candidate_news[i])
            candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx])

        # click prediction: dot each candidate news representation with the
        # user representation, then softmax over the candidates
        logits = [self.cp_dot_layer([user_rep, candidate_news]) for candidate_news in candidate_news_vec]
        logits = self.cp_activation_layer(self.cp_concatenate(logits))

        return logits
normal
{ "blob_id": "f3789d70f784345881f705fc809c49ad4e3526bc", "index": 1287, "step-1": "<mask token>\n\n\nclass NewsEncoder(tf.keras.Model):\n\n def __init__(self):\n super(NewsEncoder, self).__init__(name='NewsEncoder')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.news_input_layer = Input()\n self.news_embedding_layer = Embedding()\n self.news_conv_layer = Conv1D()\n self.news_dropout_layer_1 = Dropout(0.2)\n self.news_dropout_layer_2 = Dropout(0.2)\n self.pa_dense_layer = Dense()\n self.pa_2_1_dot_layer = Dot()\n self.pa_softmax_layer = Activation('softmax')\n self.pa_1_1_dot_layer = Dot()\n <mask token>\n\n\nclass NPA(tf.keras.Model):\n\n def __init__(self):\n super(NPA, self).__init__(name='NPA')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(MAX_SENTS)]\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\n self.clickednews_dense_layer = Dense()\n self.clickednews_2_1_dot_layer = Dot((2, 1))\n self.clickednews_softmax_layer = Activation('softmax')\n self.clickednews_1_1_dot_layer = Dot((1, 1))\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(1 + npratio)]\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)\n ]\n self.cp_dot_layer = dot()\n self.cp_concatenate = concatenate()\n self.cp_activation_layer = Activation('softmax')\n\n def call(self, inputs):\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qd = self.userid_flatten_layer(x1)\n clicked_news_vec = [0] * MAX_SENTS\n for i in range(len(clicked_news)):\n xx = self.clickednews_input_layer[i](clicked_news[i])\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,\n axis=1))(news) for news in clicked_news_vec], axis=1)\n news_temp_dense = self.clickednews_dense_layer(qd)\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,\n news_temp_dense])\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,\n attention_news_weight])\n candidate_news_vec = [0] * (1 + npratio)\n for i in range(len(candidate_news)):\n xx = self.candidatenews_input_layer[i](candidate_news[i])\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]\n )\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for\n candidate_news in candidate_news_vec]\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\n return logits\n", "step-2": "<mask token>\n\n\nclass NewsEncoder(tf.keras.Model):\n\n def __init__(self):\n super(NewsEncoder, self).__init__(name='NewsEncoder')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.news_input_layer = Input()\n self.news_embedding_layer = Embedding()\n self.news_conv_layer = Conv1D()\n self.news_dropout_layer_1 = Dropout(0.2)\n self.news_dropout_layer_2 = Dropout(0.2)\n self.pa_dense_layer = Dense()\n self.pa_2_1_dot_layer = Dot()\n self.pa_softmax_layer = Activation('softmax')\n 
self.pa_1_1_dot_layer = Dot()\n\n def call(self, inputs):\n \"\"\"多输入:输入 user_id、 news_input\"\"\"\n \"\"\"输入单个用户的 user id 和 一篇 news 的信息\"\"\"\n user_id, news_input = inputs[0], inputs[1]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qw = self.userid_flatten_layer(x1)\n x2 = self.news_input_layer(news_input)\n x2 = self.news_embedding_layer(x2)\n x2 = self.news_dropout_layer_1(x2)\n x2 = self.news_conv_layer(x2)\n x2 = self.news_dropout_layer_2(x2)\n qw = self.pa_dense_layer(qw)\n attention_a = self.pa_2_1_dot_layer([x2, qw])\n attention_weight = self.pa_softmax_layer(attention_a)\n news_rep = self.pa_1_1_dot_layer([x2, attention_weight])\n return news_rep\n\n\nclass NPA(tf.keras.Model):\n\n def __init__(self):\n super(NPA, self).__init__(name='NPA')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(MAX_SENTS)]\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\n self.clickednews_dense_layer = Dense()\n self.clickednews_2_1_dot_layer = Dot((2, 1))\n self.clickednews_softmax_layer = Activation('softmax')\n self.clickednews_1_1_dot_layer = Dot((1, 1))\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(1 + npratio)]\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)\n ]\n self.cp_dot_layer = dot()\n self.cp_concatenate = concatenate()\n self.cp_activation_layer = Activation('softmax')\n\n def call(self, inputs):\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qd = self.userid_flatten_layer(x1)\n clicked_news_vec = [0] * MAX_SENTS\n for i in range(len(clicked_news)):\n xx = self.clickednews_input_layer[i](clicked_news[i])\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,\n axis=1))(news) for news in clicked_news_vec], axis=1)\n news_temp_dense = self.clickednews_dense_layer(qd)\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,\n news_temp_dense])\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,\n attention_news_weight])\n candidate_news_vec = [0] * (1 + npratio)\n for i in range(len(candidate_news)):\n xx = self.candidatenews_input_layer[i](candidate_news[i])\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]\n )\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for\n candidate_news in candidate_news_vec]\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\n return logits\n", "step-3": "<mask token>\nnpratio = 4\nMAX_SENT_LENGTH = 30\nMAX_SENTS = 50\n\n\nclass NewsEncoder(tf.keras.Model):\n\n def __init__(self):\n super(NewsEncoder, self).__init__(name='NewsEncoder')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.news_input_layer = Input()\n self.news_embedding_layer = Embedding()\n self.news_conv_layer = Conv1D()\n self.news_dropout_layer_1 = Dropout(0.2)\n self.news_dropout_layer_2 = Dropout(0.2)\n self.pa_dense_layer = Dense()\n self.pa_2_1_dot_layer = 
Dot()\n self.pa_softmax_layer = Activation('softmax')\n self.pa_1_1_dot_layer = Dot()\n\n def call(self, inputs):\n \"\"\"多输入:输入 user_id、 news_input\"\"\"\n \"\"\"输入单个用户的 user id 和 一篇 news 的信息\"\"\"\n user_id, news_input = inputs[0], inputs[1]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qw = self.userid_flatten_layer(x1)\n x2 = self.news_input_layer(news_input)\n x2 = self.news_embedding_layer(x2)\n x2 = self.news_dropout_layer_1(x2)\n x2 = self.news_conv_layer(x2)\n x2 = self.news_dropout_layer_2(x2)\n qw = self.pa_dense_layer(qw)\n attention_a = self.pa_2_1_dot_layer([x2, qw])\n attention_weight = self.pa_softmax_layer(attention_a)\n news_rep = self.pa_1_1_dot_layer([x2, attention_weight])\n return news_rep\n\n\nclass NPA(tf.keras.Model):\n\n def __init__(self):\n super(NPA, self).__init__(name='NPA')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(MAX_SENTS)]\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\n self.clickednews_dense_layer = Dense()\n self.clickednews_2_1_dot_layer = Dot((2, 1))\n self.clickednews_softmax_layer = Activation('softmax')\n self.clickednews_1_1_dot_layer = Dot((1, 1))\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(1 + npratio)]\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)\n ]\n self.cp_dot_layer = dot()\n self.cp_concatenate = concatenate()\n self.cp_activation_layer = Activation('softmax')\n\n def call(self, inputs):\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qd = self.userid_flatten_layer(x1)\n clicked_news_vec = [0] * MAX_SENTS\n for i in range(len(clicked_news)):\n xx = self.clickednews_input_layer[i](clicked_news[i])\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,\n axis=1))(news) for news in clicked_news_vec], axis=1)\n news_temp_dense = self.clickednews_dense_layer(qd)\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,\n news_temp_dense])\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,\n attention_news_weight])\n candidate_news_vec = [0] * (1 + npratio)\n for i in range(len(candidate_news)):\n xx = self.candidatenews_input_layer[i](candidate_news[i])\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]\n )\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for\n candidate_news in candidate_news_vec]\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\n return logits\n", "step-4": "<mask token>\nimport tensorflow as tf\nfrom tensorflow.keras import *\nfrom tensorflow.keras.layers import *\nfrom keras import backend as K\nnpratio = 4\nMAX_SENT_LENGTH = 30\nMAX_SENTS = 50\n\n\nclass NewsEncoder(tf.keras.Model):\n\n def __init__(self):\n super(NewsEncoder, self).__init__(name='NewsEncoder')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.news_input_layer = Input()\n self.news_embedding_layer = 
Embedding()\n self.news_conv_layer = Conv1D()\n self.news_dropout_layer_1 = Dropout(0.2)\n self.news_dropout_layer_2 = Dropout(0.2)\n self.pa_dense_layer = Dense()\n self.pa_2_1_dot_layer = Dot()\n self.pa_softmax_layer = Activation('softmax')\n self.pa_1_1_dot_layer = Dot()\n\n def call(self, inputs):\n \"\"\"多输入:输入 user_id、 news_input\"\"\"\n \"\"\"输入单个用户的 user id 和 一篇 news 的信息\"\"\"\n user_id, news_input = inputs[0], inputs[1]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qw = self.userid_flatten_layer(x1)\n x2 = self.news_input_layer(news_input)\n x2 = self.news_embedding_layer(x2)\n x2 = self.news_dropout_layer_1(x2)\n x2 = self.news_conv_layer(x2)\n x2 = self.news_dropout_layer_2(x2)\n qw = self.pa_dense_layer(qw)\n attention_a = self.pa_2_1_dot_layer([x2, qw])\n attention_weight = self.pa_softmax_layer(attention_a)\n news_rep = self.pa_1_1_dot_layer([x2, attention_weight])\n return news_rep\n\n\nclass NPA(tf.keras.Model):\n\n def __init__(self):\n super(NPA, self).__init__(name='NPA')\n self.userid_input_layer = Input()\n self.userid_embedding_layer = Embedding()\n self.userid_dense_layer = Dense()\n self.userid_flatten_layer = Flatten()\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(MAX_SENTS)]\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\n self.clickednews_dense_layer = Dense()\n self.clickednews_2_1_dot_layer = Dot((2, 1))\n self.clickednews_softmax_layer = Activation('softmax')\n self.clickednews_1_1_dot_layer = Dot((1, 1))\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype=\n 'int32') for _ in range(1 + npratio)]\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)\n ]\n self.cp_dot_layer = dot()\n self.cp_concatenate = concatenate()\n self.cp_activation_layer = Activation('softmax')\n\n def call(self, inputs):\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\n x1 = self.userid_input_layer(user_id)\n x1 = self.userid_embedding_layer(x1)\n x1 = self.userid_dense_layer(x1)\n qd = self.userid_flatten_layer(x1)\n clicked_news_vec = [0] * MAX_SENTS\n for i in range(len(clicked_news)):\n xx = self.clickednews_input_layer[i](clicked_news[i])\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x,\n axis=1))(news) for news in clicked_news_vec], axis=1)\n news_temp_dense = self.clickednews_dense_layer(qd)\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep,\n news_temp_dense])\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep,\n attention_news_weight])\n candidate_news_vec = [0] * (1 + npratio)\n for i in range(len(candidate_news)):\n xx = self.candidatenews_input_layer[i](candidate_news[i])\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx]\n )\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for\n candidate_news in candidate_news_vec]\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\n return logits\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n======================\r\n@author : Zhang Xu\r\n@time : 2021/9/8:16:29\r\n@email : [email protected]\r\n@content : tensorflow subclassing 复现 NPA\r\n======================\r\n\"\"\"\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import *\r\nfrom tensorflow.keras.layers import *\r\nfrom keras import backend 
as K\r\n\r\nnpratio = 4\r\n\r\nMAX_SENT_LENGTH = 30 # 一篇news的单词数量\r\nMAX_SENTS = 50 # 一个用户的点击的news的数量\r\n\r\n# news encoder\r\n# 输入:user id, 1篇news的信息\r\n# 输出:news representation\r\nclass NewsEncoder(tf.keras.Model):\r\n\r\n def __init__(self):\r\n super(NewsEncoder, self).__init__(name='NewsEncoder')\r\n\r\n # user_id 部分\r\n self.userid_input_layer = Input()\r\n self.userid_embedding_layer = Embedding()\r\n self.userid_dense_layer = Dense()\r\n self.userid_flatten_layer = Flatten()\r\n\r\n # news 部分\r\n self.news_input_layer = Input()\r\n self.news_embedding_layer = Embedding()\r\n self.news_conv_layer = Conv1D()\r\n self.news_dropout_layer_1 = Dropout(0.2)\r\n self.news_dropout_layer_2 = Dropout(0.2)\r\n\r\n # personalized attention 部分\r\n self.pa_dense_layer = Dense()\r\n self.pa_2_1_dot_layer = Dot()\r\n self.pa_softmax_layer = Activation('softmax')\r\n self.pa_1_1_dot_layer = Dot()\r\n\r\n def call(self, inputs):\r\n '''多输入:输入 user_id、 news_input'''\r\n '''输入单个用户的 user id 和 一篇 news 的信息'''\r\n user_id, news_input = inputs[0], inputs[1]\r\n\r\n # qw\r\n x1 = self.userid_input_layer(user_id)\r\n x1 = self.userid_embedding_layer(x1)\r\n x1 = self.userid_dense_layer(x1)\r\n qw = self.userid_flatten_layer(x1)\r\n\r\n # news representation\r\n x2 = self.news_input_layer(news_input)\r\n x2 = self.news_embedding_layer(x2)\r\n x2 = self.news_dropout_layer_1(x2)\r\n x2 = self.news_conv_layer(x2)\r\n x2 = self.news_dropout_layer_2(x2)\r\n\r\n # personalized attention\r\n qw = self.pa_dense_layer(qw)\r\n attention_a = self.pa_2_1_dot_layer([x2, qw])\r\n attention_weight = self.pa_softmax_layer(attention_a)\r\n news_rep = self.pa_1_1_dot_layer([x2, attention_weight])\r\n\r\n return news_rep\r\n\r\n\r\n# NPA\r\n# 输入:user id 和 该用户所有的 clicked news(N篇) 和 candidate news(K篇)\r\n# 输出:对K篇 candidate news 做出预测,分别给出点击的概率\r\nclass NPA(tf.keras.Model):\r\n\r\n def __init__(self):\r\n super(NPA, self).__init__(name='NPA')\r\n\r\n # user id 部分\r\n self.userid_input_layer = Input()\r\n self.userid_embedding_layer = Embedding()\r\n self.userid_dense_layer = Dense()\r\n self.userid_flatten_layer = Flatten()\r\n\r\n # clicked news 部分\r\n self.clickednews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(MAX_SENTS)]\r\n self.clickednews_encoder = [NewsEncoder() for _ in range(MAX_SENTS)]\r\n self.clickednews_dense_layer = Dense()\r\n self.clickednews_2_1_dot_layer = Dot((2, 1))\r\n self.clickednews_softmax_layer = Activation('softmax')\r\n self.clickednews_1_1_dot_layer = Dot((1, 1))\r\n\r\n # candidate news 部分\r\n self.candidatenews_input_layer = [Input((MAX_SENT_LENGTH,), dtype='int32') for _ in range(1 + npratio)]\r\n self.candidatenews_encoder = [NewsEncoder() for _ in range(1 + npratio)]\r\n\r\n # click prediction\r\n self.cp_dot_layer = dot()\r\n self.cp_concatenate = concatenate()\r\n self.cp_activation_layer = Activation('softmax')\r\n\r\n\r\n def call(self, inputs):\r\n user_id, clicked_news, candidate_news = inputs[0], inputs[1], inputs[2]\r\n\r\n # qd\r\n x1 = self.userid_input_layer(user_id)\r\n x1 = self.userid_embedding_layer(x1)\r\n x1 = self.userid_dense_layer(x1)\r\n qd = self.userid_flatten_layer(x1)\r\n\r\n # clicked news\r\n clicked_news_vec = [0]*MAX_SENTS\r\n for i in range(len(clicked_news)):\r\n xx = self.clickednews_input_layer[i](clicked_news[i])\r\n clicked_news_vec[i] = self.clickednews_encoder[i]([user_id, xx])\r\n clicked_news_rep = concatenate([Lambda(lambda x: K.expand_dims(x, axis=1))(news) for news in clicked_news_vec], axis=1)\r\n\r\n # qd 与 click_news_rep 进行 
personalized attention\r\n news_temp_dense = self.clickednews_dense_layer(qd)\r\n attention_news = self.clickednews_2_1_dot_layer([clicked_news_rep, news_temp_dense])\r\n attention_news_weight = self.clickednews_softmax_layer(attention_news)\r\n user_rep = self.clickednews_1_1_dot_layer([clicked_news_rep, attention_news_weight])\r\n\r\n # candidate news\r\n candidate_news_vec = [0]*(1+npratio)\r\n for i in range(len(candidate_news)):\r\n xx = self.candidatenews_input_layer[i](candidate_news[i])\r\n candidate_news_vec[i] = self.candidatenews_encoder[i]([user_id, xx])\r\n\r\n # click prediction\r\n # candidate news representation 与 user representation 进行 dot 和 softmax\r\n logits = [self.cp_dot_layer([user_rep, candidate_news], axes=-1) for candidate_news in candidate_news_vec]\r\n logits = self.cp_activation_layer(self.cp_concatenate(logits))\r\n\r\n return logits", "step-ids": [ 5, 6, 7, 8, 9 ] }
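A minimal, self-contained sketch of the dot/softmax/dot personalized-attention pipeline that the Dot((2, 1)) and Dot((1, 1)) layers above implement, with toy shapes and random tensors standing in for the CNN output and the user query:

import tensorflow as tf

word_rep = tf.random.normal((2, 30, 400))  # (batch, words, filters): CNN output for one news item
query = tf.random.normal((2, 400))         # (batch, filters): per-user query

scores = tf.keras.layers.Dot(axes=(2, 1))([word_rep, query])      # (2, 30) attention logits
weights = tf.keras.layers.Activation('softmax')(scores)           # (2, 30) attention weights
news_rep = tf.keras.layers.Dot(axes=(1, 1))([word_rep, weights])  # (2, 400) weighted sum over words

print(news_rep.shape)  # (2, 400)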
from typing import Tuple

# Creating a trie structure and its node
class TrieNode(object):
    def __init__(self, char: str):
        self.char = char
        self.children = []
        # marks the last character of a word
        self.word_finished = False
        # counter for this character
        self.counter = 1
        # per-document occurrence counts for words ending at this node
        self.OccurrenceList = {}

# Initialize the root of the trie
root = TrieNode('*')

# Adding a word to the trie structure
def insert(root, word: str, document):
    node = root
    for char in word:
        found_in_child = False
        # Search for the character in the children of the present `node`
        for child in node.children:
            if child.char == char:
                # the char of the word to be inserted is already present in the trie; increment its counter
                child.counter += 1
                # move the pointer to the node's child to continue the insertion of the rest of the word
                node = child
                found_in_child = True
                break
        # this char has never been inserted before; create a node and insert it
        if not found_in_child:
            new_node = TrieNode(char)
            node.children.append(new_node)
            # And then point node to the new child
            node = new_node

    # At this point the word is inserted - mark the end of this word
    node.word_finished = True
    if document not in node.OccurrenceList:  # first occurrence of this word in this document
        node.OccurrenceList[document] = 0
    node.OccurrenceList[document] = node.OccurrenceList[document] + 1  # increment the per-document occurrence count

# Performing the search in our files for the input word, using the trie structure created above
# First check for the word's existence; if it exists, return the file names and occurrence counts
def find_prefix(root, prefix: str) -> Tuple[bool, dict]:
    node = root
    # handle the case of an empty trie, i.e. the root node has no children
    if not root.children:
        return False, {}
    for char in prefix:
        char_not_found = True
        # Search through all the children of the node the pointer is pointing to
        for child in node.children:
            if child.char == char:
                # the char of the input word exists in the trie
                char_not_found = False
                # move the pointer further down the trie to check the remaining chars of the prefix
                node = child
                break
        # let the user know that the input word or prefix doesn't exist in the trie
        if char_not_found:
            print("Word Not Found: " + prefix)
            return False, {}
    # input word found: return the found status along with the files in which it exists
    print("Word Found: " + prefix)
    return True, node.OccurrenceList

# for scraping words from websites
from bs4 import BeautifulSoup
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string
stop_words = set(stopwords.words('english'))
stop_words.update(string.punctuation)
import os

# selecting files for scraping: fdata -> files
# please change the directory to run on your device
fdata = r"./input/"
files = os.listdir(fdata)
# clean the text in every file: drop punctuation, stop words, digits, words shorter than 3 chars and other symbols
for file in files:
    fname = file  # used later to associate each word with the file it occurs in when inserting into the trie
    file = open(fdata + str(file), encoding="utf8")
    soup = BeautifulSoup(file.read(), 'html.parser')
    # filter the soup
    [script.extract() for script in soup.findAll('script')]
    [style.extract() for style in soup.findAll('style')]
    # gather words from the filtered soup
    words = word_tokenize(soup.get_text())
    # remove the words containing punctuation
    words = [i for i in words if all(j not in string.punctuation for j in i)]
    # filter words and clean the data to insert into the trie
    for word in words:
        if word.lower() not in stop_words and len(word) > 2 and not word.isdigit():
            try:
                # skip words that can't be encoded to ASCII
                word = word.lower().strip().encode('ascii')
            except:
                pass
            else:
                # insert the word into the trie
                insert(root, word.decode("utf-8"), fname)

# Ask the user for the word to search for
Enter = input("Please enter what you would like to search for: ")
# allow multi-word search
inp = Enter.split(' ')
rank = {}
# search for each word of the input
for word in inp:
    # search the trie, store the result in dic
    boolw, dic = find_prefix(root, word.lower())
    # rank the files in which the word was present
    for key in dic:
        if key not in rank:
            rank[key] = dic[key]
        else:
            rank[key] = rank[key] + dic[key]
# rank websites by the number of times the words appear: sort ascending,
# then reverse so the websites are displayed in order of relevance
items = [(v, k) for k, v in rank.items()]
items.sort()
items.reverse()
# display the search results
if not items:
    print("No results")
else:
    print("Results : ")
    # print all the files the input was found in, in order of maximum occurrences
    for key in items:
        print(key)
normal
{ "blob_id": "dcda8f26a06145579a9be6e5fbfdaed83d4908da", "index": 2459, "step-1": "<mask token>\n\n\nclass TrieNode(object):\n\n def __init__(self, char: str):\n self.char = char\n self.children = []\n self.word_finished = False\n self.counter = 1\n self.OccurrenceList = {}\n\n\n<mask token>\n\n\ndef find_prefix(root, prefix: str) ->Tuple[bool, int]:\n node = root\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n print('Word Not Found: ' + prefix)\n else:\n print('Word Found: ' + prefix)\n return True, node.OccurrenceList\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TrieNode(object):\n\n def __init__(self, char: str):\n self.char = char\n self.children = []\n self.word_finished = False\n self.counter = 1\n self.OccurrenceList = {}\n\n\n<mask token>\n\n\ndef insert(root, word: str, document):\n node = root\n for char in word:\n found_in_child = False\n for child in node.children:\n if child.char == char:\n child.counter += 1\n node = child\n found_in_child = True\n break\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n node = new_node\n node.word_finished = True\n if document not in node.OccurrenceList:\n node.OccurrenceList[document] = 1\n node.OccurrenceList[document] = node.OccurrenceList[document] + 1\n\n\ndef find_prefix(root, prefix: str) ->Tuple[bool, int]:\n node = root\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n print('Word Not Found: ' + prefix)\n else:\n print('Word Found: ' + prefix)\n return True, node.OccurrenceList\n\n\n<mask token>\nnltk.download('stopwords')\nnltk.download('punkt')\n<mask token>\nstop_words.update(string.punctuation)\n<mask token>\nfor file in files:\n fname = file\n file = open(fdata + str(file), encoding='utf8')\n soup = BeautifulSoup(file.read(), 'html.parser')\n [script.extract() for script in soup.findAll('script')]\n [style.extract() for style in soup.findAll('style')]\n words = word_tokenize(soup.get_text())\n words = [i for i in words if all(j not in string.punctuation for j in i)]\n for word in words:\n if word.lower() not in stop_words and len(word) > 2 and word.isdigit(\n ) == False:\n try:\n word = word.lower().strip().encode('ascII')\n except:\n a = 1\n else:\n insert(root, word.decode('utf-8'), fname)\n<mask token>\nfor word in inp:\n boolw, dic = find_prefix(root, word.lower())\n for key in dic:\n if key not in rank:\n rank[key] = dic[key]\n else:\n rank[key] = rank[key] + dic[key]\n<mask token>\nitems.sort()\nitems.reverse()\nif not items:\n print('No results')\nelse:\n print('Results : ')\n for key in items:\n print(key)\n", "step-3": "<mask token>\n\n\nclass TrieNode(object):\n\n def __init__(self, char: str):\n self.char = char\n self.children = []\n self.word_finished = False\n self.counter = 1\n self.OccurrenceList = {}\n\n\nroot = TrieNode('*')\n\n\ndef insert(root, word: str, document):\n node = root\n for char in word:\n found_in_child = False\n for child in node.children:\n if child.char == char:\n child.counter += 1\n node = child\n found_in_child = True\n break\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n node = new_node\n node.word_finished = True\n if document not in node.OccurrenceList:\n 
node.OccurrenceList[document] = 1\n node.OccurrenceList[document] = node.OccurrenceList[document] + 1\n\n\ndef find_prefix(root, prefix: str) ->Tuple[bool, int]:\n node = root\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n print('Word Not Found: ' + prefix)\n else:\n print('Word Found: ' + prefix)\n return True, node.OccurrenceList\n\n\n<mask token>\nnltk.download('stopwords')\nnltk.download('punkt')\n<mask token>\nstop_words = set(stopwords.words('english'))\nstop_words.update(string.punctuation)\n<mask token>\nfdata = './input/'\nfiles = os.listdir(fdata)\nfor file in files:\n fname = file\n file = open(fdata + str(file), encoding='utf8')\n soup = BeautifulSoup(file.read(), 'html.parser')\n [script.extract() for script in soup.findAll('script')]\n [style.extract() for style in soup.findAll('style')]\n words = word_tokenize(soup.get_text())\n words = [i for i in words if all(j not in string.punctuation for j in i)]\n for word in words:\n if word.lower() not in stop_words and len(word) > 2 and word.isdigit(\n ) == False:\n try:\n word = word.lower().strip().encode('ascII')\n except:\n a = 1\n else:\n insert(root, word.decode('utf-8'), fname)\nEnter = input('Please enter what you would like to search for: ')\ninp = Enter.split(' ')\nrank = {}\nfor word in inp:\n boolw, dic = find_prefix(root, word.lower())\n for key in dic:\n if key not in rank:\n rank[key] = dic[key]\n else:\n rank[key] = rank[key] + dic[key]\nitems = [(v, k) for k, v in rank.items()]\nitems.sort()\nitems.reverse()\nif not items:\n print('No results')\nelse:\n print('Results : ')\n for key in items:\n print(key)\n", "step-4": "from typing import Tuple\n\n\nclass TrieNode(object):\n\n def __init__(self, char: str):\n self.char = char\n self.children = []\n self.word_finished = False\n self.counter = 1\n self.OccurrenceList = {}\n\n\nroot = TrieNode('*')\n\n\ndef insert(root, word: str, document):\n node = root\n for char in word:\n found_in_child = False\n for child in node.children:\n if child.char == char:\n child.counter += 1\n node = child\n found_in_child = True\n break\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n node = new_node\n node.word_finished = True\n if document not in node.OccurrenceList:\n node.OccurrenceList[document] = 1\n node.OccurrenceList[document] = node.OccurrenceList[document] + 1\n\n\ndef find_prefix(root, prefix: str) ->Tuple[bool, int]:\n node = root\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n for child in node.children:\n if child.char == char:\n char_not_found = False\n node = child\n break\n if char_not_found:\n print('Word Not Found: ' + prefix)\n else:\n print('Word Found: ' + prefix)\n return True, node.OccurrenceList\n\n\nfrom bs4 import BeautifulSoup\nimport nltk\nnltk.download('stopwords')\nnltk.download('punkt')\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport re\nimport string\nstop_words = set(stopwords.words('english'))\nstop_words.update(string.punctuation)\nimport os\nfdata = './input/'\nfiles = os.listdir(fdata)\nfor file in files:\n fname = file\n file = open(fdata + str(file), encoding='utf8')\n soup = BeautifulSoup(file.read(), 'html.parser')\n [script.extract() for script in soup.findAll('script')]\n [style.extract() for style in soup.findAll('style')]\n words = 
word_tokenize(soup.get_text())\n words = [i for i in words if all(j not in string.punctuation for j in i)]\n for word in words:\n if word.lower() not in stop_words and len(word) > 2 and word.isdigit(\n ) == False:\n try:\n word = word.lower().strip().encode('ascII')\n except:\n a = 1\n else:\n insert(root, word.decode('utf-8'), fname)\nEnter = input('Please enter what you would like to search for: ')\ninp = Enter.split(' ')\nrank = {}\nfor word in inp:\n boolw, dic = find_prefix(root, word.lower())\n for key in dic:\n if key not in rank:\n rank[key] = dic[key]\n else:\n rank[key] = rank[key] + dic[key]\nitems = [(v, k) for k, v in rank.items()]\nitems.sort()\nitems.reverse()\nif not items:\n print('No results')\nelse:\n print('Results : ')\n for key in items:\n print(key)\n", "step-5": "from typing import Tuple\n\n#Creating a trie structure and it's node\nclass TrieNode(object): \n def __init__(self, char: str):\n self.char = char\n self.children = []\n #the last character of the word.`\n self.word_finished = False\n #counter for this character\n self.counter = 1\n #list of all the occurences of the prefix in the documents \n self.OccurrenceList={}\n \n#Initialize the root of the trie \nroot = TrieNode('*')\n\n#Adding a word in the trie structure\ndef insert(root, word: str,document):\n node = root\n for char in word:\n found_in_child = False\n # Search for the character in the children of the present `node`\n for child in node.children:\n if child.char == char:\n #the char of the word to be inserted is already present in trie; increment the counter of this char \n child.counter += 1\n # move the pointer to the node's child to continue the insertion of the rest of the word\n node = child\n found_in_child = True\n break\n # this char has never been inserted before, create node and insert it\n if not found_in_child:\n new_node = TrieNode(char)\n node.children.append(new_node)\n # And then point node to the new child\n node = new_node\n \n # At this point, word is inserted- we mark the end of this word\n node.word_finished = True\n if document not in node.OccurrenceList: #If document is not in OccurenceList for that word\n node.OccurrenceList[document]=1 # Create a new key with document name\n node.OccurrenceList[document]= node.OccurrenceList[document]+1 # We append the position in the document \n \n#Performing the search in our files for the input word, using the trie structure we created above\n#We will first check for the word's existence, if it exists- return file name and occurence number \ndef find_prefix(root, prefix: str) -> Tuple[bool, int]:\n node = root\n #handling the case of an empty trie ie the root node has no children\n if not root.children:\n return False, 0\n for char in prefix:\n char_not_found = True\n # Search through all the children of the node the pointer is pointing to\n for child in node.children:\n if child.char == char:\n #the char of the input word exists in trie\n char_not_found = False\n # increment the pointer to go further down the trie to check for the remaining chars in prefix\n node = child\n break\n #letting the user know that the input word of prefix doesn't exist in the trie \n if char_not_found:\n print(\"Word Not Found: \" +prefix)\n #input word found, return the found status, along the files in which it exists\n else: \n print(\"Word Found: \" +prefix)\n return True,node.OccurrenceList\n\n#for scrapping words from website\nfrom bs4 import BeautifulSoup\nimport nltk\nnltk.download('stopwords')\nnltk.download('punkt')\nfrom nltk.corpus import 
stopwords\nfrom nltk.tokenize import word_tokenize\nimport re\nimport string\nstop_words = set(stopwords.words('english'))\nstop_words.update(string.punctuation) \nimport os\n\n#selecting file for scrapping into fdata->files\n#please change the dircectory to run on your device\nfdata = r\"./input/\"\nfiles=os.listdir(fdata)\n#cleaning the text in every every file from punctuations, stop words, digits, words less than length 2 and other symbols\nfor file in files: \n fname=file #called later, while associating word with the file it exists in for insertion in trie\n file=open(fdata+str(file), encoding=\"utf8\")\n soup = BeautifulSoup(file.read(), 'html.parser')\n #filter the soup\n [script.extract() for script in soup.findAll('script')]\n [style.extract() for style in soup.findAll('style')]\n #gather words from filtered soup\n words = word_tokenize(soup.get_text())\n # remove the words containing punctuation\n words = [i for i in words if all(j not in string.punctuation for j in i)]\n #filtering words and cleaning the data to insert in trie\n for word in words:\n if word.lower() not in stop_words and len(word) > 2 and word.isdigit() == False:\n # build compressed trie tree\n try:\n # remove the words whcih can't encode to ascII\n word = word.lower().strip().encode('ascII')\n except:\n # print word\n a = 1\n else:\n #inserting words into tree\n insert(root, word.decode(\"utf-8\"), fname)\n \n# Asking the user for input word that we search \nEnter = input(\"Please enter what you would like to search for: \")\n#In case if multiple word search\ninp = Enter.split(' ')\nrank = {}\n#searching for each word of the input\nfor word in inp:\n #search in trie, store the result in dic\n boolw,dic = find_prefix(root, word.lower())\n#ranking the files in which the word was present\n for key in dic:\n if key not in rank:\n rank[key] = dic[key]\n else:\n rank[key] = rank[key] + dic[key]\n#ranking website based on number of time word present - sort them in acsending order and reversing them so we display \n# the websites in order of relevance\nitems=[(v,k) for k,v in rank.items()]\nitems.sort()\nitems.reverse()\n#displaying search results\nif not items:\n print(\"No results\")\nelse:\n print(\"Results : \")\n#printing all the files the input was found in, in order of maximum occurences \n for key in items:\n print(key)\n \n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
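For reference, a compact dict-based sketch of the trie-with-occurrence-counts idea from the record above (hypothetical code, not part of the dataset row; the row itself uses an explicit TrieNode class):

# Hypothetical minimal sketch: a nested-dict trie that records per-document
# occurrence counts and ranks documents for a multi-word query.
def insert(trie, word, doc):
    node = trie
    for ch in word:
        node = node.setdefault(ch, {})
    occ = node.setdefault('$', {})  # '$' marks the end of a word
    occ[doc] = occ.get(doc, 0) + 1

def lookup(trie, word):
    node = trie
    for ch in word:
        if ch not in node:
            return {}
        node = node[ch]
    return node.get('$', {})

trie = {}
docs = {'a.html': 'kidney disease kidney', 'b.html': 'disease'}  # toy corpus
for doc, text in docs.items():
    for w in text.split():
        insert(trie, w, doc)

rank = {}
for w in 'kidney disease'.split():
    for doc, n in lookup(trie, w).items():
        rank[doc] = rank.get(doc, 0) + n
print(sorted(rank.items(), key=lambda kv: -kv[1]))  # [('a.html', 3), ('b.html', 1)]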
fname = input('Enter the file name to open: ')
fh = open(fname)
lst1 = list()
data = dict()
# collect the sender address from every mbox envelope line ("From addr ...")
for ln in fh:
    if ln.startswith('From'):
        if ln.startswith('From:'):
            continue  # "From:" is a header line, not an envelope line
        else:
            word = ln.split()
            lst1.append(word[1])
# count how often each sender appears
for word in lst1:
    data[word] = data.get(word, 0) + 1
# track the sender with the highest count
bigcount = None
bigword = None
for word, count in data.items():
    if bigcount is None or bigcount < count:
        bigcount = count
        bigword = word
print(bigword, bigcount)
normal
{ "blob_id": "4fba13d051a3aceb393a4473cdbf6d4fc684c7ac", "index": 9473, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor ln in fh:\n if ln.startswith('From'):\n if ln.startswith('From:'):\n continue\n else:\n word = ln.split()\n lst1.append(word[1])\nfor word in lst1:\n data[word] = data.get(word, 0) + 1\n<mask token>\nfor word, count in data.items():\n if bigcount is None or bigcount < count:\n bigcount = count\n bigword = word\nprint(bigword, bigcount)\n", "step-3": "fname = input('Enter the file name to open')\nfh = open(fname)\nlst1 = list()\ndata = dict()\nfor ln in fh:\n if ln.startswith('From'):\n if ln.startswith('From:'):\n continue\n else:\n word = ln.split()\n lst1.append(word[1])\nfor word in lst1:\n data[word] = data.get(word, 0) + 1\nbigcount = None\nbigword = None\nfor word, count in data.items():\n if bigcount is None or bigcount < count:\n bigcount = count\n bigword = word\nprint(bigword, bigcount)\n", "step-4": "fname = input('Enter the file name to open')\r\nfh = open(fname)\r\nlst1 = list()\r\ndata = dict()\r\nfor ln in fh :\r\n if ln.startswith(\"From\"):\r\n if ln.startswith('From:'):\r\n continue\r\n else :\r\n word = ln.split()\r\n lst1.append(word[1])\r\nfor word in lst1:\r\n data[word] = data.get(word,0)+1\r\nbigcount = None\r\nbigword = None\r\nfor word,count in data.items():\r\n if bigcount is None or bigcount<count:\r\n bigcount = count\r\n bigword = word\r\nprint(bigword,bigcount)\r\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
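The same count-and-take-the-maximum pattern can be collapsed with collections.Counter; a hypothetical equivalent rewrite ('mbox-short.txt' is an assumed sample file in the classic mbox format, where envelope lines begin "From " and header lines begin "From:"):

from collections import Counter

senders = Counter()
with open('mbox-short.txt') as fh:
    for ln in fh:
        # "From " (with a space) already excludes "From:" header lines
        if ln.startswith('From '):
            senders[ln.split()[1]] += 1
if senders:
    sender, count = senders.most_common(1)[0]
    print(sender, count)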
# determines where the robot is located
def sense(p, Z, colors, sensor_right):
    # initialization
    q = []
    pHit = sensor_right
    pMiss = 1 - sensor_right
    # number of rows
    m = len(colors)
    # number of columns
    n = len(colors[0])
    # running sum used for normalization
    s = 0
    for i in range(m):
        temp = []
        for j in range(n):
            hit = (Z == colors[i][j])
            # product of prior and measurement likelihood
            temp.append(p[i][j] * (hit * pHit + (1 - hit) * pMiss))
        q.append(temp)
        s = s + sum(temp)

    # normalization
    if s != 0:
        for i in range(m):
            for j in range(n):
                q[i][j] = q[i][j] / s
    return q

# moves the robot by U units
def move(p, U, p_move, m, n):
    # initialization
    q = []
    pExact = p_move
    pUndershoot = 1 - p_move  # probability of staying at the same location

    for i in range(m):
        temp = []
        for j in range(n):
            s = pExact * p[(i - U[0]) % m][(j - U[1]) % n]
            # convolution / addition
            s = s + pUndershoot * p[i][j]
            temp.append(s)
        q.append(temp)

    return q

# p_move: probability that the motion is correct
# sensor_right: probability that the sensor is correct
def localize(colors, measurements, motions, sensor_right, p_move):
    p = []
    # start with a uniform distribution
    # number of rows
    m = len(colors)
    # number of columns
    n = len(colors[0])
    # size of the grid
    size = m * n

    for i in range(m):
        temp = []
        for j in range(n):
            temp.append(1 / size)
        p.append(temp)

    for k in range(len(measurements)):
        p = move(p, motions[k], p_move, m, n)
        p = sense(p, measurements[k], colors, sensor_right)

    return p
normal
{ "blob_id": "10937ee1e48d23b12b76a2abc44ee8bd0647aef5", "index": 9248, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef localize(colors, measurements, motions, sensor_right, p_move):\n p = []\n m = len(colors)\n n = len(colors[0])\n size = m * n\n for i in range(m):\n temp = []\n for j in range(n):\n temp.append(1 / size)\n p.append(temp)\n for k in range(len(measurements)):\n p = move(p, motions[k], p_move, m, n)\n p = sense(p, measurements[k], colors, sensor_right)\n return p\n", "step-3": "<mask token>\n\n\ndef move(p, U, p_move, m, n):\n q = []\n pExact = p_move\n pUndershoot = 1 - p_move\n for i in range(m):\n temp = []\n for j in range(n):\n s = pExact * p[(i - U[0]) % m][(j - U[1]) % n]\n s = s + pUndershoot * p[i][j]\n temp.append(s)\n q.append(temp)\n return q\n\n\ndef localize(colors, measurements, motions, sensor_right, p_move):\n p = []\n m = len(colors)\n n = len(colors[0])\n size = m * n\n for i in range(m):\n temp = []\n for j in range(n):\n temp.append(1 / size)\n p.append(temp)\n for k in range(len(measurements)):\n p = move(p, motions[k], p_move, m, n)\n p = sense(p, measurements[k], colors, sensor_right)\n return p\n", "step-4": "def sense(p, Z, colors, sensor_right):\n q = []\n pHit = sensor_right\n pMiss = 1 - sensor_right\n m = len(colors)\n n = len(colors[0])\n s = 0\n for i in range(m):\n temp = []\n for j in range(n):\n hit = Z == colors[i][j]\n temp.append(p[i][j] * (hit * pHit + (1 - hit) * pMiss))\n q.append(temp)\n s = s + sum(temp)\n if s != 0:\n for i in range(m):\n for j in range(n):\n q[i][j] = q[i][j] / s\n return q\n\n\ndef move(p, U, p_move, m, n):\n q = []\n pExact = p_move\n pUndershoot = 1 - p_move\n for i in range(m):\n temp = []\n for j in range(n):\n s = pExact * p[(i - U[0]) % m][(j - U[1]) % n]\n s = s + pUndershoot * p[i][j]\n temp.append(s)\n q.append(temp)\n return q\n\n\ndef localize(colors, measurements, motions, sensor_right, p_move):\n p = []\n m = len(colors)\n n = len(colors[0])\n size = m * n\n for i in range(m):\n temp = []\n for j in range(n):\n temp.append(1 / size)\n p.append(temp)\n for k in range(len(measurements)):\n p = move(p, motions[k], p_move, m, n)\n p = sense(p, measurements[k], colors, sensor_right)\n return p\n", "step-5": "#determines where the robot is located.\ndef sense(p, Z, colors, sensor_right):\n #initialization\n q = []\n pHit = sensor_right;\n pMiss = 1 - sensor_right;\n #number of rows\n m = len(colors) \n #number of columns\n n = len(colors[0])\n #sum \n s = 0\n for i in range(m):\n temp = []\n \n for j in range(n):\n hit = (Z == colors[i][j]) \n #product \n temp.append(p[i][j] * (hit * pHit + (1-hit) * pMiss))\n q.append(temp)\n s = s + sum(temp) \n \n #normalization\n if(s != 0):\n for i in range(m):\n for j in range(n):\n q[i][j] = q[i][j] / s\n return q\n\n#moves the robot by U units.\ndef move(p, U, p_move, m, n):\n #initialization\n q = []\n pExact = p_move;\n pUndershoot = 1 - p_move;#probability of staying at the same location\n \n for i in range(m):\n temp = []\n \n for j in range(n):\n s = pExact * p[(i - U[0])% m][(j - U[1])% n]\n #convolution /addition\n s = s + pUndershoot * p[i][j]\n temp.append(s)\n q.append(temp)\n\n return q\n\n#p_move probablity that motion is correct\n#sensor_right probability that the sensor is correct \ndef localize(colors, measurements, motions, sensor_right, p_move):\n p = []\n #start with uniform distribution\n #number of rows\n m = len(colors) \n #number of columns\n n = len(colors[0])\n #size \n size = m * n;\n \n for i in range(m):\n temp = [];\n for j 
in range(n):\n temp.append(1/size);\n p.append(temp)\n \n\n for k in range(len(measurements)):\n p = move(p, motions[k], p_move, m, n)\n p = sense(p, measurements[k], colors, sensor_right) \n\n return p", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
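A hypothetical worked run of localize() from the row above on a tiny 2x2 world, with a perfect motion model and a single measurement:

colors = [['R', 'G'],
          ['G', 'R']]
p = localize(colors, measurements=['R'], motions=[[0, 0]],
             sensor_right=0.8, p_move=1.0)
for row in p:
    print(['%.2f' % x for x in row])
# starting from the uniform prior of 0.25, the two 'R' cells end up with
# probability 0.40 each and the two 'G' cells with 0.10 each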
"""Toggle the proof color. Like operating in the menu: **View** > **Proof Colors** (Ctrl + Y) """ # Import local modules from photoshop import Session with Session() as ps: ps.app.runMenuItem(ps.app.stringIDToTypeID("toggleProofColors"))
normal
{ "blob_id": "1db866ca73bc264d474d5e5086c4a047d7e46546", "index": 2299, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith Session() as ps:\n ps.app.runMenuItem(ps.app.stringIDToTypeID('toggleProofColors'))\n", "step-3": "<mask token>\nfrom photoshop import Session\nwith Session() as ps:\n ps.app.runMenuItem(ps.app.stringIDToTypeID('toggleProofColors'))\n", "step-4": "\"\"\"Toggle the proof color.\n\nLike operating in the menu:\n**View** > **Proof Colors** (Ctrl + Y)\n\n\"\"\"\n# Import local modules\nfrom photoshop import Session\n\n\nwith Session() as ps:\n ps.app.runMenuItem(ps.app.stringIDToTypeID(\"toggleProofColors\"))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
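Since View > Proof Colors is a toggle, running the same menu item twice restores the original state; a small variant using the same photoshop-python-api calls as above:

from photoshop import Session

with Session() as ps:
    toggle = ps.app.stringIDToTypeID("toggleProofColors")
    ps.app.runMenuItem(toggle)  # proof colors on
    ps.app.runMenuItem(toggle)  # ...and off again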
import os, argparse, collections

defaults = {'color': 'red', 'user': 'guest'}
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-c', '--color')

# a simple Namespace object will be built up from attributes parsed out of the command line

namespace = parser.parse_args()
command_line_args = {k: v for k, v in vars(namespace).items() if v is not None}

combined = collections.ChainMap(command_line_args, os.environ, defaults)

print(combined['color'])
print(combined['user'])
normal
{ "blob_id": "3c31e3f2a6f320bc5ae33f0ba1d234a089371899", "index": 9199, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\n<mask token>\nprint(combined['color'])\nprint(combined['user'])\n", "step-3": "<mask token>\ndefaults = {'color': 'red', 'user': 'guest'}\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\nnamespace = parser.parse_args()\ncommand_line_args = {k: v for k, v in vars(namespace).items() if v is not None}\ncombined = collections.ChainMap(command_line_args, os.environ, defaults)\nprint(combined['color'])\nprint(combined['user'])\n", "step-4": "import os, argparse, collections\ndefaults = {'color': 'red', 'user': 'guest'}\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user')\nparser.add_argument('-c', '--color')\nnamespace = parser.parse_args()\ncommand_line_args = {k: v for k, v in vars(namespace).items() if v is not None}\ncombined = collections.ChainMap(command_line_args, os.environ, defaults)\nprint(combined['color'])\nprint(combined['user'])\n", "step-5": "import os, argparse,collections\n\ndefaults ={'color':'red','user':'guest'}\nparser=argparse.ArgumentParser()\nparser.add_argument('-u','--user')\nparser.add_argument('-c','--color')\n\n#a simple Namespace object will be built up from attributes parsed out of the command lin\n\nnamespace= parser.parse_args()\ncommand_line_args= {k: v for k , v in vars(namespace).items()if v is not None}\n\ncombined= collections.ChainMap(command_line_args,os.environ,defaults)\n\nprint(combined['color'])\nprint(combined['user'])", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
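A self-contained sketch of the lookup order above, with hypothetical stand-in mappings: ChainMap searches its mappings left to right and returns the first hit, so command-line arguments shadow the environment, which shadows the defaults.

import collections

defaults = {'color': 'red', 'user': 'guest'}
cli = {'user': 'alice'}   # stands in for the parsed command-line args
env = {'COLOR': 'blue'}   # note: real os.environ keys are usually upper-case
combined = collections.ChainMap(cli, env, defaults)
print(combined['user'])   # 'alice' -- found in the first mapping
print(combined['color'])  # 'red'   -- 'COLOR' != 'color', so the defaults win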
from newspaper import Article
import random
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import nltk
import numpy as np
import warnings
import speech_recognition as sr


warnings.filterwarnings('ignore')
nltk.download('punkt', quiet=True)
nltk.download('wordnet', quiet=True)
article = Article('https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521')
article.download()
article.parse()
article.nlp()
corpus = article.text
# print(corpus)

text = corpus
sent_tokens = nltk.sent_tokenize(text)  # convert the text into a list of sentences
# print(sent_tokens)

# create a dictionary (key:value) pair to remove punctuation
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)
# print(string.punctuation)
# print(remove_punct_dict)

# create a function that returns a list of lemmatized lowercase words after removing
# punctuation, i.e. every sentence in the article is converted into a list of tokens
def LemNormalize(text):
    return nltk.word_tokenize(text.lower().translate(remove_punct_dict))
# prints the tokenized text with the punctuation removed
# print(LemNormalize(text))

# keyword matching
# greeting inputs
GREETING_INPUTS = ["hi", "hello", "hola", "greetings", "wassup", "hey"]
# greeting responses back
GREETING_RESPONSE = ["howdy", "hi", "hey", "what's good", "hello"]
# function to return a random greeting response
def greeting(sentence):
    # return a randomly chosen response
    for word in sentence.split():
        if word.lower() in GREETING_INPUTS:
            return random.choice(GREETING_RESPONSE)


# generate the response to the given question
def responce(user_responce):

    # the user's query is taken
    # user_responce = 'what is chronic kidney disease'
    # the user may type in capitals, so convert the query to lower case
    user_responce = user_responce.lower()
    # start the chatbot response as an empty string
    robo_responce = ''
    # append the query to the list of sentences
    sent_tokens.append(user_responce)
    # create a TfidfVectorizer object; it weighs how often each word occurs
    TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')
    # convert the text into a matrix of TF-IDF features
    tfidf = TfidVec.fit_transform(sent_tokens)
    # print(tfidf)

    # get the measure of similarity (similarity scores)
    vals = cosine_similarity(tfidf[-1], tfidf)
    # print(vals)
    # get the index of the sentence most similar to the user's query
    idx = vals.argsort()[0][-2]

    # reduce the dimensionality of vals
    flat = vals.flatten()
    # sort the list in ascending order
    flat.sort()
    # get the best similarity score for the user's query
    score = flat[-2]

    # print the similarity score
    # print(score)
    # if the score is 0, nothing in the article matched the user's query
    if score == 0:
        robo_responce = robo_responce + "I apologise, I didn't understand"
    else:
        robo_responce = robo_responce + sent_tokens[idx]

    # print the chatbot response
    # print(robo_responce)
    sent_tokens.remove(user_responce)
    return robo_responce


r = sr.Recognizer()
with sr.Microphone() as source:
    flag = True
    print("BOT: I am doctor bot and I am going to answer your questions")
    while flag == True:
        print("speak:")
        audio = r.listen(source)
        try:
            text = r.recognize_google(audio)
            print("you said: {}".format(text))
            user_responce = text
            if user_responce != 'bye':
                if user_responce == 'thanks' or user_responce == 'thank you':
                    flag = False
                    print("BOT: you are welcome")
                else:
                    if greeting(user_responce) is not None:
                        print("BOT: " + greeting(user_responce))
                    else:
                        print("BOT: " + responce(user_responce))
            else:
                flag = False
                print("BOT: chat with you later")
        except:
            print("could not recognize")
normal
{ "blob_id": "53b56cf9265a658d999388f0a1e03d7ceb186213", "index": 2836, "step-1": "<mask token>\n\n\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\n\n<mask token>\n\n\ndef greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSE)\n\n\ndef responce(user_responce):\n user_responce = user_responce.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx = vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = robo_responce + \"i aplogise i didn't understand\"\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n sent_tokens.remove(user_responce)\n return robo_responce\n\n\n<mask token>\n", "step-2": "<mask token>\nwarnings.filterwarnings('ignore')\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\n<mask token>\narticle.download()\narticle.parse()\narticle.nlp()\n<mask token>\n\n\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\n\n<mask token>\n\n\ndef greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSE)\n\n\ndef responce(user_responce):\n user_responce = user_responce.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx = vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = robo_responce + \"i aplogise i didn't understand\"\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n sent_tokens.remove(user_responce)\n return robo_responce\n\n\n<mask token>\nwith sr.Microphone() as source:\n flag = True\n print('BOT:Iam doctor bot and iam going to answeer your questions')\n while flag == True:\n print('speak:')\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print('you said:{}'.format(text))\n user_responce = text\n if user_responce != 'bye':\n if user_responce == 'thanks' or user_responce == 'thank you':\n flag = False\n print('BOT:you are welcome')\n elif greeting(user_responce) != None:\n print('BOT:' + greeting(user_responce))\n else:\n print('BOT: ' + responce(user_responce))\n else:\n flag = False\n print('BOT:chat with u later')\n except:\n print('could not recognize')\n", "step-3": "<mask token>\nwarnings.filterwarnings('ignore')\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\narticle = Article(\n 'https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521'\n )\narticle.download()\narticle.parse()\narticle.nlp()\ncorpus = article.text\ntext = corpus\nsent_tokens = nltk.sent_tokenize(text)\nremove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)\n\n\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\n\nGREETING_INPUTS = ['hi', 'hello', 'hola', 'greetings', 'wassup', 'hey']\nGREETING_RESPONSE = ['howdy', 'hi', 'hey', \"what's good\", 'hello']\n\n\ndef greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return 
random.choice(GREETING_RESPONSE)\n\n\ndef responce(user_responce):\n user_responce = user_responce.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx = vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = robo_responce + \"i aplogise i didn't understand\"\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n sent_tokens.remove(user_responce)\n return robo_responce\n\n\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n flag = True\n print('BOT:Iam doctor bot and iam going to answeer your questions')\n while flag == True:\n print('speak:')\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print('you said:{}'.format(text))\n user_responce = text\n if user_responce != 'bye':\n if user_responce == 'thanks' or user_responce == 'thank you':\n flag = False\n print('BOT:you are welcome')\n elif greeting(user_responce) != None:\n print('BOT:' + greeting(user_responce))\n else:\n print('BOT: ' + responce(user_responce))\n else:\n flag = False\n print('BOT:chat with u later')\n except:\n print('could not recognize')\n", "step-4": "from newspaper import Article\nimport random\nimport string\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport nltk\nimport numpy as np\nimport warnings\nimport speech_recognition as sr\nwarnings.filterwarnings('ignore')\nnltk.download('punkt', quiet=True)\nnltk.download('wordnet', quiet=True)\narticle = Article(\n 'https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521'\n )\narticle.download()\narticle.parse()\narticle.nlp()\ncorpus = article.text\ntext = corpus\nsent_tokens = nltk.sent_tokenize(text)\nremove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)\n\n\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\n\nGREETING_INPUTS = ['hi', 'hello', 'hola', 'greetings', 'wassup', 'hey']\nGREETING_RESPONSE = ['howdy', 'hi', 'hey', \"what's good\", 'hello']\n\n\ndef greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUTS:\n return random.choice(GREETING_RESPONSE)\n\n\ndef responce(user_responce):\n user_responce = user_responce.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(sent_tokens)\n vals = cosine_similarity(tfidf[-1], tfidf)\n idx = vals.argsort()[0][-2]\n flat = vals.flatten()\n flat.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = robo_responce + \"i aplogise i didn't understand\"\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n sent_tokens.remove(user_responce)\n return robo_responce\n\n\nr = sr.Recognizer()\nwith sr.Microphone() as source:\n flag = True\n print('BOT:Iam doctor bot and iam going to answeer your questions')\n while flag == True:\n print('speak:')\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print('you said:{}'.format(text))\n user_responce = text\n if user_responce != 'bye':\n if user_responce == 'thanks' or user_responce == 'thank you':\n flag = False\n print('BOT:you are welcome')\n elif greeting(user_responce) != None:\n print('BOT:' + greeting(user_responce))\n else:\n print('BOT: ' + 
responce(user_responce))\n else:\n flag = False\n print('BOT:chat with u later')\n except:\n print('could not recognize')\n", "step-5": "from newspaper import Article\r\nimport random\r\nimport string\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport nltk\r\nimport numpy as np\r\nimport warnings\r\nimport speech_recognition as sr\r\n\r\n\r\n\r\nwarnings.filterwarnings('ignore')\r\nnltk.download('punkt',quiet=True)\r\nnltk.download('wordnet',quiet=True)\r\narticle=Article('https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521')\r\narticle.download()\r\narticle.parse()\r\narticle.nlp()\r\ncorpus=article.text\r\n#print(corpus)\r\n \r\ntext=corpus\r\nsent_tokens=nltk.sent_tokenize(text)#convert the text into a alist of sentences\r\n#print(sent_tokens)\r\n\r\n#creatre a dictionary (key:value) pair to remove punctuations\r\nremove_punct_dict=dict( (ord(punct),None) for punct in string.punctuation)\r\n#print(string.punctuation)\r\n#print(remove_punct_dict)\r\n\r\n#create ala function to return a list of lenmatized lowercase words after removing puctuatuins.i,e all the sentences in the article are now converted into a list\r\ndef LemNormalize(text):\r\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\r\n#prints the tokenozation text by removing the punctuation\r\n\r\n#print(LemNormalize(text))\r\n\r\n#keyword matching\r\n#GREETINGS INPUT\r\nGREETING_INPUTS=[\"hi\",\"hello\",\"hola\",\"greetings\",\"wassup\",\"hey\"]\r\n#greeting response back\r\nGREETING_RESPONSE=[\"howdy\",\"hi\",\"hey\",\"what's good\",\"hello\"]\r\n#function to return a random greeting response\r\ndef greeting(sentence):\r\n #return a randomly choosen responce\r\n for word in sentence.split():\r\n if word.lower() in GREETING_INPUTS:\r\n return random.choice(GREETING_RESPONSE)\r\n\r\n\r\n\r\n\r\n#generate the respnse to the given question\r\ndef responce(user_responce):\r\n \r\n#the user's query is taken \r\n #user_responce='what is chronic kidney disease'\r\n#the user may give his input as capitals so we should convert them into lower()\r\n user_responce=user_responce.lower()\r\n#set the chat bot respnse to an empt srting i.e declare the roborespnse as a string\r\n robo_responce=''\r\n#convert the user_responce into a list\r\n sent_tokens.append(user_responce)\r\n#create a TfidVectorizer object it is used to know how man tomes a word has occured\r\n TfidVec=TfidfVectorizer(tokenizer=LemNormalize,stop_words='english')\r\n#convert the text into a matrix of TF-IDF features\r\n tfidf=TfidVec.fit_transform(sent_tokens)\r\n#print(tfidf)\r\n\r\n#get the measure of similarity(similarit scores)\r\n vals=cosine_similarity(tfidf[-1],tfidf)\r\n#print(vals)\r\n#get the index of the most similar text/sentence to the user response\r\n idx=vals.argsort()[0][-2]\r\n\r\n #reduce the domensionalit of vals\r\n flat=vals.flatten()\r\n#sort the list in asc\r\n flat.sort()\r\n#get the most simliar score for the user's responce\r\n score=flat[-2]\r\n\r\n\r\n#print the similarit score\r\n#print(score) \r\n#if the score is 0 then the most similar score to the user resoponce\r\n if(score==0):\r\n robo_responce=robo_responce+\"i aplogise i didn't understand\"\r\n else:\r\n robo_responce=robo_responce+sent_tokens[idx]\r\n \r\n#pritn the chat bot respnce\r\n #print(robo_responce)\r\n sent_tokens.remove(user_responce)\r\n return robo_responce\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nr=sr.Recognizer()\r\nwith sr.Microphone() 
as source:\r\n\r\n\r\n flag=True\r\n print(\"BOT:Iam doctor bot and iam going to answeer your questions\")\r\n while(flag==True):\r\n print(\"speak:\")\r\n audio=r.listen(source)\r\n try:\r\n text=r.recognize_google(audio)\r\n print(\"you said:{}\".format(text))\r\n user_responce=text\r\n if(user_responce!='bye'):\r\n if(user_responce=='thanks' or user_responce=='thank you'):\r\n flag=False\r\n print(\"BOT:you are welcome\")\r\n else:\r\n if(greeting(user_responce)!=None):\r\n print(\"BOT:\"+greeting(user_responce))\r\n else:\r\n print(\"BOT: \"+responce(user_responce))\r\n \r\n else:\r\n flag=False\r\n print(\"BOT:chat with u later\")\r\n except:\r\n print(\"could not recognize\")\r\n ", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
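The retrieval core of the row above, isolated on a hypothetical three-sentence corpus: the query is appended last, scored against every sentence with cosine similarity, and the best non-self match is picked via argsort (index -1 is the query itself).

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

sentences = [
    'chronic kidney disease damages the kidneys',
    'symptoms include fatigue and swelling',
    'what is chronic kidney disease',  # the user query goes last
]
tfidf = TfidfVectorizer().fit_transform(sentences)
scores = cosine_similarity(tfidf[-1], tfidf)  # query vs. every sentence
best = scores.argsort()[0][-2]                # -1 is the query itself
print(sentences[best])  # -> 'chronic kidney disease damages the kidneys'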
from src.secStructure import * from suffix_trees import STree import math import re def test_processData(): # Test1: ignoring peak position data = ['example/example1.fa', 'example/example2.fa'] struct_data = ['example/exampleStrucData/exampleStructuralData1.fa', 'example/exampleStrucData/exampleStructuralData2.fa'] k = 3 top = 10 peak = None feature = None cmd = False no_sec_peak = 1 # True # Executing process = SecStructure(data, data, k, peak, top, feature, cmd, struct_data, no_sec_peak) alphabet1 = process.getStructProfile1().getAlphabet() alphabet2 = process.getStructProfile2().getAlphabet() kmer_counts1 = process.getStructProfile1().getProfile() kmer_counts2 = process.getStructProfile2().getProfile() results = SecStructure.processData(process) template1 = results[0][0] template2 = results[1][0] dotbracket_string1 = results[0][1] dotbracket_string2 = results[1][1] # Testing assert len(alphabet1) == 6 for e in ["S", "H", "B", "I", "M", "E"]: assert e in alphabet1 assert len(alphabet2) == 2 assert "S" in alphabet2 assert "E" in alphabet2 assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3, 'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4, 'BS': 1} assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2} assert template1 == "EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE" assert dotbracket_string1 == "...(((...(((...(((...))))))...)))...(((...)))..." assert template2 == "EEESSSSSSEEE" assert dotbracket_string2 == "...((()))..." # Test2: with peak position no_sec_peak = 0 # True # Executing process2 = SecStructure(data, data, k, peak, top, feature, cmd, struct_data, no_sec_peak) alphabet1 = process2.getStructProfile1().getAlphabet() alphabet2 = process2.getStructProfile2().getAlphabet() kmer_counts1 = process2.getStructProfile1().getProfile() kmer_counts2 = process2.getStructProfile2().getProfile() results = SecStructure.processData(process2) template1 = results[0][0] template2 = results[1][0] dotbracket_string1 = results[0][1] dotbracket_string2 = results[1][1] # Testing assert len(alphabet1) == 10 for e in ["s", "h", "b", "i", "m", "E", "S", "B", "I", "E"]: assert e in alphabet1 assert len(alphabet2) == 4 for e in ["s", "S", "e", "E"]: assert e in alphabet2 assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1, 'Is': 1, 'bB': 1, 'Bs': 1} assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1} assert template1 == "EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE" assert dotbracket_string1 == "...(((...(((...((())))))...)))..." assert template2 == "EEESSSSSSEEE" assert dotbracket_string2 == "...((()))..." # Test3: different alphabets sProfile1 = process.getStructProfile1() sProfile2 = process.getStructProfile2() # Test3a: alphabets with no multiloop alphabet3 = ["S", "B", "E"] alphabet4 = ["S", "I", "E"] sProfile1.setAlphabet(alphabet3) sProfile2.setAlphabet(alphabet4) results = SecStructure.processData(process) template1 = results[0][0] template2 = results[1][0] dotbracket_string1 = results[0][1] dotbracket_string2 = results[1][1] assert template1 == "EEESSSBBBSSSSSSSSSEEE" assert dotbracket_string1 == "...(((...((())))))..." assert template2 == "EEESSSIIISSSSSSIIISSSEEE" assert dotbracket_string2 == "...(((...((()))...)))..." 
# Test3b: alphabets with only hairpin or hairpin and multiloop alphabet5 = ["S", "H", "E"] alphabet6 = ["S", "H", "M", "E"] sProfile1.setAlphabet(alphabet5) sProfile2.setAlphabet(alphabet6) results = SecStructure.processData(process) template1 = results[0][0] template2 = results[1][0] dotbracket_string1 = results[0][1] dotbracket_string2 = results[1][1] assert template1 == "EEESSSHHHSSSEEE" assert dotbracket_string1 == "...(((...)))..." assert template2 == "EEESSSHHHSSSMMMSSSHHHSSSEEE" assert dotbracket_string2 == "...(((...)))...(((...)))..." # Test3c: ('flawed') alphabets with no multiloops alphabet7 = ["S", "H", "E", "B", "I"] alphabet8 = ["S", "M", "E"] # should be equal to ["S","E"] sProfile1.setAlphabet(alphabet7) sProfile2.setAlphabet(alphabet8) results = SecStructure.processData(process) template1 = results[0][0] template2 = results[1][0] dotbracket_string1 = results[0][1] dotbracket_string2 = results[1][1] assert template1 == "EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE" assert dotbracket_string1 == "...(((...(((...(((...))))))...)))..." assert template2 == "EEESSSSSSEEE" assert dotbracket_string2 == "...((()))..." def test_createColorVector(): # Test1: no normalization vector wanted k = 2 no_sec_peak = 1 template = "EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE" kmer_counts = {"EE": 5, "ES": 7, "SS": 20, "SI": 10, "II": 15, "IS": 11, "SB": 5, "BB": 6, "BS": 5, "SH": 4, "HH": 5, "HS": 4, "SE": 7} template_sTree = STree.STree(template) normalization_vector1 = None color_hm = {str(i): 0 for i in range(1, len(template) + 1)} # Executing new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k, template_sTree, kmer_counts, color_hm, no_sec_peak, normalization_vector1) assert len(color_hm) == len(new_color_hm1) for i in color_hm.keys(): x = color_hm[i] if x > 0: assert new_color_hm1[i] == math.log(x, 2) else: assert new_color_hm1[i] == 0 assert len(not_matched1) == 0 assert color_domain_max1 == 4.954196310386876 # Test2: with normalization vector normalization_vector2 = {"EE": 0, "ES": 0, "SS": 0.7, "SI": 0.1, "II": 0.2, "IS": 0, "SB": 0, "BB": 0, "BS": 0, "SH": 0, "HH": 0, "HS": 0, "SE": 0} # Execution color_hm = {str(i): 0 for i in range(1, len(template) + 1)} new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k, template_sTree, kmer_counts, color_hm, no_sec_peak, normalization_vector2) last_idx = -1 last_kmer = "" test_color_hm = {str(i): 0 for i in range(1, len(template) + 1)} for kmer in normalization_vector2: indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.escape(kmer)), template)] indices_list.sort() norm = normalization_vector2[kmer] if norm == 0: norm = 1 for idx in indices_list: for i in range(0, k): current_idx = str(idx + i + 1) if last_idx + 2 == int(current_idx) and last_kmer == kmer: continue test_color_hm[current_idx] += (kmer_counts[kmer] / norm) last_idx = idx last_kmer = kmer test_color_hm = {x: math.log(y, 2) if y > 0 else y for x, y in test_color_hm.items()} test_color_domain_max = max(test_color_hm.values()) # Testing assert new_color_hm1 is not new_color_hm2 assert len(color_hm) == len(new_color_hm2) assert len(not_matched2) == 0 assert color_domain_max2 == test_color_domain_max for i in new_color_hm2.keys(): assert new_color_hm2[i] == test_color_hm[i] # Test3: normalization vector and secondary peak position kmer_counts2 = {"Ee": 5, "eS": 7, "sS": 20, "Si": 10, "iI": 15, "iS": 11, "Sb": 5, "Bb": 6, "bS": 5, "sH": 4, "Hh": 5, "hS": 4, "Se": 7} no_sec_peak2 = 0 # Execution color_hm = {str(i): 0 for i in range(1, len(template) + 1)} 
    new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k, template_sTree, kmer_counts2, color_hm,
                                                                       no_sec_peak2, normalization_vector2)

    test_color_hm2 = {str(i): 0 for i in range(1, len(template) + 1)}
    for kmer in kmer_counts2.keys():
        indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.escape(kmer.upper())), template)]
        indices_list.sort()
        norm = normalization_vector2[kmer.upper()]
        if norm == 0:
            norm = 1
        for idx in indices_list:
            # use only the peak position in the 2-mer for visualization
            idx = [idx + i for i in range(0, len(kmer)) if kmer[i].isupper()][0]
            test_color_hm2[str(idx + 1)] += (kmer_counts2[kmer] / norm)

    test_color_hm2 = {x: math.log(y, 2) if y > 0 else y for x, y in test_color_hm2.items()}
    test_color_domain_max2 = max(test_color_hm2.values())

    # Testing
    assert len(not_matched3) == 0
    assert new_color_hm2 is not new_color_hm3
    assert len(color_hm) == len(new_color_hm3)
    for i in test_color_hm2:
        assert test_color_hm2[i] == new_color_hm3[i]
    assert test_color_domain_max2 == color_domain_max3


def test_helpAddIBloop():
    k = 3

    # Test 1: forward and all true
    template1 = ["EEE"]
    internalloop = True
    bulge = True
    forward = True

    # Execution
    new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)

    # Test 2: backward and all true
    template2 = ["EEE", "SSS", "III", "SSS", "BBB", "SSS", "HHH"]
    internalloop = True
    bulge = True
    forward = False

    # Execution
    new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)

    # Test 3: only internal loops, forward and backward
    template3_f = ["EEE"]
    template3_b = ["EEE", "SSS", "III", "SSS", "HHH"]
    internalloop = True
    bulge = False
    forward = True

    # Execution
    new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge, forward)
    forward = False
    new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge, forward)

    # Test 4: only bulges, forward and backward
    template4_f = ["EEE"]
    template4_b = ["EEE", "SSS", "BBB", "SSS", "HHH"]
    internalloop = False
    bulge = True
    forward = True

    # Execution
    new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge, forward)
    forward = False
    new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge, forward)

    # Testing
    assert new_template1 == ["EEE", "SSS", "III", "SSS", "BBB"]
    assert new_template2 == ["EEE", "SSS", "III", "SSS", "BBB", "SSS", "HHH", "SSS", "SSS", "III"]
    assert new_template3_f == ["EEE", "SSS", "III"]
    assert new_template3_b == ["EEE", "SSS", "III", "SSS", "HHH", "SSS", "III"]
    assert new_template4_f == ["EEE", "SSS", "BBB"]
    assert new_template4_b == ["EEE", "SSS", "BBB", "SSS", "HHH", "SSS"]


def test_element2dotbracket():
    k3 = 3
    k2 = 2
    k4 = 4

    # Test 1: without multiloop
    elem_list1 = ["EEE", "SSS", "III", "SSS", "BBB", "SSS", "HHH", "SSS", "SSS", "III", "SSS", "EEE"]
    dotbracket_string1 = "...(((...(((...(((...))))))...)))..."

    # Test 2: with multiloop
    elem_list2 = ["EE", "SS", "II", "SS", "HH", "SS", "II", "SS", "MM", "SS", "BB", "SS", "HH", "SS", "SS", "EE"]
    dotbracket_string2 = "..((..((..))..))..((..((..)))).."

    # Test 3: without loops
    elem_list3 = ["EEEE", "SSSS", "SSSS", "EEEE"]
    dotbracket_string3 = "....(((())))...."

    # Test 4: with everything
    elem_list4 = ["EEE", "SSS", "III", "SSS", "BBB", "SSS", "HHH", "SSS", "SSS", "III", "SSS", "MMM", "SSS", "HHH",
                  "SSS", "EEE"]
    dotbracket_string4 = "...(((...(((...(((...))))))...)))...(((...)))..."
# Execution db1 = [] db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True)) db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, False)) db1 = ''.join(db1) db2 = [] db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True)) db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False)) db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True)) db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, False)) db2 = ''.join(db2) db3 = [] db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True)) db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, False)) db3 = ''.join(db3) db4 = [] db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True)) db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False)) db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True)) db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, False)) db4 = ''.join(db4) # testing assert db1 == dotbracket_string1 assert db2 == dotbracket_string2 assert db3 == dotbracket_string3 assert db4 == dotbracket_string4
normal
{ "blob_id": "60b1a77d2de4a52ae9597f88917c4a3996c99923", "index": 5626, "step-1": "<mask token>\n\n\ndef test_createColorVector():\n k = 2\n no_sec_peak = 1\n template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,\n 'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector1)\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II': \n 0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,\n 'SE': 0}\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector2)\n last_idx = -1\n last_kmer = ''\n test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += kmer_counts[kmer] / norm\n last_idx = idx\n last_kmer = kmer\n test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS': \n 11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}\n no_sec_peak2 = 0\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,\n template_sTree, kmer_counts2, color_hm, no_sec_peak2,\n normalization_vector2)\n test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()\n ][0]\n test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm\n test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef 
test_createColorVector():\n k = 2\n no_sec_peak = 1\n template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,\n 'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector1)\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II': \n 0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,\n 'SE': 0}\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector2)\n last_idx = -1\n last_kmer = ''\n test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += kmer_counts[kmer] / norm\n last_idx = idx\n last_kmer = kmer\n test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS': \n 11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}\n no_sec_peak2 = 0\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,\n template_sTree, kmer_counts2, color_hm, no_sec_peak2,\n normalization_vector2)\n test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()\n ][0]\n test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm\n test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\ndef test_helpAddIBloop():\n k = 3\n template1 = ['EEE']\n internalloop = True\n bulge = True\n forward = True\n new_template1 = helpAddIBloop(k, template1, 
internalloop, bulge, forward)\n template2 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = True\n bulge = True\n forward = False\n new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)\n template3_f = ['EEE']\n template3_b = ['EEE', 'SSS', 'III', 'SSS', 'HHH']\n internalloop = True\n bulge = False\n forward = True\n new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge,\n forward)\n forward = False\n new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge,\n forward)\n template4_f = ['EEE']\n template4_b = ['EEE', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = False\n bulge = True\n forward = True\n new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge,\n forward)\n forward = False\n new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge,\n forward)\n assert new_template1 == ['EEE', 'SSS', 'III', 'SSS', 'BBB']\n assert new_template2 == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',\n 'HHH', 'SSS', 'SSS', 'III']\n assert new_template3_f == ['EEE', 'SSS', 'III']\n assert new_template3_b == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']\n assert new_template4_f == ['EEE', 'SSS', 'BBB']\n assert new_template4_b == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']\n\n\ndef test_element2dotbracket():\n k3 = 3\n k2 = 2\n k4 = 4\n elem_list1 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'EEE']\n dotbracket_string1 = '...(((...(((...(((...))))))...)))...'\n elem_list2 = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',\n 'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']\n dotbracket_string2 = '..((..((..))..))..((..((..))))..'\n elem_list3 = ['EEEE', 'SSSS', 'SSSS', 'EEEE']\n dotbracket_string3 = '....(((())))....'\n elem_list4 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']\n dotbracket_string4 = '...(((...(((...(((...))))))...)))...(((...)))...'\n db1 = []\n db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))\n db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, \n False))\n db1 = ''.join(db1)\n db2 = []\n db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))\n db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))\n db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))\n db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, \n False))\n db2 = ''.join(db2)\n db3 = []\n db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))\n db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, \n False))\n db3 = ''.join(db3)\n db4 = []\n db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))\n db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))\n db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))\n db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, \n False))\n db4 = ''.join(db4)\n assert db1 == dotbracket_string1\n assert db2 == dotbracket_string2\n assert db3 == dotbracket_string3\n assert db4 == dotbracket_string4\n", "step-3": "<mask token>\n\n\ndef test_processData():\n data = ['example/example1.fa', 'example/example2.fa']\n struct_data = ['example/exampleStrucData/exampleStructuralData1.fa',\n 'example/exampleStrucData/exampleStructuralData2.fa']\n k = 3\n top = 10\n peak = None\n feature = None\n cmd = False\n no_sec_peak = 1\n process = SecStructure(data, data, k, peak, top, feature, cmd,\n struct_data, no_sec_peak)\n alphabet1 = process.getStructProfile1().getAlphabet()\n alphabet2 = 
process.getStructProfile2().getAlphabet()\n kmer_counts1 = process.getStructProfile1().getProfile()\n kmer_counts2 = process.getStructProfile2().getProfile()\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert len(alphabet1) == 6\n for e in ['S', 'H', 'B', 'I', 'M', 'E']:\n assert e in alphabet1\n assert len(alphabet2) == 2\n assert 'S' in alphabet2\n assert 'E' in alphabet2\n assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3,\n 'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4, 'BS': 1}\n assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}\n assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE'\n assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...(((...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n no_sec_peak = 0\n process2 = SecStructure(data, data, k, peak, top, feature, cmd,\n struct_data, no_sec_peak)\n alphabet1 = process2.getStructProfile1().getAlphabet()\n alphabet2 = process2.getStructProfile2().getAlphabet()\n kmer_counts1 = process2.getStructProfile1().getProfile()\n kmer_counts2 = process2.getStructProfile2().getProfile()\n results = SecStructure.processData(process2)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert len(alphabet1) == 10\n for e in ['s', 'h', 'b', 'i', 'm', 'E', 'S', 'B', 'I', 'E']:\n assert e in alphabet1\n assert len(alphabet2) == 4\n for e in ['s', 'S', 'e', 'E']:\n assert e in alphabet2\n assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1,\n 'Is': 1, 'bB': 1, 'Bs': 1}\n assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}\n assert template1 == 'EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE'\n assert dotbracket_string1 == '...(((...(((...((())))))...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n sProfile1 = process.getStructProfile1()\n sProfile2 = process.getStructProfile2()\n alphabet3 = ['S', 'B', 'E']\n alphabet4 = ['S', 'I', 'E']\n sProfile1.setAlphabet(alphabet3)\n sProfile2.setAlphabet(alphabet4)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSBBBSSSSSSSSSEEE'\n assert dotbracket_string1 == '...(((...((())))))...'\n assert template2 == 'EEESSSIIISSSSSSIIISSSEEE'\n assert dotbracket_string2 == '...(((...((()))...)))...'\n alphabet5 = ['S', 'H', 'E']\n alphabet6 = ['S', 'H', 'M', 'E']\n sProfile1.setAlphabet(alphabet5)\n sProfile2.setAlphabet(alphabet6)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSHHHSSSEEE'\n assert dotbracket_string1 == '...(((...)))...'\n assert template2 == 'EEESSSHHHSSSMMMSSSHHHSSSEEE'\n assert dotbracket_string2 == '...(((...)))...(((...)))...'\n alphabet7 = ['S', 'H', 'E', 'B', 'I']\n alphabet8 = ['S', 'M', 'E']\n sProfile1.setAlphabet(alphabet7)\n sProfile2.setAlphabet(alphabet8)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n assert 
dotbracket_string1 == '...(((...(((...(((...))))))...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n\n\ndef test_createColorVector():\n k = 2\n no_sec_peak = 1\n template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,\n 'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector1)\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II': \n 0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,\n 'SE': 0}\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector2)\n last_idx = -1\n last_kmer = ''\n test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += kmer_counts[kmer] / norm\n last_idx = idx\n last_kmer = kmer\n test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS': \n 11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}\n no_sec_peak2 = 0\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,\n template_sTree, kmer_counts2, color_hm, no_sec_peak2,\n normalization_vector2)\n test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()\n ][0]\n test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm\n test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\ndef 
test_helpAddIBloop():\n k = 3\n template1 = ['EEE']\n internalloop = True\n bulge = True\n forward = True\n new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)\n template2 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = True\n bulge = True\n forward = False\n new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)\n template3_f = ['EEE']\n template3_b = ['EEE', 'SSS', 'III', 'SSS', 'HHH']\n internalloop = True\n bulge = False\n forward = True\n new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge,\n forward)\n forward = False\n new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge,\n forward)\n template4_f = ['EEE']\n template4_b = ['EEE', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = False\n bulge = True\n forward = True\n new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge,\n forward)\n forward = False\n new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge,\n forward)\n assert new_template1 == ['EEE', 'SSS', 'III', 'SSS', 'BBB']\n assert new_template2 == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',\n 'HHH', 'SSS', 'SSS', 'III']\n assert new_template3_f == ['EEE', 'SSS', 'III']\n assert new_template3_b == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']\n assert new_template4_f == ['EEE', 'SSS', 'BBB']\n assert new_template4_b == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']\n\n\ndef test_element2dotbracket():\n k3 = 3\n k2 = 2\n k4 = 4\n elem_list1 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'EEE']\n dotbracket_string1 = '...(((...(((...(((...))))))...)))...'\n elem_list2 = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',\n 'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']\n dotbracket_string2 = '..((..((..))..))..((..((..))))..'\n elem_list3 = ['EEEE', 'SSSS', 'SSSS', 'EEEE']\n dotbracket_string3 = '....(((())))....'\n elem_list4 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']\n dotbracket_string4 = '...(((...(((...(((...))))))...)))...(((...)))...'\n db1 = []\n db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))\n db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, \n False))\n db1 = ''.join(db1)\n db2 = []\n db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))\n db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))\n db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))\n db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, \n False))\n db2 = ''.join(db2)\n db3 = []\n db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))\n db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, \n False))\n db3 = ''.join(db3)\n db4 = []\n db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))\n db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))\n db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))\n db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, \n False))\n db4 = ''.join(db4)\n assert db1 == dotbracket_string1\n assert db2 == dotbracket_string2\n assert db3 == dotbracket_string3\n assert db4 == dotbracket_string4\n", "step-4": "from src.secStructure import *\nfrom suffix_trees import STree\nimport math\nimport re\n\n\ndef test_processData():\n data = ['example/example1.fa', 'example/example2.fa']\n struct_data = ['example/exampleStrucData/exampleStructuralData1.fa',\n 'example/exampleStrucData/exampleStructuralData2.fa']\n k = 3\n top = 10\n peak = None\n 
feature = None\n cmd = False\n no_sec_peak = 1\n process = SecStructure(data, data, k, peak, top, feature, cmd,\n struct_data, no_sec_peak)\n alphabet1 = process.getStructProfile1().getAlphabet()\n alphabet2 = process.getStructProfile2().getAlphabet()\n kmer_counts1 = process.getStructProfile1().getProfile()\n kmer_counts2 = process.getStructProfile2().getProfile()\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert len(alphabet1) == 6\n for e in ['S', 'H', 'B', 'I', 'M', 'E']:\n assert e in alphabet1\n assert len(alphabet2) == 2\n assert 'S' in alphabet2\n assert 'E' in alphabet2\n assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3,\n 'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4, 'BS': 1}\n assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}\n assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE'\n assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...(((...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n no_sec_peak = 0\n process2 = SecStructure(data, data, k, peak, top, feature, cmd,\n struct_data, no_sec_peak)\n alphabet1 = process2.getStructProfile1().getAlphabet()\n alphabet2 = process2.getStructProfile2().getAlphabet()\n kmer_counts1 = process2.getStructProfile1().getProfile()\n kmer_counts2 = process2.getStructProfile2().getProfile()\n results = SecStructure.processData(process2)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert len(alphabet1) == 10\n for e in ['s', 'h', 'b', 'i', 'm', 'E', 'S', 'B', 'I', 'E']:\n assert e in alphabet1\n assert len(alphabet2) == 4\n for e in ['s', 'S', 'e', 'E']:\n assert e in alphabet2\n assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1,\n 'Is': 1, 'bB': 1, 'Bs': 1}\n assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}\n assert template1 == 'EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE'\n assert dotbracket_string1 == '...(((...(((...((())))))...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n sProfile1 = process.getStructProfile1()\n sProfile2 = process.getStructProfile2()\n alphabet3 = ['S', 'B', 'E']\n alphabet4 = ['S', 'I', 'E']\n sProfile1.setAlphabet(alphabet3)\n sProfile2.setAlphabet(alphabet4)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSBBBSSSSSSSSSEEE'\n assert dotbracket_string1 == '...(((...((())))))...'\n assert template2 == 'EEESSSIIISSSSSSIIISSSEEE'\n assert dotbracket_string2 == '...(((...((()))...)))...'\n alphabet5 = ['S', 'H', 'E']\n alphabet6 = ['S', 'H', 'M', 'E']\n sProfile1.setAlphabet(alphabet5)\n sProfile2.setAlphabet(alphabet6)\n results = SecStructure.processData(process)\n template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSHHHSSSEEE'\n assert dotbracket_string1 == '...(((...)))...'\n assert template2 == 'EEESSSHHHSSSMMMSSSHHHSSSEEE'\n assert dotbracket_string2 == '...(((...)))...(((...)))...'\n alphabet7 = ['S', 'H', 'E', 'B', 'I']\n alphabet8 = ['S', 'M', 'E']\n sProfile1.setAlphabet(alphabet7)\n sProfile2.setAlphabet(alphabet8)\n results = SecStructure.processData(process)\n 
template1 = results[0][0]\n template2 = results[1][0]\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n assert template1 == 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n assert dotbracket_string1 == '...(((...(((...(((...))))))...)))...'\n assert template2 == 'EEESSSSSSEEE'\n assert dotbracket_string2 == '...((()))...'\n\n\ndef test_createColorVector():\n k = 2\n no_sec_peak = 1\n template = 'EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE'\n kmer_counts = {'EE': 5, 'ES': 7, 'SS': 20, 'SI': 10, 'II': 15, 'IS': 11,\n 'SB': 5, 'BB': 6, 'BS': 5, 'SH': 4, 'HH': 5, 'HS': 4, 'SE': 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector1)\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n normalization_vector2 = {'EE': 0, 'ES': 0, 'SS': 0.7, 'SI': 0.1, 'II': \n 0.2, 'IS': 0, 'SB': 0, 'BB': 0, 'BS': 0, 'SH': 0, 'HH': 0, 'HS': 0,\n 'SE': 0}\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k,\n template_sTree, kmer_counts, color_hm, no_sec_peak,\n normalization_vector2)\n last_idx = -1\n last_kmer = ''\n test_color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += kmer_counts[kmer] / norm\n last_idx = idx\n last_kmer = kmer\n test_color_hm = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n kmer_counts2 = {'Ee': 5, 'eS': 7, 'sS': 20, 'Si': 10, 'iI': 15, 'iS': \n 11, 'Sb': 5, 'Bb': 6, 'bS': 5, 'sH': 4, 'Hh': 5, 'hS': 4, 'Se': 7}\n no_sec_peak2 = 0\n color_hm = {str(i): (0) for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k,\n template_sTree, kmer_counts2, color_hm, no_sec_peak2,\n normalization_vector2)\n test_color_hm2 = {str(i): (0) for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.\n escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n idx = [(idx + i) for i in range(0, len(kmer)) if kmer[i].isupper()\n ][0]\n test_color_hm2[str(idx + 1)] += kmer_counts2[kmer] / norm\n test_color_hm2 = {x: (math.log(y, 2) if y > 0 else y) for x, y in\n test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n assert len(not_matched3) == 0\n assert new_color_hm2 is not 
new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\ndef test_helpAddIBloop():\n k = 3\n template1 = ['EEE']\n internalloop = True\n bulge = True\n forward = True\n new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)\n template2 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = True\n bulge = True\n forward = False\n new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)\n template3_f = ['EEE']\n template3_b = ['EEE', 'SSS', 'III', 'SSS', 'HHH']\n internalloop = True\n bulge = False\n forward = True\n new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge,\n forward)\n forward = False\n new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge,\n forward)\n template4_f = ['EEE']\n template4_b = ['EEE', 'SSS', 'BBB', 'SSS', 'HHH']\n internalloop = False\n bulge = True\n forward = True\n new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge,\n forward)\n forward = False\n new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge,\n forward)\n assert new_template1 == ['EEE', 'SSS', 'III', 'SSS', 'BBB']\n assert new_template2 == ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS',\n 'HHH', 'SSS', 'SSS', 'III']\n assert new_template3_f == ['EEE', 'SSS', 'III']\n assert new_template3_b == ['EEE', 'SSS', 'III', 'SSS', 'HHH', 'SSS', 'III']\n assert new_template4_f == ['EEE', 'SSS', 'BBB']\n assert new_template4_b == ['EEE', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS']\n\n\ndef test_element2dotbracket():\n k3 = 3\n k2 = 2\n k4 = 4\n elem_list1 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'EEE']\n dotbracket_string1 = '...(((...(((...(((...))))))...)))...'\n elem_list2 = ['EE', 'SS', 'II', 'SS', 'HH', 'SS', 'II', 'SS', 'MM',\n 'SS', 'BB', 'SS', 'HH', 'SS', 'SS', 'EE']\n dotbracket_string2 = '..((..((..))..))..((..((..))))..'\n elem_list3 = ['EEEE', 'SSSS', 'SSSS', 'EEEE']\n dotbracket_string3 = '....(((())))....'\n elem_list4 = ['EEE', 'SSS', 'III', 'SSS', 'BBB', 'SSS', 'HHH', 'SSS',\n 'SSS', 'III', 'SSS', 'MMM', 'SSS', 'HHH', 'SSS', 'EEE']\n dotbracket_string4 = '...(((...(((...(((...))))))...)))...(((...)))...'\n db1 = []\n db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))\n db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, \n False))\n db1 = ''.join(db1)\n db2 = []\n db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))\n db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))\n db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))\n db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, \n False))\n db2 = ''.join(db2)\n db3 = []\n db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))\n db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, \n False))\n db3 = ''.join(db3)\n db4 = []\n db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))\n db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))\n db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))\n db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, \n False))\n db4 = ''.join(db4)\n assert db1 == dotbracket_string1\n assert db2 == dotbracket_string2\n assert db3 == dotbracket_string3\n assert db4 == dotbracket_string4\n", "step-5": "from src.secStructure import *\nfrom suffix_trees import STree\nimport math\nimport re\n\n\ndef test_processData():\n # Test1: ignoring peak 
position\n data = ['example/example1.fa', 'example/example2.fa']\n struct_data = ['example/exampleStrucData/exampleStructuralData1.fa',\n 'example/exampleStrucData/exampleStructuralData2.fa']\n k = 3\n top = 10\n peak = None\n feature = None\n cmd = False\n no_sec_peak = 1 # True\n\n # Executing\n\n process = SecStructure(data, data, k, peak, top, feature, cmd, struct_data, no_sec_peak)\n\n alphabet1 = process.getStructProfile1().getAlphabet()\n alphabet2 = process.getStructProfile2().getAlphabet()\n\n kmer_counts1 = process.getStructProfile1().getProfile()\n kmer_counts2 = process.getStructProfile2().getProfile()\n\n results = SecStructure.processData(process)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n # Testing\n\n assert len(alphabet1) == 6\n for e in [\"S\", \"H\", \"B\", \"I\", \"M\", \"E\"]:\n assert e in alphabet1\n\n assert len(alphabet2) == 2\n assert \"S\" in alphabet2\n assert \"E\" in alphabet2\n\n assert kmer_counts1 == {'EE': 4, 'ES': 1, 'SS': 11, 'SH': 1, 'HH': 3, 'II': 4, 'IS': 1, 'SM': 1, 'MM': 1, 'BB': 4,\n 'BS': 1}\n assert kmer_counts2 == {'SS': 20, 'EE': 7, 'ES': 3, 'SE': 2}\n\n assert template1 == \"EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSMMMSSSHHHSSSEEE\"\n assert dotbracket_string1 == \"...(((...(((...(((...))))))...)))...(((...)))...\"\n\n assert template2 == \"EEESSSSSSEEE\"\n assert dotbracket_string2 == \"...((()))...\"\n\n # Test2: with peak position\n no_sec_peak = 0 # True\n\n # Executing\n\n process2 = SecStructure(data, data, k, peak, top, feature, cmd, struct_data, no_sec_peak)\n\n alphabet1 = process2.getStructProfile1().getAlphabet()\n alphabet2 = process2.getStructProfile2().getAlphabet()\n\n kmer_counts1 = process2.getStructProfile1().getProfile()\n kmer_counts2 = process2.getStructProfile2().getProfile()\n\n results = SecStructure.processData(process2)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n # Testing\n\n assert len(alphabet1) == 10\n for e in [\"s\", \"h\", \"b\", \"i\", \"m\", \"E\", \"S\", \"B\", \"I\", \"E\"]:\n assert e in alphabet1\n\n assert len(alphabet2) == 4\n for e in [\"s\", \"S\", \"e\", \"E\"]:\n assert e in alphabet2\n\n assert kmer_counts1 == {'eE': 1, 'Es': 1, 'sS': 1, 'Sh': 1, 'iI': 1, 'Is': 1, 'bB': 1, 'Bs': 1}\n assert kmer_counts2 == {'sS': 3, 'Ss': 2, 'sE': 1, 'Ee': 1, 'Se': 1}\n\n assert template1 == \"EEESSSIIISSSBBBSSSSSSSSSIIISSSEEE\"\n assert dotbracket_string1 == \"...(((...(((...((())))))...)))...\"\n\n assert template2 == \"EEESSSSSSEEE\"\n assert dotbracket_string2 == \"...((()))...\"\n\n # Test3: different alphabets\n sProfile1 = process.getStructProfile1()\n sProfile2 = process.getStructProfile2()\n\n # Test3a: alphabets with no multiloop\n\n alphabet3 = [\"S\", \"B\", \"E\"]\n alphabet4 = [\"S\", \"I\", \"E\"]\n\n sProfile1.setAlphabet(alphabet3)\n sProfile2.setAlphabet(alphabet4)\n\n results = SecStructure.processData(process)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n assert template1 == \"EEESSSBBBSSSSSSSSSEEE\"\n assert dotbracket_string1 == \"...(((...((())))))...\"\n\n assert template2 == \"EEESSSIIISSSSSSIIISSSEEE\"\n assert dotbracket_string2 == \"...(((...((()))...)))...\"\n\n # Test3b: alphabets with only hairpin or hairpin and multiloop\n alphabet5 = [\"S\", \"H\", \"E\"]\n alphabet6 = [\"S\", \"H\", \"M\", \"E\"]\n\n 
sProfile1.setAlphabet(alphabet5)\n sProfile2.setAlphabet(alphabet6)\n\n results = SecStructure.processData(process)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n assert template1 == \"EEESSSHHHSSSEEE\"\n assert dotbracket_string1 == \"...(((...)))...\"\n\n assert template2 == \"EEESSSHHHSSSMMMSSSHHHSSSEEE\"\n assert dotbracket_string2 == \"...(((...)))...(((...)))...\"\n\n # Test3c: ('flawed') alphabets with no multiloops\n\n alphabet7 = [\"S\", \"H\", \"E\", \"B\", \"I\"]\n alphabet8 = [\"S\", \"M\", \"E\"] # should be equal to [\"S\",\"E\"]\n\n sProfile1.setAlphabet(alphabet7)\n sProfile2.setAlphabet(alphabet8)\n\n results = SecStructure.processData(process)\n\n template1 = results[0][0]\n template2 = results[1][0]\n\n dotbracket_string1 = results[0][1]\n dotbracket_string2 = results[1][1]\n\n assert template1 == \"EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE\"\n assert dotbracket_string1 == \"...(((...(((...(((...))))))...)))...\"\n\n assert template2 == \"EEESSSSSSEEE\"\n assert dotbracket_string2 == \"...((()))...\"\n\n\ndef test_createColorVector():\n # Test1: no normalization vector wanted\n k = 2\n no_sec_peak = 1\n template = \"EEESSSIIISSSBBBSSSHHHSSSSSSIIISSSEEE\"\n kmer_counts = {\"EE\": 5, \"ES\": 7, \"SS\": 20, \"SI\": 10, \"II\": 15, \"IS\": 11, \"SB\": 5, \"BB\": 6, \"BS\": 5, \"SH\": 4,\n \"HH\": 5, \"HS\": 4, \"SE\": 7}\n template_sTree = STree.STree(template)\n normalization_vector1 = None\n\n color_hm = {str(i): 0 for i in range(1, len(template) + 1)}\n\n # Executing\n new_color_hm1, not_matched1, color_domain_max1 = createColorVector(k, template_sTree, kmer_counts, color_hm,\n no_sec_peak, normalization_vector1)\n\n assert len(color_hm) == len(new_color_hm1)\n for i in color_hm.keys():\n x = color_hm[i]\n if x > 0:\n assert new_color_hm1[i] == math.log(x, 2)\n else:\n assert new_color_hm1[i] == 0\n assert len(not_matched1) == 0\n assert color_domain_max1 == 4.954196310386876\n\n # Test2: with normalization vector\n\n normalization_vector2 = {\"EE\": 0, \"ES\": 0, \"SS\": 0.7, \"SI\": 0.1, \"II\": 0.2, \"IS\": 0, \"SB\": 0, \"BB\": 0, \"BS\": 0,\n \"SH\": 0, \"HH\": 0, \"HS\": 0, \"SE\": 0}\n\n # Execution\n\n color_hm = {str(i): 0 for i in range(1, len(template) + 1)}\n new_color_hm2, not_matched2, color_domain_max2 = createColorVector(k, template_sTree, kmer_counts, color_hm,\n no_sec_peak, normalization_vector2)\n\n last_idx = -1\n last_kmer = \"\"\n\n test_color_hm = {str(i): 0 for i in range(1, len(template) + 1)}\n for kmer in normalization_vector2:\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.escape(kmer)), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n for i in range(0, k):\n current_idx = str(idx + i + 1)\n if last_idx + 2 == int(current_idx) and last_kmer == kmer:\n continue\n test_color_hm[current_idx] += (kmer_counts[kmer] / norm)\n last_idx = idx\n last_kmer = kmer\n\n test_color_hm = {x: math.log(y, 2) if y > 0 else y for x, y in test_color_hm.items()}\n test_color_domain_max = max(test_color_hm.values())\n\n # Testing\n\n assert new_color_hm1 is not new_color_hm2\n assert len(color_hm) == len(new_color_hm2)\n assert len(not_matched2) == 0\n assert color_domain_max2 == test_color_domain_max\n for i in new_color_hm2.keys():\n assert new_color_hm2[i] == test_color_hm[i]\n\n # Test3: normalization vector and secondary peak position\n\n kmer_counts2 = {\"Ee\": 5, \"eS\": 7, 
\"sS\": 20, \"Si\": 10, \"iI\": 15, \"iS\": 11, \"Sb\": 5, \"Bb\": 6, \"bS\": 5, \"sH\": 4,\n \"Hh\": 5, \"hS\": 4, \"Se\": 7}\n no_sec_peak2 = 0\n\n # Execution\n\n color_hm = {str(i): 0 for i in range(1, len(template) + 1)}\n new_color_hm3, not_matched3, color_domain_max3 = createColorVector(k, template_sTree, kmer_counts2, color_hm,\n no_sec_peak2, normalization_vector2)\n\n test_color_hm2 = {str(i): 0 for i in range(1, len(template) + 1)}\n for kmer in kmer_counts2.keys():\n indices_list = [t.start() for t in re.finditer('(?={0})'.format(re.escape(kmer.upper())), template)]\n indices_list.sort()\n norm = normalization_vector2[kmer.upper()]\n if norm == 0:\n norm = 1\n for idx in indices_list:\n # use only peak-position in 2-mer for visualization\n idx = [idx + i for i in range(0, len(kmer)) if kmer[i].isupper()][0]\n test_color_hm2[str(idx + 1)] += (kmer_counts2[kmer] / norm)\n\n test_color_hm2 = {x: math.log(y, 2) if y > 0 else y for x, y in test_color_hm2.items()}\n test_color_domain_max2 = max(test_color_hm2.values())\n\n # Testing\n\n assert len(not_matched3) == 0\n assert new_color_hm2 is not new_color_hm3\n assert len(color_hm) == len(new_color_hm3)\n for i in test_color_hm2:\n assert test_color_hm2[i] == new_color_hm3[i]\n assert test_color_domain_max2 == color_domain_max3\n\n\ndef test_helpAddIBloop():\n k = 3\n\n # Test 1: forward and all true\n template1 = [\"EEE\"]\n internalloop = True\n bulge = True\n forward = True\n\n # Execution\n new_template1 = helpAddIBloop(k, template1, internalloop, bulge, forward)\n\n # Test 2: backward and all true\n template2 = [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\", \"SSS\", \"HHH\"]\n internalloop = True\n bulge = True\n forward = False\n\n # Execution\n new_template2 = helpAddIBloop(k, template2, internalloop, bulge, forward)\n\n # Test 3: only internal loops, forward and backward\n template3_f = [\"EEE\"]\n template3_b = [\"EEE\", \"SSS\", \"III\", \"SSS\", \"HHH\"]\n internalloop = True\n bulge = False\n forward = True\n\n # Execution\n new_template3_f = helpAddIBloop(k, template3_f, internalloop, bulge, forward)\n\n forward = False\n new_template3_b = helpAddIBloop(k, template3_b, internalloop, bulge, forward)\n\n # Test 4: only bulges, forward and backward\n template4_f = [\"EEE\"]\n template4_b = [\"EEE\", \"SSS\", \"BBB\", \"SSS\", \"HHH\"]\n internalloop = False\n bulge = True\n forward = True\n\n # Execution\n new_template4_f = helpAddIBloop(k, template4_f, internalloop, bulge, forward)\n\n forward = False\n new_template4_b = helpAddIBloop(k, template4_b, internalloop, bulge, forward)\n\n # Testing\n assert new_template1 == [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\"]\n assert new_template2 == [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\", \"SSS\", \"HHH\", \"SSS\", \"SSS\", \"III\"]\n assert new_template3_f == [\"EEE\", \"SSS\", \"III\"]\n assert new_template3_b == [\"EEE\", \"SSS\", \"III\", \"SSS\", \"HHH\", \"SSS\", \"III\"]\n assert new_template4_f == [\"EEE\", \"SSS\", \"BBB\"]\n assert new_template4_b == [\"EEE\", \"SSS\", \"BBB\", \"SSS\", \"HHH\", \"SSS\"]\n\n\ndef test_element2dotbracket():\n k3 = 3\n k2 = 2\n k4 = 4\n\n # Test1 without multiloop\n elem_list1 = [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\", \"SSS\", \"HHH\", \"SSS\", \"SSS\", \"III\", \"SSS\", \"EEE\"]\n dotbracket_string1 = \"...(((...(((...(((...))))))...)))...\"\n\n # Test2 with multiloop\n elem_list2 = [\"EE\", \"SS\", \"II\", \"SS\", \"HH\", \"SS\", \"II\", \"SS\", \"MM\", \"SS\", \"BB\", \"SS\", \"HH\", \"SS\", \"SS\", \"EE\"]\n dotbracket_string2 
= \"..((..((..))..))..((..((..))))..\"\n\n # Test 3 without loops\n elem_list3 = [\"EEEE\", \"SSSS\", \"SSSS\", \"EEEE\"]\n dotbracket_string3 = \"....(((())))....\"\n\n # Test 5 with everything\n elem_list4 = [\"EEE\", \"SSS\", \"III\", \"SSS\", \"BBB\", \"SSS\", \"HHH\", \"SSS\", \"SSS\", \"III\", \"SSS\", \"MMM\", \"SSS\", \"HHH\",\n \"SSS\", \"EEE\"]\n dotbracket_string4 = \"...(((...(((...(((...))))))...)))...(((...)))...\"\n\n # Execution\n db1 = []\n db1.extend(element2dotbracket(elem_list1, k3, 0, 6, True))\n db1.extend(element2dotbracket(elem_list1, k3, 7, len(elem_list1) - 1, False))\n db1 = ''.join(db1)\n\n db2 = []\n db2.extend(element2dotbracket(elem_list2, k2, 0, 4, True))\n db2.extend(element2dotbracket(elem_list2, k2, 5, 8, False))\n db2.extend(element2dotbracket(elem_list2, k2, 9, 12, True))\n db2.extend(element2dotbracket(elem_list2, k2, 13, len(elem_list2) - 1, False))\n db2 = ''.join(db2)\n\n db3 = []\n db3.extend(element2dotbracket(elem_list3, k4, 0, 1, True))\n db3.extend(element2dotbracket(elem_list3, k4, 2, len(elem_list3) - 1, False))\n db3 = ''.join(db3)\n\n db4 = []\n db4.extend(element2dotbracket(elem_list4, k3, 0, 6, True))\n db4.extend(element2dotbracket(elem_list4, k3, 7, 11, False))\n db4.extend(element2dotbracket(elem_list4, k3, 12, 13, True))\n db4.extend(element2dotbracket(elem_list4, k3, 14, len(elem_list4) - 1, False))\n db4 = ''.join(db4)\n\n # testing\n assert db1 == dotbracket_string1\n assert db2 == dotbracket_string2\n assert db3 == dotbracket_string3\n assert db4 == dotbracket_string4\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
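The kmer_counts dictionaries asserted in the tests above map structure k-mers (e.g. 'SS', 'ES') to tallies. A sliding window over one structure string shows where such a profile comes from; the function name below is made up, and the real accumulation across reads in src.secStructure may differ:

from collections import Counter

def count_kmers(structure, k):
    # Slide a width-k window over the structure string and tally each
    # k-mer, e.g. count_kmers('EESS', 2) -> {'EE': 1, 'ES': 1, 'SS': 1}.
    return dict(Counter(structure[i:i + k] for i in range(len(structure) - k + 1)))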
from .facebook import *
normal
{ "blob_id": "7901a2bd4ae1070c8263d3cd97351b01ffbf7bb1", "index": 7246, "step-1": "<mask token>\n", "step-2": "from .facebook import *\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
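The one-line module above is the usual package-__init__ re-export idiom: a relative star import lifts a submodule's public names into the package namespace. A minimal sketch with hypothetical names:

# mypkg/facebook.py  (hypothetical submodule)
__all__ = ['login']        # `import *` copies only the names listed here

def login(token):
    return token is not None

def _sign(payload):        # not in __all__, so `import *` skips it
    return hash(payload)

# mypkg/__init__.py
from .facebook import *    # afterwards `import mypkg; mypkg.login(...)` works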
from kivy.uix.button import Button from kivy.uix.gridlayout import GridLayout from kivy.uix.floatlayout import FloatLayout from kivy.uix.label import Label from kivy.app import App import webbrowser a=0.0 b="?" n=0.0 k="" g="" class ghetto(GridLayout): def matCallback(self,a): webbrowser.open_new("https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09") def biyoCallback(self,a): webbrowser.open_new("https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09") def edebCallback(self,a): webbrowser.open_new("https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09") def kimyaCallback(self,a): webbrowser.open_new("https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09") def tarihCallback(self,a): webbrowser.open_new("https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs") def cogCallback(self,a): webbrowser.open_new("https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09") def bilisiCallback(self,a): webbrowser.open_new("https://us02web.zoom.us/j/3469922894") def muzCallback(self,a): webbrowser.open_new("https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09") def ingCallback(self,a): webbrowser.open_new("https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09") def felCallback(self,a): webbrowser.open_new("https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09") def __init__(self,**kwargs): super(ghetto, self).__init__(**kwargs) self.cols = 2 self.btn1 = Button(text='MATEMATİK') self.btn1.bind(on_press=self.matCallback) self.btn2 = Button(text='KİMYA') self.btn2.bind(on_press=self.kimyaCallback) self.btn3 = Button(text='BİYOLOJİ') self.btn3.bind(on_press=self.biyoCallback) self.btn4 = Button(text='FELSEFE') self.btn4.bind(on_press=self.felCallback) self.btn6 = Button(text='EDEBİYAT') self.btn6.bind(on_press=self.edebCallback) self.btn7 = Button(text='BİLİŞİM') self.btn7.bind(on_press=self.bilisiCallback) self.btn5 = Button(text='TARİH') self.btn5.bind(on_press=self.tarihCallback) self.btn8 = Button(text='MÜZİK') self.btn8.bind(on_press=self.muzCallback) self.btn9 = Button(text='İNGİLİZCE') self.btn9.bind(on_press=self.ingCallback) self.btn10 = Button(text='COĞRAFYA') self.btn10.bind(on_press=self.cogCallback) self.add_widget(self.btn10) self.add_widget(self.btn1) self.add_widget(self.btn2) self.add_widget(self.btn3) self.add_widget(self.btn4) self.add_widget(self.btn5) self.add_widget(self.btn6) self.add_widget(self.btn7) self.add_widget(self.btn8) self.add_widget(self.btn9) class main(App): def build(self): return ghetto() if __name__ == "__main__": main().run()
normal
{ "blob_id": "39affe139eec4cf6877646188839d79ed575235c", "index": 8952, "step-1": "<mask token>\n\n\nclass ghetto(GridLayout):\n <mask token>\n\n def biyoCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09'\n )\n\n def edebCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09'\n )\n\n def kimyaCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09'\n )\n <mask token>\n <mask token>\n\n def bilisiCallback(self, a):\n webbrowser.open_new('https://us02web.zoom.us/j/3469922894')\n\n def muzCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09'\n )\n\n def ingCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09'\n )\n\n def felCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09'\n )\n\n def __init__(self, **kwargs):\n super(ghetto, self).__init__(**kwargs)\n self.cols = 2\n self.btn1 = Button(text='MATEMATİK')\n self.btn1.bind(on_press=self.matCallback)\n self.btn2 = Button(text='KİMYA')\n self.btn2.bind(on_press=self.kimyaCallback)\n self.btn3 = Button(text='BİYOLOJİ')\n self.btn3.bind(on_press=self.biyoCallback)\n self.btn4 = Button(text='FELSEFE')\n self.btn4.bind(on_press=self.felCallback)\n self.btn6 = Button(text='EDEBİYAT')\n self.btn6.bind(on_press=self.edebCallback)\n self.btn7 = Button(text='BİLİŞİM')\n self.btn7.bind(on_press=self.bilisiCallback)\n self.btn5 = Button(text='TARİH')\n self.btn5.bind(on_press=self.tarihCallback)\n self.btn8 = Button(text='MÜZİK')\n self.btn8.bind(on_press=self.muzCallback)\n self.btn9 = Button(text='İNGİLİZCE')\n self.btn9.bind(on_press=self.ingCallback)\n self.btn10 = Button(text='COĞRAFYA')\n self.btn10.bind(on_press=self.cogCallback)\n self.add_widget(self.btn10)\n self.add_widget(self.btn1)\n self.add_widget(self.btn2)\n self.add_widget(self.btn3)\n self.add_widget(self.btn4)\n self.add_widget(self.btn5)\n self.add_widget(self.btn6)\n self.add_widget(self.btn7)\n self.add_widget(self.btn8)\n self.add_widget(self.btn9)\n\n\nclass main(App):\n\n def build(self):\n return ghetto()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ghetto(GridLayout):\n\n def matCallback(self, a):\n webbrowser.open_new(\n 'https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09'\n )\n\n def biyoCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09'\n )\n\n def edebCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09'\n )\n\n def kimyaCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09'\n )\n <mask token>\n <mask token>\n\n def bilisiCallback(self, a):\n webbrowser.open_new('https://us02web.zoom.us/j/3469922894')\n\n def muzCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09'\n )\n\n def ingCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09'\n )\n\n def felCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09'\n )\n\n def __init__(self, **kwargs):\n super(ghetto, 
self).__init__(**kwargs)\n self.cols = 2\n self.btn1 = Button(text='MATEMATİK')\n self.btn1.bind(on_press=self.matCallback)\n self.btn2 = Button(text='KİMYA')\n self.btn2.bind(on_press=self.kimyaCallback)\n self.btn3 = Button(text='BİYOLOJİ')\n self.btn3.bind(on_press=self.biyoCallback)\n self.btn4 = Button(text='FELSEFE')\n self.btn4.bind(on_press=self.felCallback)\n self.btn6 = Button(text='EDEBİYAT')\n self.btn6.bind(on_press=self.edebCallback)\n self.btn7 = Button(text='BİLİŞİM')\n self.btn7.bind(on_press=self.bilisiCallback)\n self.btn5 = Button(text='TARİH')\n self.btn5.bind(on_press=self.tarihCallback)\n self.btn8 = Button(text='MÜZİK')\n self.btn8.bind(on_press=self.muzCallback)\n self.btn9 = Button(text='İNGİLİZCE')\n self.btn9.bind(on_press=self.ingCallback)\n self.btn10 = Button(text='COĞRAFYA')\n self.btn10.bind(on_press=self.cogCallback)\n self.add_widget(self.btn10)\n self.add_widget(self.btn1)\n self.add_widget(self.btn2)\n self.add_widget(self.btn3)\n self.add_widget(self.btn4)\n self.add_widget(self.btn5)\n self.add_widget(self.btn6)\n self.add_widget(self.btn7)\n self.add_widget(self.btn8)\n self.add_widget(self.btn9)\n\n\nclass main(App):\n\n def build(self):\n return ghetto()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ghetto(GridLayout):\n\n def matCallback(self, a):\n webbrowser.open_new(\n 'https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09'\n )\n\n def biyoCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09'\n )\n\n def edebCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09'\n )\n\n def kimyaCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09'\n )\n\n def tarihCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs'\n )\n\n def cogCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09'\n )\n\n def bilisiCallback(self, a):\n webbrowser.open_new('https://us02web.zoom.us/j/3469922894')\n\n def muzCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09'\n )\n\n def ingCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09'\n )\n\n def felCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09'\n )\n\n def __init__(self, **kwargs):\n super(ghetto, self).__init__(**kwargs)\n self.cols = 2\n self.btn1 = Button(text='MATEMATİK')\n self.btn1.bind(on_press=self.matCallback)\n self.btn2 = Button(text='KİMYA')\n self.btn2.bind(on_press=self.kimyaCallback)\n self.btn3 = Button(text='BİYOLOJİ')\n self.btn3.bind(on_press=self.biyoCallback)\n self.btn4 = Button(text='FELSEFE')\n self.btn4.bind(on_press=self.felCallback)\n self.btn6 = Button(text='EDEBİYAT')\n self.btn6.bind(on_press=self.edebCallback)\n self.btn7 = Button(text='BİLİŞİM')\n self.btn7.bind(on_press=self.bilisiCallback)\n self.btn5 = Button(text='TARİH')\n self.btn5.bind(on_press=self.tarihCallback)\n self.btn8 = Button(text='MÜZİK')\n self.btn8.bind(on_press=self.muzCallback)\n self.btn9 = Button(text='İNGİLİZCE')\n self.btn9.bind(on_press=self.ingCallback)\n self.btn10 = Button(text='COĞRAFYA')\n self.btn10.bind(on_press=self.cogCallback)\n self.add_widget(self.btn10)\n 
self.add_widget(self.btn1)\n self.add_widget(self.btn2)\n self.add_widget(self.btn3)\n self.add_widget(self.btn4)\n self.add_widget(self.btn5)\n self.add_widget(self.btn6)\n self.add_widget(self.btn7)\n self.add_widget(self.btn8)\n self.add_widget(self.btn9)\n\n\nclass main(App):\n\n def build(self):\n return ghetto()\n\n\nif __name__ == '__main__':\n main().run()\n", "step-4": "from kivy.uix.button import Button\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.label import Label\nfrom kivy.app import App\nimport webbrowser\na = 0.0\nb = '?'\nn = 0.0\nk = ''\ng = ''\n\n\nclass ghetto(GridLayout):\n\n def matCallback(self, a):\n webbrowser.open_new(\n 'https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09'\n )\n\n def biyoCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09'\n )\n\n def edebCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09'\n )\n\n def kimyaCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09'\n )\n\n def tarihCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs'\n )\n\n def cogCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09'\n )\n\n def bilisiCallback(self, a):\n webbrowser.open_new('https://us02web.zoom.us/j/3469922894')\n\n def muzCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09'\n )\n\n def ingCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09'\n )\n\n def felCallback(self, a):\n webbrowser.open_new(\n 'https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09'\n )\n\n def __init__(self, **kwargs):\n super(ghetto, self).__init__(**kwargs)\n self.cols = 2\n self.btn1 = Button(text='MATEMATİK')\n self.btn1.bind(on_press=self.matCallback)\n self.btn2 = Button(text='KİMYA')\n self.btn2.bind(on_press=self.kimyaCallback)\n self.btn3 = Button(text='BİYOLOJİ')\n self.btn3.bind(on_press=self.biyoCallback)\n self.btn4 = Button(text='FELSEFE')\n self.btn4.bind(on_press=self.felCallback)\n self.btn6 = Button(text='EDEBİYAT')\n self.btn6.bind(on_press=self.edebCallback)\n self.btn7 = Button(text='BİLİŞİM')\n self.btn7.bind(on_press=self.bilisiCallback)\n self.btn5 = Button(text='TARİH')\n self.btn5.bind(on_press=self.tarihCallback)\n self.btn8 = Button(text='MÜZİK')\n self.btn8.bind(on_press=self.muzCallback)\n self.btn9 = Button(text='İNGİLİZCE')\n self.btn9.bind(on_press=self.ingCallback)\n self.btn10 = Button(text='COĞRAFYA')\n self.btn10.bind(on_press=self.cogCallback)\n self.add_widget(self.btn10)\n self.add_widget(self.btn1)\n self.add_widget(self.btn2)\n self.add_widget(self.btn3)\n self.add_widget(self.btn4)\n self.add_widget(self.btn5)\n self.add_widget(self.btn6)\n self.add_widget(self.btn7)\n self.add_widget(self.btn8)\n self.add_widget(self.btn9)\n\n\nclass main(App):\n\n def build(self):\n return ghetto()\n\n\nif __name__ == '__main__':\n main().run()\n", "step-5": "from kivy.uix.button import Button\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.floatlayout import FloatLayout\r\nfrom kivy.uix.label import Label\r\nfrom kivy.app import App\r\nimport 
webbrowser\r\na=0.0\r\nb=\"?\"\r\nn=0.0\r\nk=\"\"\r\ng=\"\"\r\nclass ghetto(GridLayout):\r\n def matCallback(self,a):\r\n webbrowser.open_new(\"https://us05web.zoom.us/j/2688374138?pwd=ekJpMnJsdWkyTWdGcE0zMEZzdjFydz09\")\r\n def biyoCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/8651192984?pwd=cFV0bUNPTXRUOGVPZWw4dEhDQm0vUT09\")\r\n def edebCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/4724567240?pwd=MzIzam5jcE9MeEkxTkVnR1plVVZ6dz09\")\r\n def kimyaCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/8080079163?pwd=UitJVWs4Y0dOU2ZjbHMvZUVBQVZXdz09\")\r\n def tarihCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/7045543550?pwd=yPBZGImZndgSF-Mj4JRTaFTq2Oh94Bs\")\r\n def cogCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/6832847624?pwd=TzhNUzlFNHM2K3FpR09nVHhCaFZPQT09\")\r\n def bilisiCallback(self,a):\r\n webbrowser.open_new(\"https://us02web.zoom.us/j/3469922894\")\r\n def muzCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/7411417677?pwd=K1A5czBGWWlnRzdBOWs0VEJQaUloUT09\")\r\n def ingCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/6712002142?pwd=azFMYjljb3lPOVBoTXdYT3FabmpIUT09\")\r\n def felCallback(self,a):\r\n webbrowser.open_new(\"https://us04web.zoom.us/j/8358223221?pwd=eTlXcm4vc3RVUnNOSzV0UmhqM1ZEZz09\")\r\n\r\n \r\n \r\n def __init__(self,**kwargs):\r\n super(ghetto, self).__init__(**kwargs)\r\n self.cols = 2\r\n self.btn1 = Button(text='MATEMATİK')\r\n self.btn1.bind(on_press=self.matCallback)\r\n self.btn2 = Button(text='KİMYA')\r\n self.btn2.bind(on_press=self.kimyaCallback)\r\n self.btn3 = Button(text='BİYOLOJİ')\r\n self.btn3.bind(on_press=self.biyoCallback)\r\n self.btn4 = Button(text='FELSEFE')\r\n self.btn4.bind(on_press=self.felCallback)\r\n self.btn6 = Button(text='EDEBİYAT')\r\n self.btn6.bind(on_press=self.edebCallback)\r\n self.btn7 = Button(text='BİLİŞİM')\r\n self.btn7.bind(on_press=self.bilisiCallback)\r\n self.btn5 = Button(text='TARİH')\r\n self.btn5.bind(on_press=self.tarihCallback)\r\n self.btn8 = Button(text='MÜZİK')\r\n self.btn8.bind(on_press=self.muzCallback)\r\n self.btn9 = Button(text='İNGİLİZCE')\r\n self.btn9.bind(on_press=self.ingCallback)\r\n self.btn10 = Button(text='COĞRAFYA')\r\n self.btn10.bind(on_press=self.cogCallback)\r\n self.add_widget(self.btn10)\r\n self.add_widget(self.btn1)\r\n self.add_widget(self.btn2)\r\n self.add_widget(self.btn3)\r\n self.add_widget(self.btn4)\r\n self.add_widget(self.btn5)\r\n self.add_widget(self.btn6)\r\n self.add_widget(self.btn7)\r\n self.add_widget(self.btn8)\r\n self.add_widget(self.btn9)\r\n \r\n \r\n\r\nclass main(App):\r\n def build(self):\r\n return ghetto()\r\n\r\nif __name__ == \"__main__\":\r\n main().run()\r\n", "step-ids": [ 11, 12, 15, 17, 18 ] }
[ 11, 12, 15, 17, 18 ]
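The Kivy app above repeats one shape ten times: build a Button, bind its on_press event to a callback, and have the callback hand a fixed Zoom URL to webbrowser.open_new. A sketch of the same binding driven by a mapping instead, using functools.partial to close over each URL (subject names kept from the original, URLs abbreviated):

import webbrowser
from functools import partial
from kivy.uix.button import Button

def open_link(url, _button):
    # on_press passes the pressed Button instance; it is not needed here
    webbrowser.open_new(url)

links = {'MATEMATİK': 'https://us05web.zoom.us/j/2688374138',
         'KİMYA': 'https://us04web.zoom.us/j/8080079163'}
for name, url in links.items():
    btn = Button(text=name)
    btn.bind(on_press=partial(open_link, url))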
from packer.utils import hello_world
normal
{ "blob_id": "d549303228e860ae278a5a9497a4a3a68989aeca", "index": 6097, "step-1": "<mask token>\n", "step-2": "from packer.utils import hello_world\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
from django.db import models  # import models from django.db
from django.utils import timezone  # import timezone from django.utils


# Create your models here.

class Post(models.Model):
    # Defines a Post object; the Django model class is passed as its base.
    # Because it is a Django model, it is stored in the database.
    author = models.ForeignKey('auth.User')  # foreign key, a link to another object
    title = models.CharField(max_length=200)  # character limit
    text = models.TextField()  # no character limit
    created_date = models.DateTimeField(default=timezone.now)  # Date format
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):  # a Python method
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title


class User(models.Model):
    id = models.CharField(max_length=30, primary_key='true')
    password = models.CharField(max_length=50)
    reg_date = models.DateField(default=timezone.now)
    upt_date = models.DateField(default=timezone.now)
    last_pwd = models.CharField(max_length=50)

    def chg_password(self):
        self.last_pwd = self.password
        self.save()

    def __id__(self):
        return self.id
normal
{ "blob_id": "3aa8c9b39174f0ed5799d6991516b34ca669b7d6", "index": 9765, "step-1": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n\nclass User(models.Model):\n id = models.CharField(max_length=30, primary_key='true')\n password = models.CharField(max_length=50)\n reg_date = models.DateField(default=timezone.now)\n upt_date = models.DateField(default=timezone.now)\n last_pwd = models.CharField(max_length=50)\n\n def chg_password(self):\n self.last_pwd = self.password\n self.save()\n\n def __id__(self):\n return self.id\n", "step-2": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title\n\n\nclass User(models.Model):\n id = models.CharField(max_length=30, primary_key='true')\n password = models.CharField(max_length=50)\n reg_date = models.DateField(default=timezone.now)\n upt_date = models.DateField(default=timezone.now)\n last_pwd = models.CharField(max_length=50)\n\n def chg_password(self):\n self.last_pwd = self.password\n self.save()\n\n def __id__(self):\n return self.id\n", "step-3": "<mask token>\n\n\nclass Post(models.Model):\n author = models.ForeignKey('auth.User')\n title = models.CharField(max_length=200)\n text = models.TextField()\n created_date = models.DateTimeField(default=timezone.now)\n published_date = models.DateTimeField(blank=True, null=True)\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title\n\n\nclass User(models.Model):\n id = models.CharField(max_length=30, primary_key='true')\n password = models.CharField(max_length=50)\n reg_date = models.DateField(default=timezone.now)\n upt_date = models.DateField(default=timezone.now)\n last_pwd = models.CharField(max_length=50)\n\n def chg_password(self):\n self.last_pwd = self.password\n self.save()\n\n def __id__(self):\n return self.id\n", "step-4": "from django.db import models\nfrom django.utils import timezone\n\n\nclass Post(models.Model):\n author = models.ForeignKey('auth.User')\n title = models.CharField(max_length=200)\n text = models.TextField()\n created_date = models.DateTimeField(default=timezone.now)\n published_date = models.DateTimeField(blank=True, null=True)\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title\n\n\nclass User(models.Model):\n id = models.CharField(max_length=30, primary_key='true')\n password = models.CharField(max_length=50)\n reg_date = models.DateField(default=timezone.now)\n upt_date = models.DateField(default=timezone.now)\n last_pwd = models.CharField(max_length=50)\n\n def chg_password(self):\n self.last_pwd = self.password\n self.save()\n\n def __id__(self):\n return self.id\n", "step-5": "from django.db import models # db에 있는 models을 가져옴\nfrom django.utils import timezone # 유틸에 있는 timezone을 가져옴\n\n\n# Create your models here.\n\nclass Post(models.Model):\n # Post라는 객체를 정의함 인수로 장고모델을 가져왔음\n # 장고모델이기 때문에 데이터베이스에 저장된다.\n author = models.ForeignKey('auth.User') # 외래키, 다른 객체에 대한 링크\n title = models.CharField(max_length=200) # 글자수 제한\n text = models.TextField() # 글자수제한없음\n created_date = models.DateTimeField(default=timezone.now) # Date형식\n published_date = models.DateTimeField(blank=True, null=True)\n\n def publish(self): # 파이썬의 메소드\n 
self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title\n\n\nclass User(models.Model):\n id = models.CharField(max_length=30, primary_key='true')\n password = models.CharField(max_length=50)\n reg_date = models.DateField(default=timezone.now)\n upt_date = models.DateField(default=timezone.now)\n last_pwd = models.CharField(max_length=50)\n\n def chg_password(self):\n self.last_pwd = self.password\n self.save()\n\n def __id__(self):\n return self.id\n\n", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
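A short usage sketch for the Post model above, as if from a Django shell; the blog app label is an assumption, and the ForeignKey('auth.User') form without on_delete dates the code to Django versions before 2.0:

from django.contrib.auth.models import User as AuthUser
from blog.models import Post  # assumed app label

author = AuthUser.objects.first()
post = Post.objects.create(author=author, title='Hello', text='First post')
assert post.published_date is None  # blank until published
post.publish()                      # stamps published_date = timezone.now() and saves
assert post.published_date is not None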
""" Problem Link: https://practice.geeksforgeeks.org/problems/palindrome/0 Given an integer, check whether it is a palindrome or not. Input: The first line of input contains an integer T denoting the number of test cases. For each test case there will be single line containing single integer N. Output: Print "Yes" or "No" (without quotes) depending on whether the number is palindrome or not. Constraints: 1 <= T <= 1000 1 <= N <= 10000 Example: Input: 3 6 167 55555 Output: Yes No Yes """ for _ in range(int(input())): n = int(input()) temp = n rev = 0 while temp: rev = (rev*10)+(temp%10) temp //= 10 print("Yes" if rev == n else "No")
normal
{ "blob_id": "ea12ede51881f6e826a044df5d7aba457c434658", "index": 6050, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor _ in range(int(input())):\n n = int(input())\n temp = n\n rev = 0\n while temp:\n rev = rev * 10 + temp % 10\n temp //= 10\n print('Yes' if rev == n else 'No')\n", "step-3": "\"\"\"\nProblem Link: https://practice.geeksforgeeks.org/problems/palindrome/0\n\nGiven an integer, check whether it is a palindrome or not.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. \nFor each test case there will be single line containing single integer N.\n\nOutput:\nPrint \"Yes\" or \"No\" (without quotes) depending on whether the number is palindrome or not.\n\nConstraints:\n1 <= T <= 1000\n1 <= N <= 10000\n\nExample:\nInput:\n3\n6\n167\n55555\n\nOutput:\nYes\nNo\nYes\n\"\"\"\nfor _ in range(int(input())):\n n = int(input())\n temp = n\n rev = 0\n while temp:\n rev = (rev*10)+(temp%10)\n temp //= 10\n print(\"Yes\" if rev == n else \"No\")", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
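The digit-reversal loop above runs in O(number of digits); comparing the decimal string with its reverse gives the same answer and makes a handy cross-check:

def is_palindrome(n):
    s = str(n)
    return s == s[::-1]  # e.g. '55555' reversed is still '55555'

for n in (6, 167, 55555):
    print("Yes" if is_palindrome(n) else "No")  # Yes / No / Yes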
import requests

# Request a WeChat (Weixin) API access token with hard-coded credentials
# and print the raw JSON response body.
rsp = requests.get(
    'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s'
    % ('wx27c0e6ef6a7f0716', '6e29e232daf462652f66ee8acc11838b'))
print(rsp.text)
normal
{ "blob_id": "d86fe165e378e56650e3b76bf3d0f72e2a50a023", "index": 5082, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(rsp.text)\n", "step-3": "<mask token>\nrsp = requests.get(\n 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s'\n % ('wx27c0e6ef6a7f0716', '6e29e232daf462652f66ee8acc11838b'))\nprint(rsp.text)\n", "step-4": "import requests\nrsp = requests.get(\n 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s'\n % ('wx27c0e6ef6a7f0716', '6e29e232daf462652f66ee8acc11838b'))\nprint(rsp.text)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
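The token request above interpolates a hard-coded appid/secret into the query string. A sketch of the same WeChat access_token call with the credentials read from the environment and the query built by requests (the environment variable names are assumptions):

import os
import requests

rsp = requests.get('https://api.weixin.qq.com/cgi-bin/token',
                   params={'grant_type': 'client_credential',
                           'appid': os.environ['WX_APPID'],    # assumed env vars
                           'secret': os.environ['WX_SECRET']})
print(rsp.json())  # on success: {'access_token': ..., 'expires_in': 7200}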
#!/usr/bin/env python import sys import struct import Queue import logging import redis logging.getLogger("scapy.runtime").setLevel(logging.ERROR) from threading import Thread from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr from scapy.all import Packet, IPOption from scapy.all import PacketListField, ShortField, IntField, LongField, BitField, FieldListField, FieldLenField, ByteField from scapy.all import Dot1Q, IP, UDP, Raw from scapy.layers.inet import _IPOption_HDR from check.verification import Verification NUM = 0 redis_session = redis.Redis(host='localhost') def get_if(): ifs=get_if_list() iface=None for i in get_if_list(): if "enp0s8" in i: iface=i break; if not iface: print "Cannot find enp0s8 interface" exit(1) return iface class SwitchTrace(Packet): fields_desc = [ BitField("swid", 0x0, 6), BitField("inport", 0x0, 6), BitField("rule", 0x0, 20)] def extract_padding(self, p): return "", p class IVPOption_MRI(IPOption): name = "MRI" option = 31 fields_desc = [ _IPOption_HDR, FieldLenField("length", None, fmt="B", length_of="swtraces", adjust=lambda pkt,l:l*2+4), ShortField("count", 0), PacketListField("swtraces", [], SwitchTrace, count_from=lambda pkt:(pkt.count*1)) ] def check_packet(queue): while True: path = queue.get() verif = Verification() verif_path = verif.verif_packet(path) def handle_pkt(pkt, q): #pkt.show2() global NUM count = 0 path = [] vlanid = pkt[Dot1Q].vlan while (count < pkt['MRI'].count): swid = pkt['MRI'].swtraces[count].swid inport = pkt['MRI'].swtraces[count].inport ruleid = pkt['MRI'].swtraces[count].rule dst_ip = pkt['IP'].dst path.insert(0, [dst_ip, swid, inport, ruleid]) count = count + 1 NUM = NUM + 1 q.put([path, NUM, len(path), vlanid]) print("Path %i: %s and vlan ID: %d" % (NUM, path, vlanid)) sys.stdout.flush() def main(): q = Queue.Queue(maxsize=0) workers = 5 for i in range(workers): thread = Thread(target=check_packet, args=(q, )) thread.setDaemon(True) thread.start() iface = 'enp0s8' print 'Path Format [vlanID, [dst_ip, swID, inport, ruleID], ...]\n' sys.stdout.flush() try: sniff(filter='', iface = iface, prn = lambda x: handle_pkt(x, q)) finally: for key in redis_session.scan_iter("s*"): redis_session.delete(key) if __name__ == '__main__': main()
normal
{ "blob_id": "e4ecc1746e907f11936683384e1edb34dd637de7", "index": 8171, "step-1": "#!/usr/bin/env python\nimport sys\nimport struct\nimport Queue\nimport logging\nimport redis\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\n\nfrom threading import Thread\nfrom scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr\nfrom scapy.all import Packet, IPOption\nfrom scapy.all import PacketListField, ShortField, IntField, LongField, BitField, FieldListField, FieldLenField, ByteField\nfrom scapy.all import Dot1Q, IP, UDP, Raw\nfrom scapy.layers.inet import _IPOption_HDR\nfrom check.verification import Verification\n\nNUM = 0\nredis_session = redis.Redis(host='localhost')\n\n\ndef get_if():\n ifs=get_if_list()\n iface=None\n for i in get_if_list():\n if \"enp0s8\" in i:\n iface=i\n break;\n if not iface:\n print \"Cannot find enp0s8 interface\"\n exit(1)\n return iface\n\n\nclass SwitchTrace(Packet):\n fields_desc = [ BitField(\"swid\", 0x0, 6),\n BitField(\"inport\", 0x0, 6),\n BitField(\"rule\", 0x0, 20)]\n\n def extract_padding(self, p):\n return \"\", p\n\n\nclass IVPOption_MRI(IPOption):\n name = \"MRI\"\n option = 31\n fields_desc = [ _IPOption_HDR,\n FieldLenField(\"length\", None, fmt=\"B\",\n length_of=\"swtraces\",\n adjust=lambda pkt,l:l*2+4),\n ShortField(\"count\", 0),\n PacketListField(\"swtraces\",\n [],\n SwitchTrace,\n count_from=lambda pkt:(pkt.count*1)) ]\n\n\ndef check_packet(queue):\n while True:\n path = queue.get()\n verif = Verification()\n verif_path = verif.verif_packet(path)\n\n\ndef handle_pkt(pkt, q):\n #pkt.show2()\n global NUM\n count = 0\n path = []\n vlanid = pkt[Dot1Q].vlan\n while (count < pkt['MRI'].count):\n swid = pkt['MRI'].swtraces[count].swid\n inport = pkt['MRI'].swtraces[count].inport\n ruleid = pkt['MRI'].swtraces[count].rule\n dst_ip = pkt['IP'].dst\n path.insert(0, [dst_ip, swid, inport, ruleid])\n count = count + 1\n NUM = NUM + 1\n q.put([path, NUM, len(path), vlanid])\n print(\"Path %i: %s and vlan ID: %d\" % (NUM, path, vlanid))\n sys.stdout.flush()\n\ndef main():\n q = Queue.Queue(maxsize=0)\n workers = 5\n\n for i in range(workers):\n thread = Thread(target=check_packet, args=(q, ))\n thread.setDaemon(True)\n thread.start()\n\n iface = 'enp0s8'\n print 'Path Format [vlanID, [dst_ip, swID, inport, ruleID], ...]\\n'\n sys.stdout.flush()\n try:\n sniff(filter='', iface = iface, prn = lambda x: handle_pkt(x, q))\n finally:\n for key in redis_session.scan_iter(\"s*\"):\n redis_session.delete(key)\n\n\nif __name__ == '__main__':\n main()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
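In the sniffer above, handle_pkt produces decoded paths onto a Queue that five daemon threads consume in check_packet. That producer/consumer shape can be exercised without scapy; a minimal Python 3 sketch (the original uses the Python 2 Queue module, and task_done/join are added here so the demo can exit cleanly):

import queue
from threading import Thread

q = queue.Queue()

def worker():
    while True:
        path = q.get()        # blocks until a path arrives
        print('verifying', path)
        q.task_done()

for _ in range(5):            # mirrors `workers = 5` in main()
    Thread(target=worker, daemon=True).start()

for path in (['s1', 's2'], ['s3']):
    q.put(path)               # producer side, like handle_pkt
q.join()                      # wait until every queued path is handled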
import pymysql from app_module.models import User, Vehicle, Address, Customer, Location, Coupon, VehicleClass, Corporation, Corporate from datetime import datetime HOSTNAME = 'localhost' USERNAME = 'root' PASSWORD = '123456' DATABASE = 'proj_p2' def get_connection(): my_sql_connection = pymysql.connect(host=HOSTNAME, user=USERNAME, passwd=PASSWORD, db=DATABASE) return my_sql_connection def run_query(query, args=None): conn = get_connection() cur = conn.cursor() cur.execute(query, args) rs = cur.fetchall() if (len(rs) != 0): return rs conn.commit() cur.close() conn.close() def insert_address(address_obj): run_query('''insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)''' , (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode))) rs = run_query('''select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s''' , (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode))) return rs[0][0] def insert_customer(customer_obj): run_query('''insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) ''' , (customer_obj.cust_type, customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email, customer_obj.cust_phonenum, customer_obj.address_id, customer_obj.username, customer_obj.password)) rs = run_query( '''select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc''' , (customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email, customer_obj.cust_phonenum)) return rs[0][0] def insert_vehicle(vehicle_obj): run_query('''insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values (%s, %s, %s, %s, %s, %s, %s) ''' , ( vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num, vehicle_obj.location_id)) rs = run_query('''select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin = %s and veh_license = %s and vc_num = %s and ol_id = %s ''' , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num, vehicle_obj.location_id)) return rs[0][0] def insert_vehicle_class(class_obj): run_query('''insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)''' , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile))) rs = run_query('''select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = %s ''' , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile))) return rs[0][0] def insert_office_location(location_obj): run_query('''insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, %s, %s, %s, %s) ''' , (location_obj.phone, location_obj.state, location_obj.city, location_obj.street, int(location_obj.zipcode))) rs = run_query('''select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s and ol_street=%s and ol_zipcode=%s ''' , (location_obj.phone, location_obj.state, location_obj.city, location_obj.street, int(location_obj.zipcode))) return rs[0][0] def insert_corporation(corp_obj): run_query('''insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)''' , (corp_obj.corp_name, 
def insert_address(address_obj):
    run_query('''insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)''',
              (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))
    rs = run_query('''select * from zlrz_address where state = %s and city = %s and street = %s and zipcode = %s''',
                   (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))
    return rs[0][0]


def insert_customer(customer_obj):
    run_query('''insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id,
                 username, password) values (%s, %s, %s, %s, %s, %s, %s, %s)''',
              (customer_obj.cust_type, customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email,
               customer_obj.cust_phonenum, customer_obj.address_id, customer_obj.username, customer_obj.password))
    rs = run_query('''select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s
                      and cust_phonenum = %s order by cust_id desc''',
                   (customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email,
                    customer_obj.cust_phonenum))
    return rs[0][0]


def insert_vehicle(vehicle_obj):
    run_query('''insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id)
                 values (%s, %s, %s, %s, %s, %s, %s)''',
              (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num,
               vehicle_obj.license_num, vehicle_obj.class_num, vehicle_obj.location_id))
    rs = run_query('''select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s
                      and veh_vin = %s and veh_license = %s and vc_num = %s and ol_id = %s''',
                   (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num,
                    vehicle_obj.license_num, vehicle_obj.class_num, vehicle_obj.location_id))
    return rs[0][0]


def insert_vehicle_class(class_obj):
    run_query('''insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)''',
              (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))
    rs = run_query('''select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s
                      and vc_feeovermile = %s''',
                   (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))
    return rs[0][0]


def insert_office_location(location_obj):
    run_query('''insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode)
                 values (%s, %s, %s, %s, %s)''',
              (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,
               int(location_obj.zipcode)))
    rs = run_query('''select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s
                      and ol_street = %s and ol_zipcode = %s''',
                   (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,
                    int(location_obj.zipcode)))
    return rs[0][0]


def insert_corporation(corp_obj):
    run_query('''insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)''',
              (corp_obj.corp_name, corp_obj.corp_regnum))
    rs = run_query('''select * from zlrz_corporation where corp_name = %s and corp_regnum = %s''',
                   (corp_obj.corp_name, corp_obj.corp_regnum))
    return rs[0][0]


def insert_corporate(corporate_obj):
    run_query('''insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)''',
              (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))
    rs = run_query('''select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s
                      and cust_type = %s''',
                   (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id,
                    corporate_obj.cust_type))
    return rs[0][0]


def insert_individual(individual_obj):
    run_query('''insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum,
                 cust_type) values (%s, %s, %s, %s, %s)''',
              (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,
               individual_obj.cust_insurpolnum, individual_obj.cust_type))
    rs = run_query('''select * from zlrz_individual where cust_id = %s and cust_driverlicnum = %s
                      and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s''',
                   (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,
                    individual_obj.cust_insurpolnum, individual_obj.cust_type))
    return rs[0][0]


def insert_invoice(invoice_obj):
    run_query('''insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s)''',
              (invoice_obj.inv_date, invoice_obj.inv_amount))
    rs = run_query('''select * from zlrz_invoice where inv_date = %s and inv_amount = %s''',
                   (invoice_obj.inv_date, invoice_obj.inv_amount))
    return rs[0][0]


def insert_payment(payment_obj):
    run_query('''insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount)
                 values (%s, %s, %s, %s, %s)''',
              (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum, payment_obj.inv_id,
               payment_obj.pay_amount))
    rs = run_query('''select * from zlrz_payment where pay_date = %s and pay_method = %s and pay_cardnum = %s
                      and inv_id = %s and pay_amount = %s''',
                   (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum, payment_obj.inv_id,
                    payment_obj.pay_amount))
    return rs[0][0]


def insert_rental(rental_obj):
    run_query('''insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer,
                 ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id)
                 values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''',
              (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer,
               rental_obj.ren_endodometer, rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,
               rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.ren_dropoffloc_id, rental_obj.inv_id,
               rental_obj.cou_id))
    rs = run_query('''select * from zlrz_rental where ren_pickupdate = %s and ren_dropoffdate = %s
                      and ren_startodometer = %s and ren_endodometer = %s and ren_dailylimit = %s and cust_id = %s
                      and cust_type = %s and veh_id = %s and ren_pickuplocid = %s and ren_dropoffloc_id = %s
                      and inv_id = %s and cou_id = %s''',
                   (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer,
                    rental_obj.ren_endodometer, rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,
                    rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.ren_dropoffloc_id, rental_obj.inv_id,
                    rental_obj.cou_id))
    return rs[0][0]


def insert_coupon(coupon_obj):
    run_query('''insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s)''',
              (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))
    if coupon_obj.validstart and coupon_obj.validend:
        rs = run_query('''select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s
                          order by cou_id desc''',
                       (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))
    else:
        # One-element tuple: a bare coupon_obj.cou_rate would hand pymysql
        # a scalar instead of a parameter sequence.
        rs = run_query('''select * from zlrz_coupons where cou_rate = %s and validstart is null
                          and validend is null order by cou_id desc''',
                       (coupon_obj.cou_rate,))
    return rs[0][0]


def insert_cust_coupon(cust_coupon_obj):
    run_query('''insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s)''',
              (cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj.cust_type,
               cust_coupon_obj.coupon_type))
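Each insert helper above recovers the new primary key by re-selecting the row it just wrote, sometimes with an "order by ... desc" to disambiguate duplicates; that costs an extra round trip and can race with a concurrent identical insert. A sketch of an alternative, not part of the original module, that reads the auto-increment key from pymysql's cursor.lastrowid instead:

def insert_returning_id(query, args=None):
    # Hypothetical variant of run_query for INSERT statements only:
    # cursor.lastrowid holds the auto-increment key of the row just
    # written, so no follow-up SELECT is needed.
    conn = get_connection()
    try:
        cur = conn.cursor()
        cur.execute(query, args)
        new_id = cur.lastrowid
        conn.commit()
        cur.close()
        return new_id
    finally:
        conn.close()

With it, a helper like insert_invoice would collapse to a single call that returns the new inv_id directly.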
def get_password(username):
    rs = run_query('''select password from zlrz_customer where username = %s''', (username,))
    return rs[0][0] if rs is not None else rs


def get_user_type(username):
    rs = run_query('''select cust_type from zlrz_customer where username = %s''', (username,))
    return rs[0][0] if rs is not None else rs


def get_user_id(username):
    rs = run_query('''select cust_id from zlrz_customer where username = %s''', (username,))
    return rs[0][0] if rs is not None else rs


def get_all_corporations():
    rs = run_query('''select * from zlrz_corporation''')
    return [] if rs is None else list(map(lambda t: Corporation(t[1], t[2], t[0]), rs))


def get_cust_coupon(cust_id):
    rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons
                      on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id
                      where zlrz_cust_coupon.cust_id = %s''',
                   (cust_id,))
    return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))


def get_coupon(cust_id):
    # Pick the customer's best usable coupon: the highest rate among coupons
    # whose validity window contains today, or which have no window at all.
    rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons
                      on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id
                      where zlrz_cust_coupon.cust_id = %s''',
                   (cust_id,))
    res = None
    maxrate = float('-inf')
    if rs is not None:
        coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))
        for cou in coupons:
            if cou.validstart and cou.validend:
                if (datetime.now() - cou.validstart).days >= 0 and (cou.validend - datetime.now()).days >= 0:
                    if cou.cou_rate > maxrate:
                        maxrate = cou.cou_rate
                        res = cou
            if not cou.validstart and not cou.validend:
                if cou.cou_rate > maxrate:
                    maxrate = cou.cou_rate
                    res = cou
    return res


def get_vehicles():
    """Get all vehicle rows (raw tuples)."""
    rs = run_query('''select * from zlrz_vehicle''')
    return [] if rs is None else rs


def get_all_customers():
    rs = run_query('''select * from zlrz_customer''')
    return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))


def get_all_corporate():
    rs = run_query('''select * from zlrz_corporate''')
    return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3]), rs))


def get_all_individual():
    rs = run_query('''select * from zlrz_individual''')
    return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3], t[4]), rs))


def get_all_vehicles():
    rs = run_query('''select * from zlrz_vehicle''')
    return [] if rs is None else list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[0]), rs))


def get_all_locations():
    """Get all location objects."""
    rs = run_query('''select * from zlrz_office_location''')
    return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))


def get_location_by_id(location_id):
    """Get the location object with the given id, or None."""
    rs = run_query('''select * from zlrz_office_location where ol_id = %s''', (location_id,))
    return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))[0] if rs is not None else None


def get_all_vehclasses():
    """Get all vehicle class objects."""
    rs = run_query('''select * from zlrz_vehicle_class''')
    return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))


def get_vehicle_by_id(vehicle_id):
    rs = run_query('''select * from zlrz_vehicle where veh_id = %s''', (int(vehicle_id),))
    return list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[0]), rs))[0] \
        if rs is not None else None


def get_vehicle_class(vehicle_id):
    rs = run_query('''select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class
                      on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num
                      where zlrz_vehicle.veh_id = %s''',
                   (int(vehicle_id),))
    return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0] if rs is not None else None
def delete_veh_class(vc_num):
    if vc_num == '':
        return
    # Refuse to delete a class that vehicles still reference.
    res = run_query('''select * from zlrz_vehicle where vc_num = %s''', (int(vc_num),))
    if res:
        return 1
    return run_query('''delete from zlrz_vehicle_class where vc_num = %s''', (int(vc_num),))


def delete_off_loc(location_id):
    if location_id == '':
        return
    # Refuse to delete a location that vehicles are still assigned to.
    res = run_query('''select * from zlrz_vehicle where ol_id = %s''', (int(location_id),))
    if res:
        return 1
    return run_query('''delete from zlrz_office_location where ol_id = %s''', (int(location_id),))


def delete_vehicle(veh_id):
    if veh_id == '':
        return
    return run_query('''delete from zlrz_vehicle where veh_id = %s''', (int(veh_id),))


def delete_customer(cust_id):
    if cust_id == '':
        return
    # Remove dependent rows first so the customer row can go last.
    run_query('''delete from zlrz_rental where cust_id = %s''', (int(cust_id),))
    run_query('''delete from zlrz_cust_coupon where cust_id = %s''', (int(cust_id),))
    run_query('''delete from zlrz_corporate where cust_id = %s''', (int(cust_id),))
    run_query('''delete from zlrz_individual where cust_id = %s''', (int(cust_id),))
    return run_query('''delete from zlrz_customer where cust_id = %s''', (int(cust_id),))


def delete_cust_coupon(cou_id):
    if cou_id == '':
        return
    rs = run_query('''delete from zlrz_cust_coupon where cou_id = %s''', (int(cou_id),))
    run_query('''delete from zlrz_coupons where cou_id = %s''', (int(cou_id),))
    return rs


def delete_corporation(corp_id):
    if corp_id == '':
        return
    # Refuse to delete a corporation that corporate customers still reference.
    res = run_query('''select * from zlrz_corporate where corp_id = %s''', (int(corp_id),))
    if res:
        return 1
    return run_query('''delete from zlrz_corporation where corp_id = %s''', (int(corp_id),))


def update_vehicle_class(class_obj):
    return run_query('''update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s
                        where vc_name = %s''',
                     (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile), class_obj.vc_name))
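delete_customer issues five dependent deletes, each on its own connection, so a failure partway leaves the database half-cleaned. A minimal transactional wrapper, sketched here on top of get_connection and not part of the original module, runs a batch on one connection and rolls it all back on error:

from contextlib import contextmanager

@contextmanager
def transaction():
    # Hypothetical helper: yield one cursor for several statements,
    # commit them together, roll everything back if the block raises.
    conn = get_connection()
    try:
        cur = conn.cursor()
        yield cur
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()

# Example (invented cust_id): both deletes succeed or neither does.
with transaction() as cur:
    cur.execute('delete from zlrz_rental where cust_id = %s', (42,))
    cur.execute('delete from zlrz_customer where cust_id = %s', (42,))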
{ "blob_id": "62bad8eeb3b51a5012dad761a60639d36429d8e8", "index": 7660, "step-1": "<mask token>\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(query, args)\n rs = cur.fetchall()\n if len(rs) != 0:\n return rs\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query(\n 'insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n rs = run_query(\n 'select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query(\n \"\"\"insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.\n last_name, customer_obj.cust_email, customer_obj.cust_phonenum,\n customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n 'select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.\n cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query(\n \"\"\"insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query(\n 'insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s \"\"\"\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query(\n \"\"\"insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n rs = run_query(\n \"\"\"select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_corporate(corporate_obj):\n run_query(\n 'insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, 
corporate_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_invoice(invoice_obj):\n run_query(\n 'insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query(\n 'select * from zlrz_invoice where inv_date = %s and inv_amount = %s',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_rental(rental_obj):\n run_query(\n \"\"\"insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query(\n \"\"\"select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s\"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query(\n 'insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend)\n )\n else:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'\n , coupon_obj.cou_rate)\n return rs[0][0]\n\n\n<mask token>\n\n\ndef get_user_type(username):\n rs = run_query('select cust_type from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('select cust_id from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\n<mask token>\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , cust_id)\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3],\n t[0]), rs))\n\n\n<mask token>\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('select * from zlrz_customer')\n return [] if rs is None else list(map(lambda t: Customer(t[1], 
t[2], t[\n 3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('select * from zlrz_corporate')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3]), rs))\n\n\n<mask token>\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[\n 3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location where ol_id = %s', (\n location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs)\n )[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle_class')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2],\n t[3], t[0]), rs))\n\n\n<mask token>\n\n\ndef get_vehicle_class(vehicle_id):\n rs = run_query(\n \"\"\"select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s\"\"\"\n , (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0\n ] if rs is not None else None\n\n\n<mask token>\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('select * from zlrz_office_location where ol_id=%s',\n int(location_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_office_location where ol_id=%s',\n int(location_id))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('delete from zlrz_vehicle where veh_id=%s', int(veh_id))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('delete from zlrz_rental where cust_id=%s', int(cust_id))\n rs4 = run_query('delete from zlrz_cust_coupon where cust_id=%s', int(\n cust_id))\n rs2 = run_query('delete from zlrz_corporate where cust_id=%s', int(cust_id)\n )\n rs3 = run_query('delete from zlrz_individual where cust_id=%s', int(\n cust_id))\n rs1 = run_query('delete from zlrz_customer where cust_id=%s', int(cust_id))\n return rs1\n\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('delete from zlrz_cust_coupon where cou_id=%s', int(cou_id)\n )\n rs2 = run_query('delete from zlrz_coupons where cou_id=%s', int(cou_id))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('select * from zlrz_corporation where corp_id=%s', int(\n corp_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_corporation where corp_id=%s', int\n (corp_id))\n return rs\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(query, args)\n rs = cur.fetchall()\n if len(rs) != 0:\n return rs\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query(\n 'insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n rs = run_query(\n 'select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n return 
rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query(\n \"\"\"insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.\n last_name, customer_obj.cust_email, customer_obj.cust_phonenum,\n customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n 'select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.\n cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query(\n \"\"\"insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query(\n 'insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s \"\"\"\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query(\n \"\"\"insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n rs = run_query(\n \"\"\"select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_corporation(corp_obj):\n run_query(\n 'insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n rs = run_query(\n 'select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n return rs[0][0]\n\n\ndef insert_corporate(corporate_obj):\n run_query(\n 'insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_invoice(invoice_obj):\n run_query(\n 'insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ',\n 
(invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query(\n 'select * from zlrz_invoice where inv_date = %s and inv_amount = %s',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\n<mask token>\n\n\ndef insert_rental(rental_obj):\n run_query(\n \"\"\"insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query(\n \"\"\"select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s\"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query(\n 'insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend)\n )\n else:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'\n , coupon_obj.cou_rate)\n return rs[0][0]\n\n\n<mask token>\n\n\ndef get_user_type(username):\n rs = run_query('select cust_type from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('select cust_id from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\n<mask token>\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , cust_id)\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3],\n t[0]), rs))\n\n\n<mask token>\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('select * from zlrz_customer')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[\n 3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('select * from zlrz_corporate')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3]), rs))\n\n\ndef get_all_individual():\n rs = run_query('select * from zlrz_individual')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3], t[4]), rs))\n\n\n<mask 
token>\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[\n 3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location where ol_id = %s', (\n location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs)\n )[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle_class')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2],\n t[3], t[0]), rs))\n\n\n<mask token>\n\n\ndef get_vehicle_class(vehicle_id):\n rs = run_query(\n \"\"\"select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s\"\"\"\n , (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0\n ] if rs is not None else None\n\n\n<mask token>\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('select * from zlrz_office_location where ol_id=%s',\n int(location_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_office_location where ol_id=%s',\n int(location_id))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('delete from zlrz_vehicle where veh_id=%s', int(veh_id))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('delete from zlrz_rental where cust_id=%s', int(cust_id))\n rs4 = run_query('delete from zlrz_cust_coupon where cust_id=%s', int(\n cust_id))\n rs2 = run_query('delete from zlrz_corporate where cust_id=%s', int(cust_id)\n )\n rs3 = run_query('delete from zlrz_individual where cust_id=%s', int(\n cust_id))\n rs1 = run_query('delete from zlrz_customer where cust_id=%s', int(cust_id))\n return rs1\n\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('delete from zlrz_cust_coupon where cou_id=%s', int(cou_id)\n )\n rs2 = run_query('delete from zlrz_coupons where cou_id=%s', int(cou_id))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('select * from zlrz_corporation where corp_id=%s', int(\n corp_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_corporation where corp_id=%s', int\n (corp_id))\n return rs\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(query, args)\n rs = cur.fetchall()\n if len(rs) != 0:\n return rs\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query(\n 'insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n rs = run_query(\n 'select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query(\n \"\"\"insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , 
(customer_obj.cust_type, customer_obj.first_name, customer_obj.\n last_name, customer_obj.cust_email, customer_obj.cust_phonenum,\n customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n 'select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.\n cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query(\n \"\"\"insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query(\n 'insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s \"\"\"\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query(\n \"\"\"insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n rs = run_query(\n \"\"\"select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_corporation(corp_obj):\n run_query(\n 'insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n rs = run_query(\n 'select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n return rs[0][0]\n\n\ndef insert_corporate(corporate_obj):\n run_query(\n 'insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_individual(individual_obj):\n run_query(\n 'insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum, cust_type) values (%s, %s, %s, %s, %s)'\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum,\n individual_obj.cust_insurcompname, individual_obj.cust_insurpolnum,\n individual_obj.cust_type))\n rs = run_query(\n 'select * from 
zlrz_individual where cust_id = %s and cust_driverlicnum = %s and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s'\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum,\n individual_obj.cust_insurcompname, individual_obj.cust_insurpolnum,\n individual_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_invoice(invoice_obj):\n run_query(\n 'insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query(\n 'select * from zlrz_invoice where inv_date = %s and inv_amount = %s',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\ndef insert_payment(payment_obj):\n run_query(\n \"\"\"insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount) \n values (%s, %s , %s , %s , %s) \"\"\"\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.\n pay_cardnum, payment_obj.inv_id, payment_obj.pay_amount))\n rs = run_query(\n \"\"\"select * from zlrz_payment where pay_date=%s and pay_method=%s and pay_cardnum=%s and inv_id=%s\n and pay_amount=%s\"\"\"\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.\n pay_cardnum, payment_obj.inv_id, payment_obj.pay_amount))\n return rs[0][0]\n\n\ndef insert_rental(rental_obj):\n run_query(\n \"\"\"insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query(\n \"\"\"select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s\"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query(\n 'insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend)\n )\n else:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'\n , coupon_obj.cou_rate)\n return rs[0][0]\n\n\ndef insert_cust_coupon(cust_coupon_obj):\n run_query(\n 'insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s) '\n , (cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj\n .cust_type, cust_coupon_obj.coupon_type))\n return\n\n\ndef get_password(username):\n rs = run_query('select password from zlrz_customer where username = %s',\n (username,))\n 
return rs[0][0] if rs is not None else rs\n\n\ndef get_user_type(username):\n rs = run_query('select cust_type from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('select cust_id from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_all_corporations():\n rs = run_query('select * from zlrz_corporation')\n return [] if rs is None else list(map(lambda t: Corporation(t[1], t[2],\n t[0]), rs))\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , cust_id)\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3],\n t[0]), rs))\n\n\ndef get_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , (cust_id,))\n res = None\n maxrate = float('-inf')\n if rs is not None:\n coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))\n for cou in coupons:\n if cou.validstart and cou.validend:\n if (datetime.now() - cou.validstart).days >= 0 and (cou.\n validend - datetime.now()).days >= 0:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n if not cou.validstart and not cou.validend:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n return res\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('select * from zlrz_customer')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[\n 3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('select * from zlrz_corporate')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3]), rs))\n\n\ndef get_all_individual():\n rs = run_query('select * from zlrz_individual')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3], t[4]), rs))\n\n\ndef get_all_vehicles():\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else list(map(lambda t: Vehicle(t[1], t[2], t[3\n ], t[4], t[5], t[6], t[7], t[0]), rs))\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[\n 3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location where ol_id = %s', (\n location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs)\n )[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle_class')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2],\n t[3], t[0]), rs))\n\n\ndef get_vehicle_by_id(vehicle_id):\n rs = run_query('select * from zlrz_vehicle where veh_id=%s', (int(\n vehicle_id),))\n return list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t\n [7], t[0]), rs))[0] if rs is not None else None\n\n\ndef get_vehicle_class(vehicle_id):\n rs = 
run_query(\n \"\"\"select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s\"\"\"\n , (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0\n ] if rs is not None else None\n\n\ndef delete_veh_class(vc_num):\n if vc_num == '':\n return\n res = run_query('select * from zlrz_vehicle where vc_num=%s', int(vc_num))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_vehicle_class where vc_num=%s',\n int(vc_num))\n return rs\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('select * from zlrz_office_location where ol_id=%s',\n int(location_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_office_location where ol_id=%s',\n int(location_id))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('delete from zlrz_vehicle where veh_id=%s', int(veh_id))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('delete from zlrz_rental where cust_id=%s', int(cust_id))\n rs4 = run_query('delete from zlrz_cust_coupon where cust_id=%s', int(\n cust_id))\n rs2 = run_query('delete from zlrz_corporate where cust_id=%s', int(cust_id)\n )\n rs3 = run_query('delete from zlrz_individual where cust_id=%s', int(\n cust_id))\n rs1 = run_query('delete from zlrz_customer where cust_id=%s', int(cust_id))\n return rs1\n\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('delete from zlrz_cust_coupon where cou_id=%s', int(cou_id)\n )\n rs2 = run_query('delete from zlrz_coupons where cou_id=%s', int(cou_id))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('select * from zlrz_corporation where corp_id=%s', int(\n corp_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_corporation where corp_id=%s', int\n (corp_id))\n return rs\n\n\ndef update_vehicle_class(class_obj):\n rs = run_query(\n 'update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s where vc_name = %s'\n , (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile),\n class_obj.vc_name))\n return rs\n", "step-4": "import pymysql\nfrom app_module.models import User, Vehicle, Address, Customer, Location, Coupon, VehicleClass, Corporation, Corporate\nfrom datetime import datetime\nHOSTNAME = 'localhost'\nUSERNAME = 'root'\nPASSWORD = '123456'\nDATABASE = 'proj_p2'\n\n\ndef get_connection():\n my_sql_connection = pymysql.connect(host=HOSTNAME, user=USERNAME,\n passwd=PASSWORD, db=DATABASE)\n return my_sql_connection\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n cur.execute(query, args)\n rs = cur.fetchall()\n if len(rs) != 0:\n return rs\n conn.commit()\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query(\n 'insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n rs = run_query(\n 'select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'\n , (address_obj.state, address_obj.city, address_obj.street, int(\n address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query(\n \"\"\"insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values (%s, %s, %s, %s, %s, %s, %s, %s) 
\"\"\"\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.\n last_name, customer_obj.cust_email, customer_obj.cust_phonenum,\n customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n 'select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.\n cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query(\n \"\"\"insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s \"\"\"\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year),\n vehicle_obj.vin_num, vehicle_obj.license_num, vehicle_obj.class_num,\n vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query(\n 'insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n rs = run_query(\n \"\"\"select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s \"\"\"\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.\n vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query(\n \"\"\"insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n rs = run_query(\n \"\"\"select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s \"\"\"\n , (location_obj.phone, location_obj.state, location_obj.city,\n location_obj.street, int(location_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_corporation(corp_obj):\n run_query(\n 'insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n rs = run_query(\n 'select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n return rs[0][0]\n\n\ndef insert_corporate(corporate_obj):\n run_query(\n 'insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n rs = run_query(\n 'select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.\n corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_individual(individual_obj):\n run_query(\n 'insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum, cust_type) values (%s, %s, %s, %s, %s)'\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum,\n individual_obj.cust_insurcompname, individual_obj.cust_insurpolnum,\n individual_obj.cust_type))\n rs = run_query(\n 'select * 
from zlrz_individual where cust_id = %s and cust_driverlicnum = %s and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s'\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum,\n individual_obj.cust_insurcompname, individual_obj.cust_insurpolnum,\n individual_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_invoice(invoice_obj):\n run_query(\n 'insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) ',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query(\n 'select * from zlrz_invoice where inv_date = %s and inv_amount = %s',\n (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\ndef insert_payment(payment_obj):\n run_query(\n \"\"\"insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount) \n values (%s, %s , %s , %s , %s) \"\"\"\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.\n pay_cardnum, payment_obj.inv_id, payment_obj.pay_amount))\n rs = run_query(\n \"\"\"select * from zlrz_payment where pay_date=%s and pay_method=%s and pay_cardnum=%s and inv_id=%s\n and pay_amount=%s\"\"\"\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.\n pay_cardnum, payment_obj.inv_id, payment_obj.pay_amount))\n return rs[0][0]\n\n\ndef insert_rental(rental_obj):\n run_query(\n \"\"\"insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query(\n \"\"\"select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s\"\"\"\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate,\n rental_obj.ren_startodometer, rental_obj.ren_endodometer,\n rental_obj.ren_dailylimit, rental_obj.cust_id, rental_obj.cust_type,\n rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.\n ren_dropoffloc_id, rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query(\n 'insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend)\n )\n else:\n rs = run_query(\n 'select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'\n , coupon_obj.cou_rate)\n return rs[0][0]\n\n\ndef insert_cust_coupon(cust_coupon_obj):\n run_query(\n 'insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s) '\n , (cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj\n .cust_type, cust_coupon_obj.coupon_type))\n return\n\n\ndef get_password(username):\n rs = run_query('select password from zlrz_customer where username = %s',\n 
(username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_type(username):\n rs = run_query('select cust_type from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('select cust_id from zlrz_customer where username = %s',\n (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_all_corporations():\n rs = run_query('select * from zlrz_corporation')\n return [] if rs is None else list(map(lambda t: Corporation(t[1], t[2],\n t[0]), rs))\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , cust_id)\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3],\n t[0]), rs))\n\n\ndef get_coupon(cust_id):\n rs = run_query(\n \"\"\"select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s\"\"\"\n , (cust_id,))\n res = None\n maxrate = float('-inf')\n if rs is not None:\n coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))\n for cou in coupons:\n if cou.validstart and cou.validend:\n if (datetime.now() - cou.validstart).days >= 0 and (cou.\n validend - datetime.now()).days >= 0:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n if not cou.validstart and not cou.validend:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n return res\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('select * from zlrz_customer')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[\n 3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('select * from zlrz_corporate')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3]), rs))\n\n\ndef get_all_individual():\n rs = run_query('select * from zlrz_individual')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t\n [2], t[3], t[4]), rs))\n\n\ndef get_all_vehicles():\n rs = run_query('select * from zlrz_vehicle')\n return [] if rs is None else list(map(lambda t: Vehicle(t[1], t[2], t[3\n ], t[4], t[5], t[6], t[7], t[0]), rs))\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[\n 3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_office_location where ol_id = %s', (\n location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs)\n )[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('select * from zlrz_vehicle_class')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2],\n t[3], t[0]), rs))\n\n\ndef get_vehicle_by_id(vehicle_id):\n rs = run_query('select * from zlrz_vehicle where veh_id=%s', (int(\n vehicle_id),))\n return list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t\n [7], t[0]), rs))[0] if rs is not None else None\n\n\ndef 
get_vehicle_class(vehicle_id):\n rs = run_query(\n \"\"\"select zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s\"\"\"\n , (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0\n ] if rs is not None else None\n\n\ndef delete_veh_class(vc_num):\n if vc_num == '':\n return\n res = run_query('select * from zlrz_vehicle where vc_num=%s', int(vc_num))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_vehicle_class where vc_num=%s',\n int(vc_num))\n return rs\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('select * from zlrz_office_location where ol_id=%s',\n int(location_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_office_location where ol_id=%s',\n int(location_id))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('delete from zlrz_vehicle where veh_id=%s', int(veh_id))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('delete from zlrz_rental where cust_id=%s', int(cust_id))\n rs4 = run_query('delete from zlrz_cust_coupon where cust_id=%s', int(\n cust_id))\n rs2 = run_query('delete from zlrz_corporate where cust_id=%s', int(cust_id)\n )\n rs3 = run_query('delete from zlrz_individual where cust_id=%s', int(\n cust_id))\n rs1 = run_query('delete from zlrz_customer where cust_id=%s', int(cust_id))\n return rs1\n\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('delete from zlrz_cust_coupon where cou_id=%s', int(cou_id)\n )\n rs2 = run_query('delete from zlrz_coupons where cou_id=%s', int(cou_id))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('select * from zlrz_corporation where corp_id=%s', int(\n corp_id))\n if res:\n return 1\n else:\n rs = run_query('delete from zlrz_corporation where corp_id=%s', int\n (corp_id))\n return rs\n\n\ndef update_vehicle_class(class_obj):\n rs = run_query(\n 'update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s where vc_name = %s'\n , (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile),\n class_obj.vc_name))\n return rs\n", "step-5": "import pymysql\nfrom app_module.models import User, Vehicle, Address, Customer, Location, Coupon, VehicleClass, Corporation, Corporate\nfrom datetime import datetime\n\nHOSTNAME = 'localhost'\nUSERNAME = 'root'\nPASSWORD = '123456'\nDATABASE = 'proj_p2'\n\n\ndef get_connection():\n my_sql_connection = pymysql.connect(host=HOSTNAME, user=USERNAME, passwd=PASSWORD, db=DATABASE)\n return my_sql_connection\n\n\ndef run_query(query, args=None):\n conn = get_connection()\n cur = conn.cursor()\n\n cur.execute(query, args)\n\n rs = cur.fetchall()\n if (len(rs) != 0):\n return rs\n conn.commit()\n\n cur.close()\n conn.close()\n\n\ndef insert_address(address_obj):\n run_query('''insert into zlrz_address (state, city, street, zipcode) values (%s, %s, %s, %s)'''\n , (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))\n rs = run_query('''select * from zlrz_address where state = %s and city = %s and street=%s and zipcode=%s'''\n , (address_obj.state, address_obj.city, address_obj.street, int(address_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_customer(customer_obj):\n run_query('''insert into zlrz_customer (cust_type, firstname, lastname, cust_email, cust_phonenum, addr_id, \n username, password) values 
(%s, %s, %s, %s, %s, %s, %s, %s) '''\n , (customer_obj.cust_type, customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email,\n customer_obj.cust_phonenum, customer_obj.address_id, customer_obj.username, customer_obj.password))\n rs = run_query(\n '''select * from zlrz_customer where firstname = %s and lastname = %s and cust_email = %s and cust_phonenum = %s order by cust_id desc'''\n , (customer_obj.first_name, customer_obj.last_name, customer_obj.cust_email, customer_obj.cust_phonenum))\n return rs[0][0]\n\n\ndef insert_vehicle(vehicle_obj):\n run_query('''insert into zlrz_vehicle (veh_make, veh_model, veh_year, veh_vin, veh_license, vc_num, ol_id) values \n (%s, %s, %s, %s, %s, %s, %s) '''\n , (\n vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num, vehicle_obj.license_num,\n vehicle_obj.class_num, vehicle_obj.location_id))\n rs = run_query('''select * from zlrz_vehicle where veh_make = %s and veh_model = %s and veh_year = %s and veh_vin \n = %s and veh_license = %s and vc_num = %s and ol_id = %s '''\n , (vehicle_obj.make, vehicle_obj.model, int(vehicle_obj.year), vehicle_obj.vin_num,\n vehicle_obj.license_num, vehicle_obj.class_num, vehicle_obj.location_id))\n return rs[0][0]\n\n\ndef insert_vehicle_class(class_obj):\n run_query('''insert into zlrz_vehicle_class (vc_name, vc_rateperday, vc_feeovermile) values (%s, %s, %s)'''\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))\n rs = run_query('''select * from zlrz_vehicle_class where vc_name = %s and vc_rateperday = %s and vc_feeovermile = \n %s '''\n , (class_obj.vc_name, int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile)))\n return rs[0][0]\n\n\ndef insert_office_location(location_obj):\n run_query('''insert into zlrz_office_location (ol_phonenum, ol_state, ol_city, ol_street, ol_zipcode) values (%s, \n %s, %s, %s, %s) '''\n , (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,\n int(location_obj.zipcode)))\n rs = run_query('''select * from zlrz_office_location where ol_phonenum = %s and ol_state = %s and ol_city = %s \n and ol_street=%s and ol_zipcode=%s '''\n , (location_obj.phone, location_obj.state, location_obj.city, location_obj.street,\n int(location_obj.zipcode)))\n return rs[0][0]\n\n\ndef insert_corporation(corp_obj):\n run_query('''insert into zlrz_corporation (corp_name, corp_regnum) values (%s, %s)'''\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n rs = run_query('''select * from zlrz_corporation where corp_name = %s and corp_regnum = %s'''\n , (corp_obj.corp_name, corp_obj.corp_regnum))\n return rs[0][0]\n\n\ndef insert_corporate(corporate_obj):\n run_query('''insert into zlrz_corporate (cust_id, employee_id, corp_id, cust_type) values (%s, %s, %s, %s)'''\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))\n rs = run_query(\n '''select * from zlrz_corporate where cust_id = %s and employee_id = %s and corp_id = %s and cust_type = %s'''\n , (corporate_obj.cust_id, corporate_obj.employee_id, corporate_obj.corp_id, corporate_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_individual(individual_obj):\n run_query(\n '''insert into zlrz_individual (cust_id, cust_driverlicnum, cust_insurcompname, cust_insurpolnum, cust_type) values (%s, %s, %s, %s, %s)'''\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,\n individual_obj.cust_insurpolnum, individual_obj.cust_type))\n rs = run_query(\n '''select * from 
zlrz_individual where cust_id = %s and cust_driverlicnum = %s and cust_insurcompname = %s and cust_insurpolnum = %s and cust_type = %s'''\n , (individual_obj.cust_id, individual_obj.cust_driverlicnum, individual_obj.cust_insurcompname,\n individual_obj.cust_insurpolnum, individual_obj.cust_type))\n return rs[0][0]\n\n\ndef insert_invoice(invoice_obj):\n run_query('''insert into zlrz_invoice (inv_date, inv_amount) values (%s, %s) '''\n , (invoice_obj.inv_date, invoice_obj.inv_amount))\n rs = run_query('''select * from zlrz_invoice where inv_date = %s and inv_amount = %s'''\n , (invoice_obj.inv_date, invoice_obj.inv_amount))\n return rs[0][0]\n\n\ndef insert_payment(payment_obj):\n run_query('''insert into zlrz_payment (pay_date, pay_method, pay_cardnum, inv_id, pay_amount) \n values (%s, %s , %s , %s , %s) '''\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum, payment_obj.inv_id\n , payment_obj.pay_amount))\n rs = run_query('''select * from zlrz_payment where pay_date=%s and pay_method=%s and pay_cardnum=%s and inv_id=%s\n and pay_amount=%s'''\n , (payment_obj.pay_date, payment_obj.pay_method, payment_obj.pay_cardnum, payment_obj.inv_id\n , payment_obj.pay_amount))\n return rs[0][0]\n\n\ndef insert_rental(rental_obj):\n run_query('''insert into zlrz_rental (ren_pickupdate, ren_dropoffdate, ren_startodometer, ren_endodometer\n , ren_dailylimit, cust_id, cust_type, veh_id, ren_pickuplocid, ren_dropoffloc_id, inv_id, cou_id) \n values (%s, %s , %s , %s , %s, %s, %s, %s, %s, %s, %s, %s) '''\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer\n , rental_obj.ren_endodometer, rental_obj.ren_dailylimit, rental_obj.cust_id\n , rental_obj.cust_type, rental_obj.veh_id, rental_obj.ren_pickuplocid, rental_obj.ren_dropoffloc_id\n , rental_obj.inv_id, rental_obj.cou_id))\n rs = run_query('''select * from zlrz_rental where ren_pickupdate=%s and ren_dropoffdate=%s and ren_startodometer=%s\n and ren_endodometer=%s and ren_dailylimit=%s and cust_id=%s and cust_type=%s and veh_id=%s and ren_pickuplocid=%s\n and ren_dropoffloc_id=%s and inv_id=%s and cou_id=%s'''\n , (rental_obj.ren_pickupdate, rental_obj.ren_dropoffdate, rental_obj.ren_startodometer\n , rental_obj.ren_endodometer, rental_obj.ren_dailylimit, rental_obj.cust_id\n , rental_obj.cust_type, rental_obj.veh_id, rental_obj.ren_pickuplocid,\n rental_obj.ren_dropoffloc_id\n , rental_obj.inv_id, rental_obj.cou_id))\n return rs[0][0]\n\n\ndef insert_coupon(coupon_obj):\n run_query('''insert into zlrz_coupons (cou_rate, validstart, validend) values (%s, %s, %s) '''\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n if coupon_obj.validstart and coupon_obj.validend:\n rs = run_query(\n '''select * from zlrz_coupons where cou_rate = %s and validstart = %s and validend = %s order by cou_id desc'''\n , (coupon_obj.cou_rate, coupon_obj.validstart, coupon_obj.validend))\n else:\n rs = run_query(\n '''select * from zlrz_coupons where cou_rate = %s and validstart is null and validend is null order by cou_id desc'''\n , (coupon_obj.cou_rate))\n return rs[0][0]\n\n\ndef insert_cust_coupon(cust_coupon_obj):\n run_query('''insert into zlrz_cust_coupon (cou_id, cust_id, cust_type, coupon_type) values (%s, %s, %s, %s) '''\n ,\n (cust_coupon_obj.cou_id, cust_coupon_obj.cust_id, cust_coupon_obj.cust_type, cust_coupon_obj.coupon_type))\n return\n\n\ndef get_password(username):\n rs = run_query('''select password from zlrz_customer where username = %s''', (username,))\n return rs[0][0] if rs 
is not None else rs\n\n\ndef get_user_type(username):\n rs = run_query('''select cust_type from zlrz_customer where username = %s''', (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_user_id(username):\n rs = run_query('''select cust_id from zlrz_customer where username = %s''', (username,))\n return rs[0][0] if rs is not None else rs\n\n\ndef get_all_corporations():\n rs = run_query('''select * from zlrz_corporation''')\n return [] if rs is None else list(map(lambda t: Corporation(t[1], t[2], t[0]), rs))\n\n\ndef get_cust_coupon(cust_id):\n rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s''', (cust_id))\n return [] if rs is None else list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))\n\n\ndef get_coupon(cust_id):\n rs = run_query('''select zlrz_coupons.* from zlrz_cust_coupon join zlrz_coupons \n on zlrz_cust_coupon.cou_id = zlrz_coupons.cou_id where zlrz_cust_coupon.cust_id = %s'''\n , (cust_id,))\n res = None\n maxrate = float('-inf')\n if rs is not None:\n coupons = list(map(lambda t: Coupon(t[1], t[2], t[3], t[0]), rs))\n for cou in coupons:\n if cou.validstart and cou.validend:\n if (datetime.now() - cou.validstart).days >= 0 and (cou.validend - datetime.now()).days >= 0:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n if not cou.validstart and not cou.validend:\n if cou.cou_rate > maxrate:\n maxrate = cou.cou_rate\n res = cou\n return res\n\n\ndef get_vehicles():\n \"\"\"\n Get full location\n :return:\n \"\"\"\n rs = run_query('''select * from zlrz_vehicle''')\n return [] if rs is None else rs\n\n\ndef get_all_customers():\n rs = run_query('''select * from zlrz_customer''')\n return [] if rs is None else list(map(lambda t: Customer(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[0]), rs))\n\n\ndef get_all_corporate():\n rs = run_query('''select * from zlrz_corporate''')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3]), rs))\n\n\ndef get_all_individual():\n rs = run_query('''select * from zlrz_individual''')\n return [] if rs is None else list(map(lambda t: Corporate(t[0], t[1], t[2], t[3], t[4]), rs))\n\n\ndef get_all_vehicles():\n rs = run_query('''select * from zlrz_vehicle''')\n return [] if rs is None else list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[0]), rs))\n\n\ndef get_all_locations():\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('''select * from zlrz_office_location''')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))\n\n\ndef get_location_by_id(location_id):\n \"\"\"\n Get all location objects\n :return:\n \"\"\"\n rs = run_query('''select * from zlrz_office_location where ol_id = %s''', (location_id,))\n return list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))[0] if rs is not None else None\n\n\ndef get_all_vehclasses():\n \"\"\"\n Get all vehicleclass objects\n :return:\n \"\"\"\n rs = run_query('''select * from zlrz_vehicle_class''')\n return [] if rs is None else list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))\n\n\ndef get_vehicle_by_id(vehicle_id):\n rs = run_query('''select * from zlrz_vehicle where veh_id=%s''', (int(vehicle_id),))\n return list(map(lambda t: Vehicle(t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[0]), rs))[0] \\\n if rs is not None else None\n\n\ndef get_vehicle_class(vehicle_id):\n rs = run_query('''select 
zlrz_vehicle_class.* from zlrz_vehicle join zlrz_vehicle_class \n on zlrz_vehicle.vc_num = zlrz_vehicle_class.vc_num where zlrz_vehicle.veh_id=%s''', (int(vehicle_id),))\n return list(map(lambda t: VehicleClass(t[1], t[2], t[3], t[0]), rs))[0] if rs is not None else None\n\n\ndef delete_veh_class(vc_num):\n if vc_num == '':\n return\n res = run_query('''select * from zlrz_vehicle where vc_num=%s''', (int(vc_num)))\n if res:\n return 1\n else:\n rs = run_query('''delete from zlrz_vehicle_class where vc_num=%s''', (int(vc_num)))\n return rs\n\n\ndef delete_off_loc(location_id):\n if location_id == '':\n return\n res = run_query('''select * from zlrz_office_location where ol_id=%s''', (int(location_id)))\n if res:\n return 1\n else:\n rs = run_query('''delete from zlrz_office_location where ol_id=%s''', (int(location_id)))\n return rs\n\n\ndef delete_vehicle(veh_id):\n if veh_id == '':\n return\n rs = run_query('''delete from zlrz_vehicle where veh_id=%s''', (int(veh_id)))\n return rs\n\n\ndef delete_customer(cust_id):\n if cust_id == '':\n return\n rs5 = run_query('''delete from zlrz_rental where cust_id=%s''', (int(cust_id)))\n rs4 = run_query('''delete from zlrz_cust_coupon where cust_id=%s''', (int(cust_id)))\n rs2 = run_query('''delete from zlrz_corporate where cust_id=%s''', (int(cust_id)))\n rs3 = run_query('''delete from zlrz_individual where cust_id=%s''', (int(cust_id)))\n rs1 = run_query('''delete from zlrz_customer where cust_id=%s''', (int(cust_id)))\n return rs1\n\ndef delete_cust_coupon(cou_id):\n if cou_id == '':\n return\n rs1 = run_query('''delete from zlrz_cust_coupon where cou_id=%s''', (int(cou_id)))\n rs2 = run_query('''delete from zlrz_coupons where cou_id=%s''', (int(cou_id)))\n return rs1\n\n\ndef delete_corporation(corp_id):\n if corp_id == '':\n return\n res = run_query('''select * from zlrz_corporation where corp_id=%s''', (int(corp_id)))\n if res:\n return 1\n else:\n rs = run_query('''delete from zlrz_corporation where corp_id=%s''', (int(corp_id)))\n return rs\n\ndef update_vehicle_class(class_obj):\n rs = run_query('''update zlrz_vehicle_class set vc_rateperday = %s, vc_feeovermile = %s where vc_name = %s''', (int(class_obj.vc_rateperday), int(class_obj.vc_feeovermile), class_obj.vc_name))\n return rs", "step-ids": [ 25, 27, 37, 40, 41 ] }
[ 25, 27, 37, 40, 41 ]
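A note on the run_query helper in the record above: whenever the statement returns rows it exits early, skipping both conn.commit() and the cur.close()/conn.close() cleanup, so every successful select leaks a connection. A leak-free sketch of the same helper, keeping its contract of returning None for empty result sets (get_connection() is the pymysql connection factory defined in that module; this is an illustrative rewrite, not the dataset's own code):

def run_query(query, args=None):
    # get_connection() is assumed from the module above (pymysql.connect wrapper)
    conn = get_connection()
    try:
        cur = conn.cursor()
        try:
            cur.execute(query, args)
            rs = cur.fetchall()
            conn.commit()  # no-op for selects, required for inserts/deletes
            return rs if len(rs) != 0 else None
        finally:
            cur.close()
    finally:
        conn.close()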
#!/usr/bin/env python

import sys, re, urllib, urllib2, string, time, os
from urllib2 import Request, urlopen, URLError, HTTPError
from urlparse import urlparse

joomla_version = "undefined"  # used for Joomla version info

provided_url = ""  # the currently selected URL

verbose_flag = 0  # if set to 1, prints verbose information

default_input_path = ""  # the default input file path

default_output_path = ""  # the default output file path

if os.name == "nt":
    path_slash = "\\"
else:
    path_slash = "/"


# Prints usage
def print_usage():
    """
    print_usage()

    Prints the help screen and exits.
    """
    print ""
    print ""
    print " JoomFind v0.1"
    print ""
    print " Script made by Jasdev Singh"
    print ""
    print " This script is made only for educational and offline self-testing "
    print " purposes. The creator is not responsible or accountable for any "
    print " damage or loss caused that you perform with this script. "
    print ""
    print " Usage example:"
    print '\tpython joomfind.py -f filepath | -v'
    print ""
    print " Put URL(s) to scan in a newline delimited file"
    print " URL(s) must point to homepage of the CMS "
    print ""
    print " Options:"
    print " -f filename (specify input file)"
    print " -v, --verbose (show detailed output)"
    print " --help (displays this help text)"
    print ""
    return


# Testing if URL is reachable, with error handling
def test_url():
    """
    test_url()

    Checks whether the URL is reachable. Prints relevant information.
    """
    global provided_url
    global verbose_flag
    # keep only scheme and host of the provided url
    provided_url = urlparse(provided_url).scheme + "://" + urlparse(provided_url).netloc
    print provided_url
    if verbose_flag: print "\t[.] Checking if connection can be established...",
    try:
        response = urllib2.urlopen(provided_url)
    except HTTPError, e:
        if verbose_flag: print "[!] Failed"
        return 0
    except URLError, e:
        if verbose_flag: print "[!] Failed"
        return 0
    else:
        if verbose_flag: print "Success"
        return 1


# Scans for the HTML meta tag information
def scan_target_metatag():
    """
    scan_target_metatag()

    Scans the meta tag of the website.

    The meta tag has information that can lead to the detection of Joomla.
    """
    target_meta_url = provided_url + "/index.php"
    if verbose_flag: print "\t[.] Trying to access meta tag information...",
    try:
        response = urllib2.urlopen(target_meta_url)
        html = response.read(2000)
        # Now extract the interesting information
        get_metatag = string.find(html, "Joomla! - Open Source Content Management")
        if get_metatag == -1:
            meta_flag = 0
            if verbose_flag: print "Failed"
        else:
            meta_flag = 1
            if verbose_flag: print "Success"
        return meta_flag
    except:
        if verbose_flag: print "Failed"
        return 0  # no clue found; returning an int keeps compute_result() arithmetic safe


# Tests whether the URL has a '/administrator' login page
def scan_admin_url():
    """
    scan_admin_url()

    Scans the administrator URL of the website.

    The administrator URL, if reachable, is a clue that Joomla is being used.
    """
    target_admin_url = provided_url + "/administrator/index.php"
    if verbose_flag: print "\t[.] Trying to access admin login page...",
    try:
        response = urllib2.urlopen(target_admin_url)
    except HTTPError, e:
        admin_flag = 0
        if verbose_flag: print "Failed"
        return admin_flag
    else:
        admin_flag = 1
        if verbose_flag: print "Success"
        return admin_flag


# Scans content of the 'com_content' component
def scan_com_content():
    """
    scan_com_content()

    Scans the content.xml file of the default component of the website.

    The content.xml file, if readable, is a clue that Joomla is being used.
    """
    target_com_content = provided_url + "/administrator/components/com_content/content.xml"
    if verbose_flag: print "\t[.] Trying to access com_content component...",
    try:
        response = urllib2.urlopen(target_com_content)
        html = response.read()
        get_com = string.find(html, "Joomla")
    except HTTPError, e:
        com_component_flag = 0
        if verbose_flag: print "Failed"
        return com_component_flag
    else:
        if get_com == -1:
            com_component_flag = 0
            if verbose_flag: print "Failed"
        else:
            com_component_flag = 1
            if verbose_flag: print "Success"
        return com_component_flag


# Scans the robots.txt file
def scan_robots_txt():
    """
    scan_robots_txt()

    Scans the robots.txt file of the website.

    The robots.txt file, if readable, has clues that Joomla is being used.
    """
    target_robots_txt = provided_url + "/robots.txt"
    if verbose_flag: print "\t[.] Trying to access robots.txt file...",
    try:
        response = urllib2.urlopen(target_robots_txt)
        html = response.read()
        get_robots = string.find(html, "Joomla")
    except HTTPError, e:
        robots_flag = 0
        if verbose_flag: print "Failed"
        return robots_flag
    else:
        if get_robots == -1:
            robots_flag = 0
            if verbose_flag: print "Failed"
        else:
            robots_flag = 1
            if verbose_flag: print "Success"
        return robots_flag


# Scans the htaccess.txt file
def scan_htaccess():
    """
    scan_htaccess()

    Scans the htaccess file of the website.

    The htaccess file, if readable, has clues that Joomla is being used.
    """
    target_htaccess = provided_url + "/htaccess.txt"
    if verbose_flag: print "\t[.] Trying to access htaccess file...",
    try:
        response = urllib2.urlopen(target_htaccess)
        html = response.read()
        get_htaccess = string.find(html, "Joomla")
    except HTTPError, e:
        htaccess_flag = 0
        if verbose_flag: print "Failed"
        return htaccess_flag
    else:
        if get_htaccess == -1:
            htaccess_flag = 0
            if verbose_flag: print "Failed"
        else:
            htaccess_flag = 1
            if verbose_flag: print "Success"
        return htaccess_flag


# Scans the system.css file
def scan_system_css():
    """
    scan_system_css()

    Scans the system.css file of the website.

    The system.css file, if readable, has clues that Joomla is being used.
    """
    pass


# Scans the MooTools.js file
def scan_mootools():
    """
    scan_mootools()

    Scans the mootools.js file of the website.

    The mootools.js file, if readable, has clues that Joomla is being used.
    """
    global joomla_version
    target_mootools = provided_url + "/media/system/js/mootools-more.js"
    if verbose_flag: print "\t[.] Trying to access MooTools file...",
    try:
        response = urllib2.urlopen(target_mootools)
        html = response.read(3300)
        get_mootools = string.find(html, 'MooTools.More={version:"1.4.0.1"')
    except HTTPError, e:
        mootools_flag = 0
        if verbose_flag: print "Failed"
        return mootools_flag
    else:
        if get_mootools == -1:
            mootools_flag = 0
            if verbose_flag: print "Failed"
        else:
            mootools_flag = 1
            if verbose_flag: print "Success"
            joomla_version = "2.x or 3.x"
        return mootools_flag


# Scans the en-GB.xml file
def scan_engb_ini():
    """
    scan_engb_ini()

    Scans the en-GB.xml language file of the website.

    The en-GB.xml file, if readable, reveals the exact Joomla version.
    """
    global joomla_version
    target_engb = provided_url + "/language/en-GB/en-GB.xml"
    if verbose_flag: print "\t[.] Trying to access en-GB file...",
    try:
        response = urllib2.urlopen(target_engb)
        html = response.read(200)
        t1 = string.find(html, '<version>')
        target_engb = html[t1 + 9:t1 + 14]
    except HTTPError, e:
        engb_flag = 0
        if verbose_flag: print "Failed"
        return engb_flag
    else:
        if t1 == -1:
            engb_flag = 0
            if verbose_flag: print "Failed"
        else:
            engb_flag = 1
            if verbose_flag: print "Success"
            joomla_version = target_engb
        return engb_flag


# Computes the result of the scans
def compute_result(a, b, c, d, e, f, g):
    """
    compute_result()

    Computes the final result: at least three of the seven clues must hit.
    """
    if (a or b or c or d or e or f or g) and ((a + b + c + d + e + f + g) >= 3):
        return 1
    else:
        return 0


# Reads URL(s) from an input file and processes them
def process_from_file():
    """
    process_from_file()

    Starts processing the URL(s) from the input file.
    """
    global default_input_path
    print "JoomFind v 1.0"
    print "\n\nTrying to read URL(s) from " + default_input_path + " file...\n"
    try:
        if not default_input_path:
            f = open("urls.txt")
        else:
            f = open(default_input_path)
        cwd = os.getcwd()
        file_path = cwd + path_slash + f.name
        # extract the url(s) from the file into a list
        start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#', ' ', "\n"]]
        if not start_urls:
            print "File is empty. Add some URL(s) first.\n"
            f.close()
            return 0
    except:
        print "File not found. Make sure it exists.\n"
        return 0

    num = str(len(start_urls))
    print "Found " + num + " URL(s) on " + time.asctime(time.localtime(time.time())) + "\n"

    of = open(default_output_path, 'a+')
    of.write("\n\n\tScanning " + num + " URL(s) ")
    of.write("\n\n\tDate/Time : " + time.asctime(time.localtime(time.time())))
    of.write("\n\n\tInput file path : " + default_input_path + "\n\n")
    of.close()

    for url in start_urls:
        global provided_url
        provided_url = url
        print "\nWorking on URL " + str(start_urls.index(url) + 1) + ": " + provided_url
        processing()
    print "\nAll done! Check '" + default_output_path + "' file for results.\n"


# Calls other scans and writes results to the output file
def processing():
    """
    processing()

    Calls other helper functions.
    """
    err = test_url()
    of = open(default_output_path, 'a+')
    if err != 0:
        metaf = scan_target_metatag()
        adminf = scan_admin_url()
        comf = scan_com_content()
        robotsf = scan_robots_txt()
        htf = scan_htaccess()
        moof = scan_mootools()
        engbf = scan_engb_ini()
        result = compute_result(metaf, adminf, comf, robotsf, htf, moof, engbf)
        if result == 1:
            of.write("\nJOOMLA USED (version : " + joomla_version + ") --> " + provided_url + "\n")
        else:
            of.write("\nJOOMLA NOT USED --> " + provided_url + "\n")
    else:
        of.write("\nBAD URL --> " + provided_url + "\n")
    of.close()
    return 0


# main method
def main():
    """
    main()

    Starting point of program execution.
    """
    # Checking if an argument was provided
    if len(sys.argv) <= 1:
        print_usage()
        sys.exit(1)

    for arg in sys.argv:
        # Checking if help was called
        if arg == "-h" or arg == "--help":
            print_usage()
            sys.exit(1)

        # Checking for verbose mode
        if arg == "-v" or arg == "--verbose":
            global verbose_flag
            verbose_flag = 1

        # Checking for input file
        if arg == "-f" or arg == "--file":
            global default_input_path
            global default_output_path
            default_input_path = sys.argv[2]
            default_output_path = default_input_path[:-4] + "_results.txt"

        #if arg == "-u" or arg == "--url":
        #    input_url = sys.argv[2]

    if os.name == "nt":
        os.system('cls')
    else:
        os.system('clear')

    process_from_file()


if __name__ == "__main__":
    main()

#EOF
normal
{ "blob_id": "9de2589cfb5bebba789ece8df9a0fcfbedb01173", "index": 2440, "step-1": "#!/usr/bin/env python\r\n\r\nimport sys, re, urllib, urllib2, string, time, os\r\nfrom urllib2 import Request, urlopen, URLError, HTTPError\r\nfrom urlparse import urlparse\r\n\r\njoomla_version=\"undefined\" #used for joomla veersin info\r\n\r\nprovided_url=\"\" #the selected provided url\r\n\r\nverbose_flag = 0 # If set to 1, prints verbose information\r\n\r\ndefault_input_path = \"\" # The default input file path\r\n \r\ndefault_output_path = \"\" # The default output file path\r\n\r\nif os.name == \"nt\":\r\n path_slash = \"\\\\\"\r\nelse:\r\n path_slash = \"/\"\r\n\r\n# Prints usage\r\ndef print_usage():\r\n \"\"\"\r\n\tprint_usage()\r\n\t\r\n\tPrints help screen and exits.\r\n\r\n \"\"\"\r\n print \"\"\r\n print \"\"\r\n print \" JoomFind v0.1\"\r\n print \"\"\r\n print \" Script made by Jasdev Singh\"\r\n print \"\"\r\n print \" This script is made only for educational and offline self-testing \"\r\n print \" purposes. The creator is not responsible or accountable for any \"\r\n print \" damage or loss caused that you perform with this script. \"\r\n print \"\"\r\n print \" Usage example:\"\r\n print '\\tpython joomfind.py -f filepath | -v'\r\n print \"\"\r\n print \" Put URL(s) to scan in a newline delimited file\"\r\n print \" URL(s) must point to homepage of the CMS \"\r\n print \"\"\r\n print \" Options:\"\r\n print \" -f filename (specify input file)\"\r\n print \" -v, --verbose (show detailed output)\"\r\n print \" --help (displays this help text)\"\r\n print \"\"\r\n return\r\n\r\n\r\n# Testing if URL is reachable, with error handling\r\ndef test_url():\r\n \"\"\"\r\n\ttest_url()\r\n\t\r\n\tChecks whether URL is rechable. Prints relevant infomation.\r\n\r\n \"\"\"\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1\r\n\r\n# Scans for the HTML meta tag information\r\ndef scan_target_metatag():\r\n \"\"\"\r\n\tscan_target_metatag()\r\n\t\r\n\tScans the meta-tag of the website. \r\n\r\n\tThe meta-tag has information that can lead to the detection of Joomla.\r\n\r\n \"\"\"\r\n target_meta_url=provided_url+\"/index.php\"\r\n if verbose_flag: print \"\\t[.] Trying to access meta tag information...\", #+ target_meta_url\r\n try:\r\n \tresponse = urllib2.urlopen(target_meta_url)\r\n \thtml = response.read(2000)\r\n \t#print html\r\n \t# Now extract the interesting information\r\n \tget_metatag = string.find(html, \"Joomla! - Open Source Content Management\")\r\n\r\n \t# If the target is not vulnerable exit\r\n \tif get_metatag == -1:\r\n meta_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n meta_flag=1\r\n if verbose_flag: print \"Success\"\r\n #print \"meta flag=\"+str(meta_flag)\r\n return meta_flag\r\n\r\n except:\r\n\tif verbose_flag: print \"Failed\"\r\n\r\n# Tests whether the URL has a '/administrator' login page\r\ndef scan_admin_url():\r\n \"\"\"\r\n\tscan_admin_url()\r\n\t\r\n\tScans the administrator URL of the website. 
\r\n\r\n\tThe administrator URL, if reachable, is a clue that Joomla is being used.\r\n\r\n \"\"\"\r\n target_admin_url=provided_url+\"/administrator/index.php\"\r\n if verbose_flag: print \"\\t[.] Trying to access admin login page...\", #+ target_admin_url\r\n try:\r\n response = urllib2.urlopen(target_admin_url)\r\n except HTTPError, e:\r\n admin_flag=0\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Failed\"\r\n return admin_flag\r\n else:\r\n admin_flag=1\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Success\"\r\n return admin_flag\r\n\r\n# Scans content of 'com_content' component\r\ndef scan_com_content():\r\n \"\"\"\r\n\tscan_com_content()\r\n\t\r\n\tScans the content.xml file of the default component of the website. \r\n\r\n\tThe content.xml file, if readable, is a clue that Joomla is being used.\r\n\r\n \"\"\"\r\n target_com_content=provided_url+\"/administrator/components/com_content/content.xml\"\r\n if verbose_flag: print \"\\t[.] Trying to access com_content component...\", #+ target_com_content\r\n try:\r\n response = urllib2.urlopen(target_com_content)\r\n html = response.read()\r\n get_com = string.find(html, \"Joomla\")\r\n except HTTPError, e:\r\n com_component_flag=0\r\n #print \"com_component flag=\"+str(com_component_flag)\r\n if verbose_flag: print \"Failed\"\r\n return com_component_flag\r\n else:\r\n if get_com==-1:\r\n com_component_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n com_component_flag=1\r\n if verbose_flag: print \"Success\"\r\n #print \"com_component flag=\"+str(com_component_flag)\r\n return com_component_flag\r\n\r\n# Scans the robots.txt file\r\ndef scan_robots_txt():\r\n \"\"\"\r\n\tscan_robots_txt()\r\n\t\r\n\tScans the robots.txt file of website. \r\n\r\n\tThe robots.txt file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n target_robots_txt=provided_url+\"/robots.txt\"\r\n if verbose_flag: print \"\\t[.] Trying to access robots.txt file...\",#+target_robots_txt\r\n try:\r\n response = urllib2.urlopen(target_robots_txt)\r\n html = response.read()\r\n get_robots = string.find(html, \"Joomla\")\r\n except HTTPError, e:\r\n robots_flag=0\r\n #print \"robots flag=\"+str(robots_flag)\r\n if verbose_flag: print \"Failed\"\r\n return robots_flag\r\n else:\r\n if get_robots==-1:\r\n robots_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n robots_flag=1\r\n if verbose_flag: print \"Success\"\r\n #print \"robots flag=\"+str(robots_flag)\r\n return robots_flag\r\n\r\n# Scans the htaccess.txt file\r\ndef scan_htaccess():\r\n \"\"\"\r\n\tscan_htaccess()\r\n\t\r\n\tScans the htaccess file of website. \r\n\r\n\tThe htaccess file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n target_htacess=provided_url+\"/htaccess.txt\"\r\n if verbose_flag: print \"\\t[.] 
Trying to access htaccess file...\",#+target_htacess\r\n try:\r\n response = urllib2.urlopen(target_htacess)\r\n html = response.read()\r\n get_htaccess = string.find(html, \"Joomla\")\r\n except HTTPError, e:\r\n htaccess_flag=0\r\n #print \"htaccess flag=\"+str(htaccess_flag)\r\n if verbose_flag: print \"Failed\"\r\n return htaccess_flag\r\n else:\r\n if get_htaccess==-1:\r\n htaccess_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n htaccess_flag=1\r\n if verbose_flag: print \"Success\"\r\n #print \"htaccess flag=\"+str(htaccess_flag)\r\n return htaccess_flag\r\n\r\n# Scans the system.css file \r\ndef scan_system_css():\r\n \"\"\"\r\n\tscan_system_css()\r\n\t\r\n\tScans the system.css file of website. \r\n\r\n\tThe system.css file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n pass\r\n\r\n# Scans the MooTools.js file\r\ndef scan_mootools():\r\n \"\"\"\r\n\tscan_mootools()\r\n\t\r\n\tScans the mootools.js file of website. \r\n\r\n\tThe mootools.js file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n target_mootools=provided_url+\"/media/system/js/mootools-more.js\"\r\n if verbose_flag: print \"\\t[.] Trying to access MooTools file...\", #+ target_mootools\r\n try:\r\n response = urllib2.urlopen(target_mootools)\r\n html = response.read(3300)\r\n #print html\r\n get_mootools = string.find(html, 'MooTools.More={version:\"1.4.0.1\"')\r\n except HTTPError, e:\r\n mootools_flag=0\r\n #print \"mootools flag=\"+str(mootools_flag)\r\n if verbose_flag: print \"Failed\"\r\n return mootools_flag\r\n else:\r\n if get_mootools==-1:\r\n mootools_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n mootools_flag=1\r\n if verbose_flag: print \"Success\"\r\n joomla_version=\"2.x or 3.x\"\r\n #print \"mootools flag=\"+str(mootools_flag)\r\n return mootools_flag \r\n\r\n# Scans the en-GB.xml file\r\ndef scan_engb_ini():\r\n \"\"\"\r\n\tscan_engb_ini()\r\n\t\r\n\tScans the en-GB.ini file of website. \r\n\r\n\tThe en-GB.ini file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n target_engb=provided_url+\"/language/en-GB/en-GB.xml\"\r\n if verbose_flag: print \"\\t[.] Trying to access en-GB file...\", #+ target_engb\r\n try:\r\n response = urllib2.urlopen(target_engb)\r\n html = response.read(200)\r\n #print html\r\n t1 = string.find(html, '<version>')\r\n target_engb = html[t1+9:t1+14]\r\n \r\n except HTTPError, e:\r\n engb_flag=0\r\n #print \"engb flag=\"+str(engb_flag)\r\n if verbose_flag: print \"Failed\"\r\n return engb_flag\r\n else:\r\n if t1==-1:\r\n engb_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n engb_flag=1\r\n if verbose_flag: print \"Success\"\r\n global joomla_version\r\n joomla_version=target_engb\r\n #print \"engb flag=\"+str(engb_flag)\r\n return engb_flag \r\n\r\n# Computes the result of the scans\r\ndef compute_result(a,b,c,d,e,f,g):\r\n \"\"\"\r\n\tcompute_result()\r\n\t\r\n\tComputes the final result. \r\n\r\n \"\"\"\r\n if (a or b or c or d or e or f or g)and ((a+b+c+d+e+f+g)>=3):\r\n return 1\r\n else:\r\n return 0\r\n\r\n# Reads URL's from an input file and processes them\r\ndef process_from_file():\r\n \"\"\"\r\n\tprocess_from_file()\r\n\t\r\n\tStarts processing the URL's from the input file. 
\r\n\r\n \"\"\"\r\n global default_input_path\r\n print \"JoomFind v 1.0\"\r\n print \"\\n\\nTrying to read URL(s) form \" + default_input_path + \" file...\\n\"\r\n try:\r\n if not default_input_path:\r\n f = open(\"urls.txt\")\r\n else:\r\n f=open(default_input_path)\r\n cwd=os.getcwd()\r\n file_path = cwd + path_slash + f.name\r\n\t# extracting url's to list from file\r\n start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',\"\\n\"]]\r\n if not start_urls:\r\n print \"File is empty. Add some URL(s) first.\\n\"\r\n f.close()\r\n return 0\r\n except:\r\n print \"File not found. Make sure it exists.\\n\"\r\n return 0\r\n #print start_urls\r\n \r\n num=str(len(start_urls))\r\n print \"Found \" + num + \" URL(s) on \" + time.asctime(time.localtime(time.time())) + \"\\n\"\r\n \r\n of=open(default_output_path,'a+')\r\n of.write(\"\\n\\n\\tScanning \" + num + \" URL(s) \")\r\n of.write(\"\\n\\n\\tDate\\Time : \" + time.asctime(time.localtime(time.time())) )\r\n of.write(\"\\n\\n\\tInput file path : \" + default_input_path + \"\\n\\n\")\r\n of.close()\r\n \r\n for url in start_urls:\r\n global provided_url\r\n provided_url=url\r\n print \"\\nWorking on URL \" + str(start_urls.index(url)+1) + \": \" + provided_url\r\n processing()\r\n print \"\\nAll done! Check '\" + default_output_path +\"' file for results.\\n\" \r\n\r\n\r\n# Calls other scans and writes results to output file\r\ndef processing():\r\n \"\"\"\r\n\tprocessing()\r\n\t\r\n\tCalls other helper functions. \r\n\r\n \"\"\"\r\n err=test_url()\r\n of=open(default_output_path,'a+')\r\n if err!=0: \r\n metaf=scan_target_metatag()\r\n adminf=scan_admin_url()\r\n comf=scan_com_content()\r\n robotsf=scan_robots_txt()\r\n htf=scan_htaccess()\r\n moof=scan_mootools()\r\n engbf=scan_engb_ini()\r\n result=compute_result(metaf,adminf,comf,robotsf,htf,moof,engbf)\r\n if result==1:\r\n #print \"THE TARGET IS USING JOOMLA CMS\"\r\n #print \"Joomla version is \" + joomla_version\r\n of.write(\"\\nJOOMLA USED (version : \" + joomla_version + \") --> \" + provided_url + \"\\n\")\r\n else:\r\n #print \"JOOMLA NOT USED\"\r\n of.write(\"\\nJOMLA NOT USED --> \" + provided_url + \"\\n\")\r\n else:\r\n of.write(\"\\nBAD URL --> \" + provided_url + \"\\n\")\r\n of.close()\r\n return 0\r\n\r\n# main method\r\ndef main():\r\n \"\"\"\r\n\tmain()\r\n\t\r\n\tStarting point of program execution. \r\n\r\n \"\"\"\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == \"-h\" or arg == \"--help\":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == \"-v\" or arg == \"--verbose\":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == \"-f\" or arg == \"--file\":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + \"_results.txt\"\r\n\r\n #if arg == \"-u\" or arg == \"--url\":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == \"nt\":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()\r\n\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n \r\n#EOF\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
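The scanner in the record above flags a site as Joomla when at least three of its seven probes succeed. A condensed sketch of that scoring idea in the script's own urllib2 style (the probe list is shortened here for illustration; the full script also checks the meta tag, the admin page, the MooTools file and the en-GB language file):

import urllib2
from urllib2 import HTTPError, URLError

PROBES = [
    "/robots.txt",
    "/htaccess.txt",
    "/administrator/components/com_content/content.xml",
]

def joomla_hits(base_url):
    hits = 0
    for path in PROBES:
        try:
            body = urllib2.urlopen(base_url + path).read(4096)
        except (HTTPError, URLError):
            continue
        if "Joomla" in body:
            hits += 1
    return hits  # the full script requires >= 3 such hits out of 7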
x, y = [float(x) for x in raw_input().split(" ")]
print(x*y)
normal
{ "blob_id": "1ed7fb0dd5f0fa5e60c855eceaaf3259092918ef", "index": 1240, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(x * y)\n", "step-3": "x, y = [float(x) for x in raw_input().split(' ')]\nprint(x * y)\n", "step-4": "x, y = [float(x) for x in raw_input().split(\" \")]\nprint(x*y)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- coding: utf-8 -*-
import datetime
from urllib import parse

import scrapy
from scrapy import Request

from BrexitNews.items import BrexitNewsItem


def check_url(url):
    if url is not None:
        url = url.strip()
        if url != '' and url != 'None':
            return True
    return False


class TheguardianSpider(scrapy.Spider):

    name = 'ft'
    allowed_domains = ['www.ft.com']
    start_urls = ['https://www.ft.com/search?q=brexit&dateTo=2016-06-24&dateFrom=2016-06-16&sort=date']
    cookies = {}

    def article(self, response):
        brexit_news = BrexitNewsItem()
        title = response.xpath('string(//h1[@data-trackable="header"])').extract_first().strip()
        brexit_news['title'] = title
        text = ''
        for sel in response.xpath('//div[contains(@class,"article__content-body")]//p'):
            line = sel.xpath('string(.)').extract_first()
            if line is not None:
                text += line + '\n\n'
        brexit_news['text'] = text
        brexit_news['url'] = response.url
        brexit_news['media'] = 'ft'
        brexit_news['date'] = response.xpath('//time[contains(@class,"article-info__timestamp")]/@datetime').extract_first()[:10]
        yield brexit_news

    def parse(self, response):
        for sel in response.xpath('//li[@class="search-results__list-item"]//a[@data-trackable="heading-link"]'):
            article_url = parse.urljoin(response.url, sel.xpath('@href').extract_first())
            if check_url(article_url) and 'video' not in article_url:
                yield Request(article_url, self.article, cookies=self.cookies)

        # handle every page
        next_page_url = parse.urljoin(response.url, response.xpath('//a[@data-trackable="next-page"]/@href').extract_first())
        if check_url(next_page_url):
            yield Request(next_page_url, self.parse, cookies=self.cookies)
normal
{ "blob_id": "8180dac5d33334d7f16ab6bef41f1fe800879ca7", "index": 2255, "step-1": "<mask token>\n\n\nclass TheguardianSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def article(self, response):\n brexit_news = BrexitNewsItem()\n title = response.xpath('string(//h1[@data-trackable=\"header\"])'\n ).extract_first().strip()\n brexit_news['title'] = title\n text = ''\n for sel in response.xpath(\n '//div[contains(@class,\"article__content-body\")]//p'):\n line = sel.xpath('string(.)').extract_first()\n if line is not None:\n text += line + '\\n\\n'\n brexit_news['text'] = text\n brexit_news['url'] = response.url\n brexit_news['media'] = 'ft'\n brexit_news['date'] = response.xpath(\n '//time[contains(@class,\"article-info__timestamp\")]/@datetime'\n ).extract_first()[:10]\n yield brexit_news\n\n def parse(self, response):\n for sel in response.xpath(\n '//li[@class=\"search-results__list-item\"]//a[@data-trackable=\"heading-link\"]'\n ):\n article_url = parse.urljoin(response.url, sel.xpath('@href').\n extract_first())\n if check_url(article_url) and 'video' not in article_url:\n yield Request(article_url, self.article, cookies=self.cookies)\n next_page_url = parse.urljoin(response.url, response.xpath(\n '//a[@data-trackable=\"next-page\"]/@href').extract_first())\n if check_url(next_page_url):\n yield Request(next_page_url, self.parse, cookies=self.cookies)\n", "step-2": "<mask token>\n\n\nclass TheguardianSpider(scrapy.Spider):\n name = 'ft'\n allowed_domains = ['www.ft.com']\n start_urls = [\n 'https://www.ft.com/search?q=brexit&dateTo=2016-06-24&dateFrom=2016-06-16&sort=date'\n ]\n cookies = {}\n\n def article(self, response):\n brexit_news = BrexitNewsItem()\n title = response.xpath('string(//h1[@data-trackable=\"header\"])'\n ).extract_first().strip()\n brexit_news['title'] = title\n text = ''\n for sel in response.xpath(\n '//div[contains(@class,\"article__content-body\")]//p'):\n line = sel.xpath('string(.)').extract_first()\n if line is not None:\n text += line + '\\n\\n'\n brexit_news['text'] = text\n brexit_news['url'] = response.url\n brexit_news['media'] = 'ft'\n brexit_news['date'] = response.xpath(\n '//time[contains(@class,\"article-info__timestamp\")]/@datetime'\n ).extract_first()[:10]\n yield brexit_news\n\n def parse(self, response):\n for sel in response.xpath(\n '//li[@class=\"search-results__list-item\"]//a[@data-trackable=\"heading-link\"]'\n ):\n article_url = parse.urljoin(response.url, sel.xpath('@href').\n extract_first())\n if check_url(article_url) and 'video' not in article_url:\n yield Request(article_url, self.article, cookies=self.cookies)\n next_page_url = parse.urljoin(response.url, response.xpath(\n '//a[@data-trackable=\"next-page\"]/@href').extract_first())\n if check_url(next_page_url):\n yield Request(next_page_url, self.parse, cookies=self.cookies)\n", "step-3": "<mask token>\n\n\ndef check_url(url):\n if url is not None:\n url = url.strip()\n if url != '' and url != 'None':\n return True\n return False\n\n\nclass TheguardianSpider(scrapy.Spider):\n name = 'ft'\n allowed_domains = ['www.ft.com']\n start_urls = [\n 'https://www.ft.com/search?q=brexit&dateTo=2016-06-24&dateFrom=2016-06-16&sort=date'\n ]\n cookies = {}\n\n def article(self, response):\n brexit_news = BrexitNewsItem()\n title = response.xpath('string(//h1[@data-trackable=\"header\"])'\n ).extract_first().strip()\n brexit_news['title'] = title\n text = ''\n for sel in response.xpath(\n '//div[contains(@class,\"article__content-body\")]//p'):\n line = 
sel.xpath('string(.)').extract_first()\n if line is not None:\n text += line + '\\n\\n'\n brexit_news['text'] = text\n brexit_news['url'] = response.url\n brexit_news['media'] = 'ft'\n brexit_news['date'] = response.xpath(\n '//time[contains(@class,\"article-info__timestamp\")]/@datetime'\n ).extract_first()[:10]\n yield brexit_news\n\n def parse(self, response):\n for sel in response.xpath(\n '//li[@class=\"search-results__list-item\"]//a[@data-trackable=\"heading-link\"]'\n ):\n article_url = parse.urljoin(response.url, sel.xpath('@href').\n extract_first())\n if check_url(article_url) and 'video' not in article_url:\n yield Request(article_url, self.article, cookies=self.cookies)\n next_page_url = parse.urljoin(response.url, response.xpath(\n '//a[@data-trackable=\"next-page\"]/@href').extract_first())\n if check_url(next_page_url):\n yield Request(next_page_url, self.parse, cookies=self.cookies)\n", "step-4": "import datetime\nfrom urllib import parse\nimport scrapy\nfrom scrapy import Request\nfrom BrexitNews.items import BrexitNewsItem\n\n\ndef check_url(url):\n if url is not None:\n url = url.strip()\n if url != '' and url != 'None':\n return True\n return False\n\n\nclass TheguardianSpider(scrapy.Spider):\n name = 'ft'\n allowed_domains = ['www.ft.com']\n start_urls = [\n 'https://www.ft.com/search?q=brexit&dateTo=2016-06-24&dateFrom=2016-06-16&sort=date'\n ]\n cookies = {}\n\n def article(self, response):\n brexit_news = BrexitNewsItem()\n title = response.xpath('string(//h1[@data-trackable=\"header\"])'\n ).extract_first().strip()\n brexit_news['title'] = title\n text = ''\n for sel in response.xpath(\n '//div[contains(@class,\"article__content-body\")]//p'):\n line = sel.xpath('string(.)').extract_first()\n if line is not None:\n text += line + '\\n\\n'\n brexit_news['text'] = text\n brexit_news['url'] = response.url\n brexit_news['media'] = 'ft'\n brexit_news['date'] = response.xpath(\n '//time[contains(@class,\"article-info__timestamp\")]/@datetime'\n ).extract_first()[:10]\n yield brexit_news\n\n def parse(self, response):\n for sel in response.xpath(\n '//li[@class=\"search-results__list-item\"]//a[@data-trackable=\"heading-link\"]'\n ):\n article_url = parse.urljoin(response.url, sel.xpath('@href').\n extract_first())\n if check_url(article_url) and 'video' not in article_url:\n yield Request(article_url, self.article, cookies=self.cookies)\n next_page_url = parse.urljoin(response.url, response.xpath(\n '//a[@data-trackable=\"next-page\"]/@href').extract_first())\n if check_url(next_page_url):\n yield Request(next_page_url, self.parse, cookies=self.cookies)\n", "step-5": "# -*- coding: utf-8 -*-\nimport datetime\nfrom urllib import parse\n\nimport scrapy\nfrom scrapy import Request\n\nfrom BrexitNews.items import BrexitNewsItem\n\n\ndef check_url(url):\n if url is not None:\n url = url.strip()\n if url != '' and url != 'None':\n return True\n return False\n\n\nclass TheguardianSpider(scrapy.Spider):\n\n name = 'ft'\n allowed_domains = ['www.ft.com']\n start_urls = ['https://www.ft.com/search?q=brexit&dateTo=2016-06-24&dateFrom=2016-06-16&sort=date']\n cookies = {\n\n }\n\n\n def article(self, response):\n brexit_news = BrexitNewsItem()\n title = response.xpath('string(//h1[@data-trackable=\"header\"])').extract_first().strip()\n brexit_news['title'] = title\n text = ''\n for sel in response.xpath('//div[contains(@class,\"article__content-body\")]//p'):\n line = sel.xpath('string(.)').extract_first()\n if line is not None:\n text += line + '\\n\\n'\n brexit_news['text'] = 
text\n brexit_news['url'] = response.url\n brexit_news['media'] = 'ft'\n brexit_news['date'] = response.xpath('//time[contains(@class,\"article-info__timestamp\")]/@datetime').extract_first()[:10]\n # print(brexit_news)\n yield brexit_news\n\n\n def parse(self, response):\n\n for sel in response.xpath('//li[@class=\"search-results__list-item\"]//a[@data-trackable=\"heading-link\"]'):\n article_url = parse.urljoin(response.url, sel.xpath('@href').extract_first())\n if check_url(article_url) and 'video' not in article_url:\n yield Request(article_url, self.article, cookies=self.cookies)\n\n # handle every page\n next_page_url = parse.urljoin(response.url, response.xpath('//a[@data-trackable=\"next-page\"]/@href').extract_first())\n if check_url(next_page_url):\n yield Request(next_page_url, self.parse, cookies=self.cookies)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
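The spider above pairs a per-result callback with a self-recursive parse() that follows the next-page link, which is the standard Scrapy pagination pattern. A minimal generic sketch of that pattern (the URL and selectors here are illustrative placeholders, not the real ft.com markup):

import scrapy
from urllib.parse import urljoin

class PagedSpider(scrapy.Spider):
    name = 'paged'
    start_urls = ['https://example.com/search?page=1']

    def parse(self, response):
        # enqueue every result on the current page
        for href in response.xpath('//a[@class="result"]/@href').getall():
            yield scrapy.Request(urljoin(response.url, href), self.parse_item)
        # then follow the pagination link, re-entering parse()
        next_href = response.xpath('//a[@rel="next"]/@href').get()
        if next_href:
            yield scrapy.Request(urljoin(response.url, next_href), self.parse)

    def parse_item(self, response):
        yield {'url': response.url,
               'title': response.xpath('string(//h1)').get(default='').strip()}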
# Register all decoders
import ludwig.schema.decoders.base
import ludwig.schema.decoders.sequence_decoders  # noqa
normal
{ "blob_id": "53509d826b82211bac02ea5f545802007b06781c", "index": 1630, "step-1": "<mask token>\n", "step-2": "import ludwig.schema.decoders.base\nimport ludwig.schema.decoders.sequence_decoders\n", "step-3": "# Register all decoders\nimport ludwig.schema.decoders.base\nimport ludwig.schema.decoders.sequence_decoders # noqa\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from logupload import *

log = LogUpload()
log.uploadLogs(4)
normal
{ "blob_id": "421837698b7fc188c84a3221271f11a40d1625d9", "index": 7280, "step-1": "<mask token>\n", "step-2": "<mask token>\nlog.uploadLogs(4)\n", "step-3": "<mask token>\nlog = LogUpload()\nlog.uploadLogs(4)\n", "step-4": "from logupload import *\nlog = LogUpload()\nlog.uploadLogs(4)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from nintendo.nex import backend, authentication, friends, matchmaking, common
from nintendo.account import AccountAPI
from nintendo.games import MK8, Friends
import struct

import logging
logging.basicConfig(level=logging.INFO)

# Device id can be retrieved with a call to MCP_GetDeviceId on the Wii U
# Serial number can be found on the back of the Wii U
DEVICE_ID = 12345678
SERIAL_NUMBER = "..."
SYSTEM_VERSION = 0x220
REGION = 4  # EUR
COUNTRY = "NL"

USERNAME = "..."  # Nintendo network id
PASSWORD = "..."  # Nintendo network password


# This function logs in on a game server
def backend_login(title, use_auth_info, use_login_data, settings=None):
    api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)
    nex_token = api.get_nex_token(title.GAME_SERVER_ID)

    auth_info = None
    login_data = None
    if use_auth_info:
        auth_info = authentication.AuthenticationInfo()
        auth_info.token = nex_token.token
        auth_info.server_version = title.SERVER_VERSION
    if use_login_data:
        login_data = authentication.NintendoLoginData()
        login_data.token = nex_token.token

    client = backend.BackEndClient(settings)
    client.configure(title.ACCESS_KEY, title.NEX_VERSION)
    client.connect(nex_token.host, nex_token.port)
    client.login(
        nex_token.username, nex_token.password, auth_info, login_data
    )
    return client


api = AccountAPI()
api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)
api.login(USERNAME, PASSWORD)

# Connect to both the Mario Kart 8 server and the Wii U friends server
friends_backend = backend_login(
    Friends, False, True, "friends.cfg"
)
game_backend = backend_login(MK8, True, False)

pid = game_backend.get_pid()

friends_client = friends.FriendsClient(friends_backend.secure_client)
matchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)

# Create a matchmake session
matchmake_session = matchmaking.MatchmakeSession()
matchmake_session.player_min = 2
matchmake_session.player_max = 12
matchmake_session.participation_policy = 98
matchmake_session.game_mode = 3
matchmake_session.attribs[4] = 0x403  # DLCs enabled
matchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS

session_id = matchmaker.create_matchmake_session(
    matchmake_session, "", 1
).gid

# Tell friends we're playing MK8 and have created a room
application_data = b"\0\0\x20\x03\0\0\0\0\0\0\0\0\x18" + struct.pack("<I", pid) + b"\0\0\0"

presence = friends.NintendoPresenceV2()
presence.flags = 0x1EE
presence.is_online = True
presence.game_key.title_id = MK8.TITLE_ID_EUR
presence.game_key.title_version = MK8.LATEST_VERSION
presence.message = "I'm a Python client"
presence.unk2 = 2
presence.unk3 = 2
presence.game_server_id = MK8.GAME_SERVER_ID
presence.unk4 = 3
presence.pid = pid
presence.gathering_id = session_id
presence.application_data = application_data

friends_client.update_presence(presence)

input("Press enter to disconnect and exit\n")

# Tell friends we've gone offline
presence = friends.NintendoPresenceV2()
friends_client.update_presence(presence)

# Disconnect from servers
game_backend.close()
friends_backend.close()
normal
{ "blob_id": "43315abf9e096cdca89ed7f4de976d2706ff9c20", "index": 9234, "step-1": "<mask token>\n\n\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n nex_token = api.get_nex_token(title.GAME_SERVER_ID)\n auth_info = None\n login_data = None\n if use_auth_info:\n auth_info = authentication.AuthenticationInfo()\n auth_info.token = nex_token.token\n auth_info.server_version = title.SERVER_VERSION\n if use_login_data:\n login_data = authentication.NintendoLoginData()\n login_data.token = nex_token.token\n client = backend.BackEndClient(settings)\n clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n client.connect(nex_token.host, nex_token.port)\n client.login(nex_token.username, nex_token.password, auth_info, login_data)\n return client\n\n\n<mask token>\n", "step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO)\n<mask token>\n\n\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n nex_token = api.get_nex_token(title.GAME_SERVER_ID)\n auth_info = None\n login_data = None\n if use_auth_info:\n auth_info = authentication.AuthenticationInfo()\n auth_info.token = nex_token.token\n auth_info.server_version = title.SERVER_VERSION\n if use_login_data:\n login_data = authentication.NintendoLoginData()\n login_data.token = nex_token.token\n client = backend.BackEndClient(settings)\n clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n client.connect(nex_token.host, nex_token.port)\n client.login(nex_token.username, nex_token.password, auth_info, login_data)\n return client\n\n\n<mask token>\napi.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)\napi.login(USERNAME, PASSWORD)\n<mask token>\nfriends_client.update_presence(presence)\ninput('Press enter to disconnect and exit\\n')\n<mask token>\nfriends_client.update_presence(presence)\ngame_backend.close()\nfriends_backend.close()\n", "step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO)\nDEVICE_ID = 12345678\nSERIAL_NUMBER = '...'\nSYSTEM_VERSION = 544\nREGION = 4\nCOUNTRY = 'NL'\nUSERNAME = '...'\nPASSWORD = '...'\n\n\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n nex_token = api.get_nex_token(title.GAME_SERVER_ID)\n auth_info = None\n login_data = None\n if use_auth_info:\n auth_info = authentication.AuthenticationInfo()\n auth_info.token = nex_token.token\n auth_info.server_version = title.SERVER_VERSION\n if use_login_data:\n login_data = authentication.NintendoLoginData()\n login_data.token = nex_token.token\n client = backend.BackEndClient(settings)\n clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n client.connect(nex_token.host, nex_token.port)\n client.login(nex_token.username, nex_token.password, auth_info, login_data)\n return client\n\n\napi = AccountAPI()\napi.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)\napi.login(USERNAME, PASSWORD)\nfriends_backend = backend_login(Friends, False, True, 'friends.cfg')\ngame_backend = backend_login(MK8, True, False)\npid = game_backend.get_pid()\nfriends_client = friends.FriendsClient(friends_backend.secure_client)\nmatchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)\nmatchmake_session = matchmaking.MatchmakeSession()\nmatchmake_session.player_min = 2\nmatchmake_session.player_max = 12\nmatchmake_session.participation_policy = 98\nmatchmake_session.game_mode = 
3\nmatchmake_session.attribs[4] = 1027\nmatchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS\nsession_id = matchmaker.create_matchmake_session(matchmake_session, '', 1).gid\napplication_data = (b'\\x00\\x00 \\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x18' +\n struct.pack('<I', pid) + b'\\x00\\x00\\x00')\npresence = friends.NintendoPresenceV2()\npresence.flags = 494\npresence.is_online = True\npresence.game_key.title_id = MK8.TITLE_ID_EUR\npresence.game_key.title_version = MK8.LATEST_VERSION\npresence.message = \"I'm a Python client\"\npresence.unk2 = 2\npresence.unk3 = 2\npresence.game_server_id = MK8.GAME_SERVER_ID\npresence.unk4 = 3\npresence.pid = pid\npresence.gathering_id = session_id\npresence.application_data = application_data\nfriends_client.update_presence(presence)\ninput('Press enter to disconnect and exit\\n')\npresence = friends.NintendoPresenceV2()\nfriends_client.update_presence(presence)\ngame_backend.close()\nfriends_backend.close()\n", "step-4": "from nintendo.nex import backend, authentication, friends, matchmaking, common\nfrom nintendo.account import AccountAPI\nfrom nintendo.games import MK8, Friends\nimport struct\nimport logging\nlogging.basicConfig(level=logging.INFO)\nDEVICE_ID = 12345678\nSERIAL_NUMBER = '...'\nSYSTEM_VERSION = 544\nREGION = 4\nCOUNTRY = 'NL'\nUSERNAME = '...'\nPASSWORD = '...'\n\n\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n nex_token = api.get_nex_token(title.GAME_SERVER_ID)\n auth_info = None\n login_data = None\n if use_auth_info:\n auth_info = authentication.AuthenticationInfo()\n auth_info.token = nex_token.token\n auth_info.server_version = title.SERVER_VERSION\n if use_login_data:\n login_data = authentication.NintendoLoginData()\n login_data.token = nex_token.token\n client = backend.BackEndClient(settings)\n clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n client.connect(nex_token.host, nex_token.port)\n client.login(nex_token.username, nex_token.password, auth_info, login_data)\n return client\n\n\napi = AccountAPI()\napi.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)\napi.login(USERNAME, PASSWORD)\nfriends_backend = backend_login(Friends, False, True, 'friends.cfg')\ngame_backend = backend_login(MK8, True, False)\npid = game_backend.get_pid()\nfriends_client = friends.FriendsClient(friends_backend.secure_client)\nmatchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)\nmatchmake_session = matchmaking.MatchmakeSession()\nmatchmake_session.player_min = 2\nmatchmake_session.player_max = 12\nmatchmake_session.participation_policy = 98\nmatchmake_session.game_mode = 3\nmatchmake_session.attribs[4] = 1027\nmatchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS\nsession_id = matchmaker.create_matchmake_session(matchmake_session, '', 1).gid\napplication_data = (b'\\x00\\x00 \\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x18' +\n struct.pack('<I', pid) + b'\\x00\\x00\\x00')\npresence = friends.NintendoPresenceV2()\npresence.flags = 494\npresence.is_online = True\npresence.game_key.title_id = MK8.TITLE_ID_EUR\npresence.game_key.title_version = MK8.LATEST_VERSION\npresence.message = \"I'm a Python client\"\npresence.unk2 = 2\npresence.unk3 = 2\npresence.game_server_id = MK8.GAME_SERVER_ID\npresence.unk4 = 3\npresence.pid = pid\npresence.gathering_id = session_id\npresence.application_data = application_data\nfriends_client.update_presence(presence)\ninput('Press 
enter to disconnect and exit\\n')\npresence = friends.NintendoPresenceV2()\nfriends_client.update_presence(presence)\ngame_backend.close()\nfriends_backend.close()\n", "step-5": "\nfrom nintendo.nex import backend, authentication, friends, matchmaking, common\nfrom nintendo.account import AccountAPI\nfrom nintendo.games import MK8, Friends\nimport struct\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n#Device id can be retrieved with a call to MCP_GetDeviceId on the Wii U\n#Serial number can be found on the back of the Wii U\nDEVICE_ID = 12345678\nSERIAL_NUMBER = \"...\"\nSYSTEM_VERSION = 0x220\nREGION = 4 #EUR\nCOUNTRY = \"NL\"\n\nUSERNAME = \"...\" #Nintendo network id\nPASSWORD = \"...\" #Nintendo network password\n\n\n#This function logs in on a game server\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n\tapi.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n\tnex_token = api.get_nex_token(title.GAME_SERVER_ID)\n\n\tauth_info = None\n\tlogin_data = None\n\tif use_auth_info:\n\t\tauth_info = authentication.AuthenticationInfo()\n\t\tauth_info.token = nex_token.token\n\t\tauth_info.server_version = title.SERVER_VERSION\n\tif use_login_data:\n\t\tlogin_data = authentication.NintendoLoginData()\n\t\tlogin_data.token = nex_token.token\n\t\n\tclient = backend.BackEndClient(settings)\n\tclietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n\tclient.connect(nex_token.host, nex_token.port)\n\tclient.login(\n\t\tnex_token.username, nex_token.password, auth_info, login_data\n\t)\n\treturn client\n\n\napi = AccountAPI()\napi.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)\napi.login(USERNAME, PASSWORD)\n\n#Connect to both the Mario Kart 8 server and the Wii U friends server\nfriends_backend = backend_login(\n\tFriends, False, True, \"friends.cfg\"\n)\ngame_backend = backend_login(MK8, True, False)\n\npid = game_backend.get_pid()\n\nfriends_client = friends.FriendsClient(friends_backend.secure_client)\nmatchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)\n\n#Create a matchmake session\nmatchmake_session = matchmaking.MatchmakeSession()\nmatchmake_session.player_min = 2\nmatchmake_session.player_max = 12\nmatchmake_session.participation_policy = 98\nmatchmake_session.game_mode = 3\nmatchmake_session.attribs[4] = 0x403 #DLCs enabled\nmatchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS\n\nsession_id = matchmaker.create_matchmake_session(\n\tmatchmake_session, \"\", 1\n).gid\n\n#Tell friends we're playing MK8 and have created a room\napplication_data = b\"\\0\\0\\x20\\x03\\0\\0\\0\\0\\0\\0\\0\\0\\x18\" + struct.pack(\"<I\", pid) + b\"\\0\\0\\0\"\n\npresence = friends.NintendoPresenceV2()\npresence.flags = 0x1EE\npresence.is_online = True\npresence.game_key.title_id = MK8.TITLE_ID_EUR\npresence.game_key.title_version = MK8.LATEST_VERSION\npresence.message = \"I'm a Python client\"\npresence.unk2 = 2\npresence.unk3 = 2\npresence.game_server_id = MK8.GAME_SERVER_ID\npresence.unk4 = 3\npresence.pid = pid\npresence.gathering_id = session_id\npresence.application_data = application_data\n\nfriends_client.update_presence(presence)\n\ninput(\"Press enter to disconnect and exit\\n\")\n\n#Tell friends we've gone offline\npresence = friends.NintendoPresenceV2()\nfriends_client.update_presence(presence)\n\n#Disconnect from servers\ngame_backend.close()\nfriends_backend.close()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
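Every step variant in the record above carries the same typo: clietn.configure(...) refers to the client created one line earlier, so none of the variants run as written. A corrected sketch of the login helper, using the nintendo library API exactly as it appears in the record (the library calls themselves are taken on faith from the source):

def backend_login(title, use_auth_info, use_login_data, settings=None):
    api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)
    nex_token = api.get_nex_token(title.GAME_SERVER_ID)
    auth_info = None
    login_data = None
    if use_auth_info:
        auth_info = authentication.AuthenticationInfo()
        auth_info.token = nex_token.token
        auth_info.server_version = title.SERVER_VERSION
    if use_login_data:
        login_data = authentication.NintendoLoginData()
        login_data.token = nex_token.token
    client = backend.BackEndClient(settings)
    client.configure(title.ACCESS_KEY, title.NEX_VERSION)  # was: clietn.configure(...)
    client.connect(nex_token.host, nex_token.port)
    client.login(nex_token.username, nex_token.password, auth_info, login_data)
    return client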
__author__ = 'zhaobin022'


class Cmd(object):
    pass
normal
{ "blob_id": "0eca1693caffcd9fe32a8a54ca3a33687763e5ce", "index": 6809, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Cmd(object):\n pass\n", "step-3": "__author__ = 'zhaobin022'\n\n\nclass Cmd(object):\n pass\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
'''
Each new term in the Fibonacci sequence is generated by adding the previous two terms.
By starting with 1 and 2, the first 10 terms will be:

1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...

By considering the terms in the Fibonacci sequence whose values do not exceed four million,
find the sum of the even-valued terms.
'''
def fib(n):
    '''
    Binet's formula for the nth Fibonacci number
    http://mathworld.wolfram.com/BinetsFibonacciNumberFormula.html
    ((1+sqrt(5))**n-(1-sqrt(5))**n)/(2**n*sqrt(5))
    '''
    return int(0.4472135954999579392818347337462552470881236719223051448541*
        (pow(1.6180339887498948482045868343656381177203091798057628621354, n) -
         pow(-0.618033988749894848204586834365638117720309179805762862135, n)))

total = 0
limit = 4000000
for k in range(2, limit):
    x = fib(k)
    if x > limit:
        break
    if x % 2 == 0:
        total += x
print(total)
normal
{ "blob_id": "02a1f84e72b412636d86b9bdb59856ae8c309255", "index": 9373, "step-1": "'''\nEach new term in the Fibonacci sequence is generated by adding the previous two terms. \nBy starting with 1 and 2, the first 10 terms will be:\n\n1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...\n\nBy considering the terms in the Fibonacci sequence whose values do not exceed four million, \nfind the sum of the even-valued terms.\n'''\ndef fib(n):\n '''\n Binet's formula for nth Fibonacci number\n http://mathworld.wolfram.com/BinetsFibonacciNumberFormula.html\n ((1+sqrt(5))**n-(1-sqrt(5))**n)/(2**n*sqrt(5)) \n '''\n return int(0.4472135954999579392818347337462552470881236719223051448541*\n (pow(1.6180339887498948482045868343656381177203091798057628621354,n) - \n pow(-0.618033988749894848204586834365638117720309179805762862135,n)))\n \ntotal = 0\nmax = 4000000\nfor k in range(2, max):\n x = fib(k)\n if x > max:\n break\n if x % 2 == 0:\n total += x\nprint total\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
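Binet's closed form runs in double precision and the int() call truncates instead of rounding, so it is worth cross-checking against the iterative definition over the range the loop above actually visits (fib here is the function defined in the record):

def fib_iter(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a  # F(1) == F(2) == 1, F(3) == 2, ...

for n in range(2, 40):
    if fib(n) != fib_iter(n):
        print('closed form drifts at n = %d: %d vs %d' % (n, fib(n), fib_iter(n)))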
# 2017 is a prime number. For 2017 to divide sigma(a) = Product((p**(e+1)-1) // (p-1)) over the prime-power factors p**e of a, at least one factor must satisfy ((p**(e+1)-1) // (p-1)) % 2017 == 0.
# So we only need to check all such primes 'p' and count all k*p for k = 1..N//p. We check p**e with e >= 2 by brute force over all primes. For e = 1 we generate all candidates by the formula p = k*2017 - 1 and test them for primality in an efficient way.
# We also take care of duplicates: p' = k*p is not a qualified number if k is a qualified number itself.
from tools import *
from math import ceil

def sig(f):
    P = 1
    for p, a in f:
        P *= (p**(a+1)-1) // (p-1)
    return P

err = 1e-10

def calc(N, D):
    def check(n):
        return not sig(factor(n)) % D
    cachePrimes(int(N**.5))
    def a2n(a):
        return (a + 1) // D
    from math import log
    def genPrimeSigDivs(minA, maxA):  # generate all primes and prime powers with sig % D == 0
        if minA > maxA:
            return None
        maxE = int(ceil(log(maxA)/log(2)))
        for e in range(2, maxE):  # for power >= 2 check all primes
            for p in primesN2M(ceil(minA**(1/e)-err), int(maxA**(1/e)+err)):
                if ((p**(e+1)-1) // (p-1)) % D == 0:
                    yield p, e

        for n in range(ceil((minA + 1)/D-err), (maxA+1)//D+1):  # for power == 1 check all D*k-1 for primality
            p = n * D - 1
            if isPrimeMillerRabin(p, 5):
                yield p, 1

    def excl(p, e, N=N):
        a = p**e
        mem = set(range(0, N+1, a*p))

        for p2, e2 in genPrimeSigDivs(a+1, N//a):
            ex2 = excl(p2, e2, N//a)
            for a2 in range(0, N+1, a*p2**e2):
                k2 = a2//a
                if check(k2) and k2 not in ex2 and a2 not in mem:
                    mem.add(a2)
        return mem

    elapsed()
    resetTime()

    res = 0
    for p, e in genPrimeSigDivs(D-1, N):  # walk through all qualified primes**e
        a = p**e
        k = N // a
        res += a*k*(k + 1) // 2  # sum of all qualified numbers
        res -= sum(excl(p, e))

        if a % 10000 == 1:
            elapsed((a, 1, N))
    return res

print(calc(10**11, 2017))

elapsed()
normal
{ "blob_id": "fabd3f233753f63d731a43c8b8b311e50d9deefe", "index": 6349, "step-1": "<mask token>\n\n\ndef calc(N, D):\n\n def check(n):\n return not sig(factor(n)) % D\n cachePrimes(int(N ** 0.5))\n\n def a2n(a):\n return (a + 1) // D\n from math import log\n\n def genPrimeSigDivs(minA, maxA):\n if minA > maxA:\n return None\n maxE = int(ceil(log(maxA) / log(2)))\n for e in range(2, maxE):\n for p in primesN2M(ceil(minA ** (1 / e) - err), int(maxA ** (1 /\n e) + err)):\n if (p ** (e + 1) - 1) // (p - 1) % D == 0:\n yield p, e\n for n in range(ceil((minA + 1) / D - err), (maxA + 1) // D + 1):\n p = n * D - 1\n if isPrimeMillerRabin(p, 5):\n yield p, 1\n\n def excl(p, e, N=N):\n a = p ** e\n mem = set(range(0, N + 1, a * p))\n for p2, e2 in genPrimeSigDivs(a + 1, N // a):\n ex2 = excl(p2, e2, N // a)\n for a2 in range(0, N + 1, a * p2 ** e2):\n k2 = a2 // a\n if check(k2) and not k2 in ex2 and not a2 in mem:\n mem.add(a2)\n return mem\n elapsed()\n resetTime()\n res = 0\n for p, e in genPrimeSigDivs(D - 1, N):\n a = p ** e\n k = N // a\n res += a * k * (k + 1) // 2\n res -= sum(excl(p, e))\n if a % 10000 == 1:\n elapsed((a, 1, N))\n return res\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef sig(f):\n P = 1\n for p, a in f:\n P *= (p ** (a + 1) - 1) // (p - 1)\n return P\n\n\n<mask token>\n\n\ndef calc(N, D):\n\n def check(n):\n return not sig(factor(n)) % D\n cachePrimes(int(N ** 0.5))\n\n def a2n(a):\n return (a + 1) // D\n from math import log\n\n def genPrimeSigDivs(minA, maxA):\n if minA > maxA:\n return None\n maxE = int(ceil(log(maxA) / log(2)))\n for e in range(2, maxE):\n for p in primesN2M(ceil(minA ** (1 / e) - err), int(maxA ** (1 /\n e) + err)):\n if (p ** (e + 1) - 1) // (p - 1) % D == 0:\n yield p, e\n for n in range(ceil((minA + 1) / D - err), (maxA + 1) // D + 1):\n p = n * D - 1\n if isPrimeMillerRabin(p, 5):\n yield p, 1\n\n def excl(p, e, N=N):\n a = p ** e\n mem = set(range(0, N + 1, a * p))\n for p2, e2 in genPrimeSigDivs(a + 1, N // a):\n ex2 = excl(p2, e2, N // a)\n for a2 in range(0, N + 1, a * p2 ** e2):\n k2 = a2 // a\n if check(k2) and not k2 in ex2 and not a2 in mem:\n mem.add(a2)\n return mem\n elapsed()\n resetTime()\n res = 0\n for p, e in genPrimeSigDivs(D - 1, N):\n a = p ** e\n k = N // a\n res += a * k * (k + 1) // 2\n res -= sum(excl(p, e))\n if a % 10000 == 1:\n elapsed((a, 1, N))\n return res\n\n\nprint(calc(10 ** 11, 2017))\nelapsed()\n", "step-3": "<mask token>\n\n\ndef sig(f):\n P = 1\n for p, a in f:\n P *= (p ** (a + 1) - 1) // (p - 1)\n return P\n\n\nerr = 1e-10\n\n\ndef calc(N, D):\n\n def check(n):\n return not sig(factor(n)) % D\n cachePrimes(int(N ** 0.5))\n\n def a2n(a):\n return (a + 1) // D\n from math import log\n\n def genPrimeSigDivs(minA, maxA):\n if minA > maxA:\n return None\n maxE = int(ceil(log(maxA) / log(2)))\n for e in range(2, maxE):\n for p in primesN2M(ceil(minA ** (1 / e) - err), int(maxA ** (1 /\n e) + err)):\n if (p ** (e + 1) - 1) // (p - 1) % D == 0:\n yield p, e\n for n in range(ceil((minA + 1) / D - err), (maxA + 1) // D + 1):\n p = n * D - 1\n if isPrimeMillerRabin(p, 5):\n yield p, 1\n\n def excl(p, e, N=N):\n a = p ** e\n mem = set(range(0, N + 1, a * p))\n for p2, e2 in genPrimeSigDivs(a + 1, N // a):\n ex2 = excl(p2, e2, N // a)\n for a2 in range(0, N + 1, a * p2 ** e2):\n k2 = a2 // a\n if check(k2) and not k2 in ex2 and not a2 in mem:\n mem.add(a2)\n return mem\n elapsed()\n resetTime()\n res = 0\n for p, e in genPrimeSigDivs(D - 1, N):\n a = p ** e\n k = N // a\n res += a * k * (k + 1) // 2\n res -= 
sum(excl(p, e))\n if a % 10000 == 1:\n elapsed((a, 1, N))\n return res\n\n\nprint(calc(10 ** 11, 2017))\nelapsed()\n", "step-4": "from tools import *\nfrom math import ceil\n\n\ndef sig(f):\n P = 1\n for p, a in f:\n P *= (p ** (a + 1) - 1) // (p - 1)\n return P\n\n\nerr = 1e-10\n\n\ndef calc(N, D):\n\n def check(n):\n return not sig(factor(n)) % D\n cachePrimes(int(N ** 0.5))\n\n def a2n(a):\n return (a + 1) // D\n from math import log\n\n def genPrimeSigDivs(minA, maxA):\n if minA > maxA:\n return None\n maxE = int(ceil(log(maxA) / log(2)))\n for e in range(2, maxE):\n for p in primesN2M(ceil(minA ** (1 / e) - err), int(maxA ** (1 /\n e) + err)):\n if (p ** (e + 1) - 1) // (p - 1) % D == 0:\n yield p, e\n for n in range(ceil((minA + 1) / D - err), (maxA + 1) // D + 1):\n p = n * D - 1\n if isPrimeMillerRabin(p, 5):\n yield p, 1\n\n def excl(p, e, N=N):\n a = p ** e\n mem = set(range(0, N + 1, a * p))\n for p2, e2 in genPrimeSigDivs(a + 1, N // a):\n ex2 = excl(p2, e2, N // a)\n for a2 in range(0, N + 1, a * p2 ** e2):\n k2 = a2 // a\n if check(k2) and not k2 in ex2 and not a2 in mem:\n mem.add(a2)\n return mem\n elapsed()\n resetTime()\n res = 0\n for p, e in genPrimeSigDivs(D - 1, N):\n a = p ** e\n k = N // a\n res += a * k * (k + 1) // 2\n res -= sum(excl(p, e))\n if a % 10000 == 1:\n elapsed((a, 1, N))\n return res\n\n\nprint(calc(10 ** 11, 2017))\nelapsed()\n", "step-5": "# 217 is a prime number. In order 2017 to be a divisor of for sigma(a)=Product((p**(n+1)-1) // (p-1) for all divisors) it must be a power of a prime with (p**(a+1)-1) // (p-1) % 2017 == 0\r\n# so we need only to check all such primes 'p' and count all k*p for k=1..N//p. We check p^n with n>=2 by brute force all primes. For n = 1 we generate all candidates by the formula p = k*2017-1 and check if it's a prime in a effective way.\r\n# we also take care of duplicates when p'=k*p is not a qualified number if k is a qualified number itself.\r\nfrom tools import *\r\nfrom math import ceil\r\n\r\ndef sig(f):\r\n P = 1\r\n for p,a in f:\r\n P *= (p**(a+1)-1) // (p-1)\r\n return P\r\n\r\nerr = 1e-10\r\n\r\ndef calc(N, D):\r\n def check(n):\r\n return not sig(factor(n)) % D\r\n cachePrimes(int(N**.5))\r\n def a2n(a):\r\n return (a + 1) // D\r\n from math import log\r\n def genPrimeSigDivs(minA, maxA): # generate all primes and primes**k with sig % 2017 == 0\r\n if minA > maxA:\r\n return None\r\n maxE = int(ceil(log(maxA)/log(2)))\r\n for e in range(2, maxE): # for power >= 2 check all primes\r\n for p in primesN2M(ceil(minA**(1/e)-err), int(maxA**(1/e)+err)):\r\n if ((p**(e+1)-1) // (p-1)) % D == 0:\r\n yield p, e\r\n\r\n for n in range(ceil((minA + 1)/D-err), (maxA+1)//D+1): # for power==1 check all 2017*k-1 for primality\r\n p = n * D - 1\r\n if isPrimeMillerRabin(p, 5): \r\n yield p, 1\r\n\r\n def excl(p, e, N=N):\r\n a = p**e\r\n mem = set(range(0, N+1, a*p))\r\n\r\n for p2, e2 in genPrimeSigDivs(a+1, N//a):\r\n ex2 = excl(p2, e2, N//a)\r\n for a2 in range(0, N+1, a*p2**e2):\r\n k2 = a2//a\r\n if check(k2) and not k2 in ex2 and not a2 in mem:\r\n mem.add(a2)\r\n return mem\r\n\r\n elapsed()\r\n resetTime()\r\n\r\n res = 0\r\n for p, e in genPrimeSigDivs(D-1, N): # walk though all qualified primes**e\r\n a = p**e\r\n k = N // a\r\n res += a*k*(k + 1) // 2 # sum of all qualified numbers\r\n res -= sum(excl(p, e))\r\n\r\n if a % 10000 == 1:\r\n elapsed((a, 1, N))\r\n return res\r\n\r\nprint(calc(10**11, 2017))\r\n\r\nelapsed()\r\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
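The solution above depends on helpers from a private tools module (factor, cachePrimes, primesN2M, isPrimeMillerRabin, elapsed), so it cannot run on its own. The identity it rests on, that sigma is multiplicative with sigma(p**e) = (p**(e+1) - 1) // (p - 1), is easy to cross-check with self-contained brute force (all names below are mine):

def sigma_naive(n):
    return sum(d for d in range(1, n + 1) if n % d == 0)

def factorize(n):
    f, p = [], 2
    while p * p <= n:
        e = 0
        while n % p == 0:
            n //= p
            e += 1
        if e:
            f.append((p, e))
        p += 1
    if n > 1:
        f.append((n, 1))
    return f

def sigma_product(n):
    total = 1
    for p, e in factorize(n):
        total *= (p ** (e + 1) - 1) // (p - 1)
    return total

assert all(sigma_naive(n) == sigma_product(n) for n in range(1, 1000))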
#!/usr/bin/env python3
import socket

HOST = '127.0.0.1'
PORT = 4444

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
normal
{ "blob_id": "14a39b9aa56777c8198794fe2f51c9a068500743", "index": 4075, "step-1": "<mask token>\n", "step-2": "<mask token>\ns.connect((HOST, PORT))\n", "step-3": "<mask token>\nHOST = '127.0.0.1'\nPORT = 4444\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\n", "step-4": "import socket\nHOST = '127.0.0.1'\nPORT = 4444\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\n", "step-5": "#!/bin/python3\nimport socket\nHOST = '127.0.0.1'\nPORT= 4444\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST,PORT))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os.path import numpy as np import matplotlib.pyplot as plt import util import collections def learn_distributions(file_lists_by_category): """ Estimate the parameters p_d, and q_d from the training set Input ----- file_lists_by_category: A two-element list. The first element is a list of spam files, and the second element is a list of ham files. Output ------ probabilities_by_category: A two-element tuple. The first element is a dict whose keys are words, and whose values are the smoothed estimates of p_d; the second element is a dict whose keys are words, and whose values are the smoothed estimates of q_d """ ### TODO: Write your code here #get word frequncies in each email category #key:word, value: number of occurences in this email loader spam_dict = util.get_word_freq(file_lists_by_category[0]) ham_dict = util.get_word_freq(file_lists_by_category[1]) #get total length of each email loader spam_length = sum(spam_dict.values()) ham_length = sum(ham_dict.values()) #get the length of the dictionary: D dict_D = util.Counter() for key in spam_dict: dict_D[key] += spam_dict[key] for key in ham_dict: dict_D[key] += ham_dict[key] D = len(dict_D) spam_distribution = {} ham_distribution = {} #get the distributions of two email loaders for i in dict_D: spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length) for i in dict_D: ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length) #create the required tuple probabilities_by_category = (spam_distribution, ham_distribution) return probabilities_by_category def classify_new_email(filename,probabilities_by_category,prior_by_category): """ Use Naive Bayes classification to classify the email in the given file. Inputs ------ filename: name of the file to be classified probabilities_by_category: output of function learn_distributions prior_by_category: A two-element list as [\pi, 1-\pi], where \pi is the parameter in the prior class distribution Output ------ classify_result: A two-element tuple. 
The first element is a string whose value is either 'spam' or 'ham' depending on the classification result, and the second element is a two-element list as [log p(y=1|x), log p(y=0|x)], representing the log posterior probabilities """ ### TODO: Write your code here spam_distribution = 0 ham_distribution = 0 word_frequency = util.get_word_freq([filename]) for w in word_frequency: if w in probabilities_by_category[0]: spam_distribution += word_frequency[w] * np.log(probabilities_by_category[0][w]) if w in probabilities_by_category[1]: ham_distribution += word_frequency[w] * np.log(probabilities_by_category[1][w]) spam_distribution += np.log(prior_by_category[0]) ham_distribution += np.log(prior_by_category[1]) predict = "" if(spam_distribution > ham_distribution): predict = "spam" else: predict = "ham" word_distribution = [spam_distribution, ham_distribution] classify_result = (predict, word_distribution) return classify_result if __name__ == '__main__': # folder for training and testing spam_folder = "data/spam" ham_folder = "data/ham" test_folder = "data/testing" # generate the file lists for training file_lists = [] for folder in (spam_folder, ham_folder): file_lists.append(util.get_files_in_folder(folder)) # Learn the distributions probabilities_by_category = learn_distributions(file_lists) # prior class distribution priors_by_category = [0.5, 0.5] # Store the classification results performance_measures = np.zeros([2,2]) # explanation of performance_measures: # columns and rows are indexed by 0 = 'spam' and 1 = 'ham' # rows correspond to true label, columns correspond to guessed label # to be more clear, performance_measures = [[p1 p2] # [p3 p4]] # p1 = Number of emails whose true label is 'spam' and classified as 'spam' # p2 = Number of emails whose true label is 'spam' and classified as 'ham' # p3 = Number of emails whose true label is 'ham' and classified as 'spam' # p4 = Number of emails whose true label is 'ham' and classified as 'ham' # Classify emails from testing set and measure the performance for filename in (util.get_files_in_folder(test_folder)): # Classify label,log_posterior = classify_new_email(filename, probabilities_by_category, priors_by_category) # Measure performance (the filename indicates the true label) base = os.path.basename(filename) true_index = ('ham' in base) guessed_index = (label == 'ham') performance_measures[int(true_index), int(guessed_index)] += 1 template="You correctly classified %d out of %d spam emails, and %d out of %d ham emails." 
    # Correct counts are on the diagonal
    correct = np.diag(performance_measures)
    # totals are obtained by summing across guessed labels
    totals = np.sum(performance_measures, 1)
    print(template % (correct[0], totals[0], correct[1], totals[1]))

    ### TODO: Write your code here to modify the decision rule such that
    ### Type 1 and Type 2 errors can be traded off, plot the trade-off curve
    print("----- Type 1 / Type 2 error trade-off -----")
    # Shift the decision boundary by a constant offset on the spam log-posterior;
    # large negative offsets favor 'ham', large positive offsets favor 'spam'.
    offset = [-1E2, -1E1, -1E0, 1E0, 1E1]
    type1 = []  # true spam classified as ham
    type2 = []  # true ham classified as spam
    for offset_value in offset:
        performance_measures = np.zeros([2, 2])
        for filename in util.get_files_in_folder(test_folder):
            # Classify
            label, log_posterior = classify_new_email(filename,
                                                      probabilities_by_category,
                                                      priors_by_category)

            # add the offset to the spam log-posterior before deciding
            if log_posterior[0] + offset_value > log_posterior[1]:
                label = "spam"
            else:
                label = "ham"

            # Measure performance (the filename indicates the true label)
            base = os.path.basename(filename)
            true_index = ('ham' in base)
            guessed_index = (label == 'ham')
            performance_measures[int(true_index), int(guessed_index)] += 1

        type1.append(performance_measures[0][1])
        type2.append(performance_measures[1][0])

        template = "You correctly classified %d out of %d spam emails, and %d out of %d ham emails."
        # Correct counts are on the diagonal
        correct = np.diag(performance_measures)
        # totals are obtained by summing across guessed labels
        totals = np.sum(performance_measures, 1)
        print(template % (correct[0], totals[0], correct[1], totals[1]))

    plt.title("Type1 vs Type2 Error")
    for i in range(0, len(type1)):
        plt.scatter(type1[i], type2[i])

    plt.xlabel("type1")
    plt.ylabel("type2")
    plt.legend(offset, loc='best')
    plt.show()
normal
{ "blob_id": "7ed84706ace2cbf523021887df1e13d113f9ce4c", "index": 4172, "step-1": "<mask token>\n\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n spam_distribution = {}\n ham_distribution = {}\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n probabilities_by_category = spam_distribution, ham_distribution\n return probabilities_by_category\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n spam_distribution = {}\n ham_distribution = {}\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n probabilities_by_category = spam_distribution, ham_distribution\n return probabilities_by_category\n\n\ndef classify_new_email(filename, probabilities_by_category, prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as [\\\\pi, 1-\\\\pi], where \\\\pi is the\n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. 
The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the\n second element is a two-element list as [log p(y=1|x), log p(y=0|x)],\n representing the log posterior probabilities\n \"\"\"\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(\n probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(\n probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n predict = ''\n if spam_distribution > ham_distribution:\n predict = 'spam'\n else:\n predict = 'ham'\n word_distribution = [spam_distribution, ham_distribution]\n classify_result = predict, word_distribution\n return classify_result\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n spam_distribution = {}\n ham_distribution = {}\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n probabilities_by_category = spam_distribution, ham_distribution\n return probabilities_by_category\n\n\ndef classify_new_email(filename, probabilities_by_category, prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as [\\\\pi, 1-\\\\pi], where \\\\pi is the\n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. 
The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the\n second element is a two-element list as [log p(y=1|x), log p(y=0|x)],\n representing the log posterior probabilities\n \"\"\"\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(\n probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(\n probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n predict = ''\n if spam_distribution > ham_distribution:\n predict = 'spam'\n else:\n predict = 'ham'\n word_distribution = [spam_distribution, ham_distribution]\n classify_result = predict, word_distribution\n return classify_result\n\n\nif __name__ == '__main__':\n spam_folder = 'data/spam'\n ham_folder = 'data/ham'\n test_folder = 'data/testing'\n file_lists = []\n for folder in (spam_folder, ham_folder):\n file_lists.append(util.get_files_in_folder(folder))\n probabilities_by_category = learn_distributions(file_lists)\n priors_by_category = [0.5, 0.5]\n performance_measures = np.zeros([2, 2])\n for filename in util.get_files_in_folder(test_folder):\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category, priors_by_category)\n base = os.path.basename(filename)\n true_index = 'ham' in base\n guessed_index = label == 'ham'\n performance_measures[int(true_index), int(guessed_index)] += 1\n template = (\n 'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'\n )\n correct = np.diag(performance_measures)\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n print('----type 1 and 2 here-----')\n offset = [-100.0, -10.0, -1.0, 1.0, 10.0]\n type1 = []\n type2 = []\n for offset_value in offset:\n performance_measures = np.zeros([2, 2])\n for filename in util.get_files_in_folder(test_folder):\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category, priors_by_category)\n if log_posterior[0] + offset_value > log_posterior[1]:\n label = 'spam'\n else:\n label = 'ham'\n base = os.path.basename(filename)\n true_index = 'ham' in base\n guessed_index = label == 'ham'\n performance_measures[int(true_index), int(guessed_index)] += 1\n type1.append(performance_measures[0][1])\n type2.append(performance_measures[1][0])\n template = (\n 'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'\n )\n correct = np.diag(performance_measures)\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n plt.title('Type1 vs Type2 Error')\n for i in range(0, len(type1)):\n plt.scatter(type1[i], type2[i])\n plt.xlabel('type1')\n plt.ylabel('type2')\n plt.legend(offset, loc='best')\n plt.show()\n", "step-4": "import os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport util\nimport collections\n\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. 
The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n spam_distribution = {}\n ham_distribution = {}\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n probabilities_by_category = spam_distribution, ham_distribution\n return probabilities_by_category\n\n\ndef classify_new_email(filename, probabilities_by_category, prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as [\\\\pi, 1-\\\\pi], where \\\\pi is the\n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the\n second element is a two-element list as [log p(y=1|x), log p(y=0|x)],\n representing the log posterior probabilities\n \"\"\"\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(\n probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(\n probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n predict = ''\n if spam_distribution > ham_distribution:\n predict = 'spam'\n else:\n predict = 'ham'\n word_distribution = [spam_distribution, ham_distribution]\n classify_result = predict, word_distribution\n return classify_result\n\n\nif __name__ == '__main__':\n spam_folder = 'data/spam'\n ham_folder = 'data/ham'\n test_folder = 'data/testing'\n file_lists = []\n for folder in (spam_folder, ham_folder):\n file_lists.append(util.get_files_in_folder(folder))\n probabilities_by_category = learn_distributions(file_lists)\n priors_by_category = [0.5, 0.5]\n performance_measures = np.zeros([2, 2])\n for filename in util.get_files_in_folder(test_folder):\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category, priors_by_category)\n base = os.path.basename(filename)\n true_index = 'ham' in base\n guessed_index = label == 'ham'\n performance_measures[int(true_index), int(guessed_index)] += 1\n template = (\n 'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'\n )\n correct = np.diag(performance_measures)\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n print('----type 1 and 2 here-----')\n offset = [-100.0, -10.0, -1.0, 1.0, 10.0]\n type1 = []\n type2 = []\n for offset_value in offset:\n performance_measures = np.zeros([2, 2])\n for filename in util.get_files_in_folder(test_folder):\n label, log_posterior = 
classify_new_email(filename,\n probabilities_by_category, priors_by_category)\n if log_posterior[0] + offset_value > log_posterior[1]:\n label = 'spam'\n else:\n label = 'ham'\n base = os.path.basename(filename)\n true_index = 'ham' in base\n guessed_index = label == 'ham'\n performance_measures[int(true_index), int(guessed_index)] += 1\n type1.append(performance_measures[0][1])\n type2.append(performance_measures[1][0])\n template = (\n 'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'\n )\n correct = np.diag(performance_measures)\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n plt.title('Type1 vs Type2 Error')\n for i in range(0, len(type1)):\n plt.scatter(type1[i], type2[i])\n plt.xlabel('type1')\n plt.ylabel('type2')\n plt.legend(offset, loc='best')\n plt.show()\n", "step-5": "import os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport util\nimport collections\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n ### TODO: Write your code here\n\n #get word frequncies in each email category\n #key:word, value: number of occurences in this email loader\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n\n #get total length of each email loader\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n\n #get the length of the dictionary: D\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n\n spam_distribution = {}\n ham_distribution = {}\n #get the distributions of two email loaders\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n #create the required tuple\n probabilities_by_category = (spam_distribution, ham_distribution)\n return probabilities_by_category\n\n\ndef classify_new_email(filename,probabilities_by_category,prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as [\\pi, 1-\\pi], where \\pi is the\n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. 
The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the\n second element is a two-element list as [log p(y=1|x), log p(y=0|x)],\n representing the log posterior probabilities\n \"\"\"\n ### TODO: Write your code here\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n\n predict = \"\"\n if(spam_distribution > ham_distribution):\n predict = \"spam\"\n else:\n predict = \"ham\"\n\n word_distribution = [spam_distribution, ham_distribution]\n\n classify_result = (predict, word_distribution)\n\n return classify_result\n\nif __name__ == '__main__':\n\n # folder for training and testing\n spam_folder = \"data/spam\"\n ham_folder = \"data/ham\"\n test_folder = \"data/testing\"\n\n # generate the file lists for training\n file_lists = []\n for folder in (spam_folder, ham_folder):\n file_lists.append(util.get_files_in_folder(folder))\n\n\n # Learn the distributions\n probabilities_by_category = learn_distributions(file_lists)\n\n # prior class distribution\n priors_by_category = [0.5, 0.5]\n\n # Store the classification results\n performance_measures = np.zeros([2,2])\n # explanation of performance_measures:\n # columns and rows are indexed by 0 = 'spam' and 1 = 'ham'\n # rows correspond to true label, columns correspond to guessed label\n # to be more clear, performance_measures = [[p1 p2]\n # [p3 p4]]\n # p1 = Number of emails whose true label is 'spam' and classified as 'spam'\n # p2 = Number of emails whose true label is 'spam' and classified as 'ham'\n # p3 = Number of emails whose true label is 'ham' and classified as 'spam'\n # p4 = Number of emails whose true label is 'ham' and classified as 'ham'\n\n # Classify emails from testing set and measure the performance\n for filename in (util.get_files_in_folder(test_folder)):\n # Classify\n label,log_posterior = classify_new_email(filename,\n probabilities_by_category,\n priors_by_category)\n\n # Measure performance (the filename indicates the true label)\n base = os.path.basename(filename)\n true_index = ('ham' in base)\n guessed_index = (label == 'ham')\n performance_measures[int(true_index), int(guessed_index)] += 1\n\n template=\"You correctly classified %d out of %d spam emails, and %d out of %d ham emails.\"\n # Correct counts are on the diagonal\n correct = np.diag(performance_measures)\n # totals are obtained by summing across guessed labels\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0],totals[0],correct[1],totals[1]))\n\n\n ### TODO: Write your code here to modify the decision rule such that\n ### Type 1 and Type 2 errors can be traded off, plot the trade-off curve\n print(\"----type 1 and 2 here-----\")\n offset = [-1E2, -1E1, -1E0, 1E0, 1E1]\n type1 = []\n type2 = []\n for offset_value in offset:\n performance_measures = np.zeros([2, 2])\n for filename in (util.get_files_in_folder(test_folder)):\n # Classify\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category,\n priors_by_category)\n\n #add offset\n if(log_posterior[0] + offset_value > log_posterior[1]):\n label = \"spam\"\n else:\n label = \"ham\"\n\n # 
Measure performance (the filename indicates the true label)\n base = os.path.basename(filename)\n true_index = ('ham' in base)\n guessed_index = (label == 'ham')\n performance_measures[int(true_index), int(guessed_index)] += 1\n\n type1.append(performance_measures[0][1])\n type2.append(performance_measures[1][0])\n\n template = \"You correctly classified %d out of %d spam emails, and %d out of %d ham emails.\"\n # Correct counts are on the diagonal\n correct = np.diag(performance_measures)\n # totals are obtained by summing across guessed labels\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n plt.title(\"Type1 vs Type2 Error\")\n for i in range(0, len(type1)):\n plt.scatter(type1[i], type2[i])\n\n plt.xlabel(\"type1\")\n plt.ylabel(\"type2\")\n plt.legend(offset, loc='best')\n plt.show()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
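classify_new_email reduces to comparing the log prior plus the count-weighted log likelihood of each word between the two classes. The same arithmetic on a toy two-word vocabulary (every probability and count below is invented for illustration):

import numpy as np

p_d = {'free': 0.7, 'meeting': 0.3}  # invented spam word distribution
q_d = {'free': 0.2, 'meeting': 0.8}  # invented ham word distribution
prior = [0.5, 0.5]
email = {'free': 3, 'meeting': 1}    # invented word counts for one email

log_spam = np.log(prior[0]) + sum(c * np.log(p_d[w]) for w, c in email.items())
log_ham = np.log(prior[1]) + sum(c * np.log(q_d[w]) for w, c in email.items())
print('spam' if log_spam > log_ham else 'ham')  # 'spam': roughly -2.97 beats -5.74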
# Generated by Django 2.0.3 on 2018-04-30 16:25 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('threads', '0007_auto_20180430_1617'), ] operations = [ migrations.AlterField( model_name='thread', name='last_activity', field=models.DateTimeField(default=django.utils.timezone.now), ), ]
normal
{ "blob_id": "6cd250b3bffd87657ec7cc28eaffe817c6d9f73f", "index": 9794, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('threads', '0007_auto_20180430_1617')]\n operations = [migrations.AlterField(model_name='thread', name=\n 'last_activity', field=models.DateTimeField(default=django.utils.\n timezone.now))]\n", "step-4": "from django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [('threads', '0007_auto_20180430_1617')]\n operations = [migrations.AlterField(model_name='thread', name=\n 'last_activity', field=models.DateTimeField(default=django.utils.\n timezone.now))]\n", "step-5": "# Generated by Django 2.0.3 on 2018-04-30 16:25\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('threads', '0007_auto_20180430_1617'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='thread',\n name='last_activity',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
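The AlterField above stores django.utils.timezone.now itself, the callable rather than its result, so the timestamp is computed whenever a new row needs a default instead of once when the migration was written. A minimal sketch of the model declaration that would produce this field (only the field is taken from the migration; the rest of the Thread model is assumed):

from django.db import models
from django.utils import timezone


class Thread(models.Model):
    # passing the callable, not timezone.now(), defers evaluation to object creation
    last_activity = models.DateTimeField(default=timezone.now)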
from django.http import HttpResponse
from django.shortcuts import render
from dashboard.models import Farmer
import random, json, requests
from django.core import serializers

from collections import namedtuple

# NOTE: the Twilio account SID and auth token below are hardcoded sample
# credentials; in a real deployment they belong in settings or the environment.

def sendSMS(message):
    if message:
        assert isinstance(message, (str, unicode))
        payload = {"From": "18126524546", "To": "+18126524546", "Body": message}
    else:
        payload = {"From": "18126524546", "To": "+18126524546", "Body": "we curlin"}
    r = requests.post("https://api.twilio.com/2010-04-01/Accounts/AC2538516e85acddb63169a9c56019a68a/Messages",
                      auth=('AC2538516e85acddb63169a9c56019a68a', '170945ab2aed0d2ec992a22e9fa41ca4'),
                      data=payload)
    print r.text

def jsonify(content):
    # serializers.serialize only handles model instances; plain data goes through json
    return HttpResponse(json.dumps(content), mimetype='application/json')

def getLatestSMS(request):
    return jsonify(getTwilioSMSData(request))

def getTwilioSMSData(request):
    global last_SMS_id
    r = requests.get('https://api.twilio.com/2010-04-01/Accounts/AC2538516e85acddb63169a9c56019a68a/Messages.json', auth=('AC2538516e85acddb63169a9c56019a68a', '170945ab2aed0d2ec992a22e9fa41ca4'))
    all_messages = []
    for item in r.json()['messages']:
        all_messages.append(SMS(
            phone = item['from'],
            body = item['body'],
            direction = item['direction'], # inbound or outbound
            date_created = item['date_created'],
            sid = item['sid']
        ))
    if not all_messages:
        return str({})
    last_SMS_id = all_messages[0].sid
    farmer = matchPhoneToFarmer(all_messages[0].phone[1:])
    return str({'farmer': farmer, 'text': all_messages[0]})


def matchPhoneToFarmer(phone):
    print "PHONE: ", phone
    for farmer in sample_farmers:
        if phone == farmer.phone:
            return farmer
    return None


def generateSampleData():
    sample_names = ("Bob", "Dan", "Chloe", "Lyra", "Dev", "Eric")
    sample_land_area = (100, 140, 120, 30, 10, 1500)
    sample_phone = ("17792329691", "17792329696", "17792329691", "17792329691", "17792329691", "17792329691")
    sample_lat = (11.1 + random.random()/15,
                  11.1 + random.random()/15,
                  11.1 + random.random()/15,
                  11.1 + random.random()/15,
                  11.1 + random.random()/15,
                  11.1 + random.random()/15)
    sample_long = (79.646 + random.random()/15,
                   79.646 + random.random()/15,
                   79.646 + random.random()/15,
                   79.646 + random.random()/15,
                   79.646 + random.random()/15,
                   79.646)
    sample_diseased = (True, False, False, False, True, True)

    for i in range(6):
        name = sample_names[i]
        land_area = sample_land_area[i]
        phone = sample_phone[i]
        latitude = sample_lat[i]
        longitude = sample_long[i]
        is_diseased = sample_diseased[i]
        sample_farmers.append(Farmer(name=name, land_area=land_area, phone=phone,
                                     latitude=latitude, longitude=longitude,
                                     is_diseased=is_diseased))

def returnFarmerDataJSON(request):
    response = serializers.serialize('json', sample_farmers)
    return HttpResponse(response, mimetype='application/json')

def dashboard(request):
    # sendSMS("yo what's up")
    print getTwilioSMSData(request)
    context = {}
    return render(request, 'dashboard/dashboard.html', context)

sample_farmers = []
generateSampleData()
SMS = namedtuple('SMS', ['phone', 'body', 'direction', 'date_created', 'sid'])
last_SMS_id = ""
normal
{ "blob_id": "0d07ad60c58828ce19153063fb5d7d80135cb9ec", "index": 9737, "step-1": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom dashboard.models import Farmer\nimport random, json, requests\nfrom django.core import serializers\n\nfrom collections import namedtuple\n\ndef sendSMS(message):\n if message:\n assert isinstance(message, (str, unicode))\n payload = {\"From\":\"18126524546\", \"To\":\"+18126524546\", \"Body\":message}\n else:\n payload = {\"From\":\"18126524546\", \"To\":\"+18126524546\", \"Body\":\"we curlin\"}\n r = requests.post(\"https://api.twilio.com/2010-04-01/Accounts/AC2538516e85acddb63169a9c56019a68a/Messages\", \n auth=('AC2538516e85acddb63169a9c56019a68a','170945ab2aed0d2ec992a22e9fa41ca4'), \n data=payload)\n print r.text\n\ndef jsonify(content):\n response = serializers.serialize('json', content);\n return HttpResponse(response, mimetype='application/json')\n\ndef getLatestSMS(request):\n return jsonify(str(getTwilioSMSData()))\n\ndef getTwilioSMSData(request):\n r = requests.get('https://api.twilio.com/2010-04-01/Accounts/AC2538516e85acddb63169a9c56019a68a/Messages.json', auth=('AC2538516e85acddb63169a9c56019a68a', '170945ab2aed0d2ec992a22e9fa41ca4'))\n all_messages = []\n for item in r.json()['messages']:\n all_messages.append(SMS(\n phone = item['from'],\n body = item['body'],\n direction = item['direction'], #inbound or outbound\n date_created = item['date_created'],\n sid = item['sid']\n ))\n last_SMS_id = all_messages[0].sid\n farmer = matchPhoneToFarmer(all_messages[0].phone[1:])\n return str({'farmer':farmer, 'text':all_messages[0]})\n\n\ndef matchPhoneToFarmer(phone):\n print \"PHONE: \", phone\n for farmer in sample_farmers:\n if phone==farmer.phone:\n return farmer\n return None\n\n\ndef generateSampleData():\n sample_names = (\"Bob\", \"Dan\", \"Chloe\", \"Lyra\", \"Dev\", \"Eric\")\n sample_land_area = (100, 140, 120, 30, 10, 1500)\n sample_phone = (\"17792329691\",\"17792329696\",\"17792329691\",\"17792329691\",\"17792329691\",\"17792329691\")\n sample_lat = (11.1 + random.random()/15,\n 11.1 + random.random()/15,\n 11.1 + random.random()/15,\n 11.1 + random.random()/15,\n 11.1 + random.random()/15,\n 11.1 + random.random()/15)\n sample_long = (79.646 + random.random()/15,\n 79.646 + random.random()/15,\n 79.646 + random.random()/15,\n 79.646 + random.random()/15,\n 79.646 + random.random()/15,\n 79.646)\n sample_diseased = (True, False, False, False, True, True)\n\n for i in range(6):\n name=sample_names[i]\n land_area=sample_land_area[i],\n phone = sample_phone[i]\n latitude=sample_lat[i], \n longitude=sample_long[i],\n is_diseased=sample_diseased[i]\n sample_farmers.append(Farmer(name=name, land_area=land_area, phone=phone,\n latitude=latitude, longitude=longitude,\n is_diseased=is_diseased))\n\ndef returnFarmerDataJSON(request):\n data = []\n for item in sample_farmers:\n data.append(str(item))\n response = serializers.serialize('json', sample_farmers);\n return HttpResponse(response, mimetype='application/json')\n\ndef dashboard(request):\n # sendSMS(\"yo what's up\")\n print getTwilioSMSData(request)\n context = {}\n return render(request, 'dashboard/dashboard.html', context)\n\nsample_farmers = []\ngenerateSampleData()\nSMS = namedtuple('SMS', ['phone', 'body', 'direction', 'date_created', 'sid'])\nlast_SMS_id = \"\"\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
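The account SID and auth token in the record above are hardcoded in source, which also means they were published with the code. A common alternative keeps the same REST endpoint but reads the credentials from the environment at startup; the variable names below are hypothetical:

import os

import requests

ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']  # hypothetical variable name
AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']    # hypothetical variable name

def fetch_messages():
    url = 'https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json' % ACCOUNT_SID
    return requests.get(url, auth=(ACCOUNT_SID, AUTH_TOKEN)).json()['messages']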
import random import colorama from termcolor import colored from reusables.string_manipulation import int_to_words from app.common_functions import comma_separated, add_dicts_together, remove_little_words, odds from app.load_data import items, buildings, wild_mobs, names, adjectives colorama.init() def find_unique_names(quantity, name_list, taken_names): free_names = [x for x in name_list if x not in taken_names] random.shuffle(free_names) return free_names[:quantity] def dropper(rarity): results = {'super rare': 100, 'rare': 50, 'uncommon': 25, 'common': 5, 'super common': 2} quantity = 0 countdown = random.randint(0, 10) while countdown > 0: if random.randint(0, results[rarity]) == 1: quantity += 1 countdown -= 1 return quantity def drop_building(dictionary, p, limit=None): limit = limit or len(adjectives) drops_i = [] for k, v in dictionary.items(): quantity = dropper(v['rarity']) quantity = quantity if quantity < limit else limit limit -= quantity if quantity: if quantity > 1 and v['category'] != 'residence': n = random.randint(0, quantity) unique_names = find_unique_names(quantity - n, names, p.square.unique_building_names) p.square.unique_building_names += unique_names for i in range(0, quantity - n): drops_i.append(Building(name=f"{unique_names[i]}'s {remove_little_words(k).capitalize()}", p=p, **v)) unique_adjectives = find_unique_names(n, adjectives, p.square.unique_building_names) p.square.unique_building_names += unique_adjectives for i in range(0, n): drops_i.append(Building(name=f"the {unique_adjectives[i]} {remove_little_words(k).capitalize()}", p=p, **v)) elif quantity > 1 and v['category'] == 'residence': unique_house_names = find_unique_names(quantity, names, p.square.unique_house_names) p.square.unique_house_names += unique_house_names for i in range(0, quantity): drops_i.append(Building(name=f"{unique_house_names[i]}'s {remove_little_words(k)}", p=p, **v)) else: drops_i.append(Building(name=k, p=p, **v)) return drops_i def drop_mob(dictionary, p, limit=None, square=None): square = square or p.square limit = limit or len(names) - len(square.unique_mob_names) drops_i = [] for k, v in dictionary.items(): quantity = dropper(v['rarity']) quantity = quantity if quantity < limit else limit limit -= quantity if quantity: if quantity > 1: unique_names = find_unique_names(quantity, names, square.unique_mob_names) p.square.unique_mob_names += unique_names for i in range(0, len(unique_names)): drops_i.append(Mob(name=f"{k} named {unique_names[i]}", p=p, **v)) else: if k not in [n.name for n in p.square.mobs]: drops_i.append(Mob(name=k, p=p, **v)) else: name = find_unique_names(1, names, square.unique_mob_names)[0] drops_i.append(Mob(name=f"{k} named {name}", p=p, **v)) return drops_i def drop_item(dictionary): """ Randomly generates objects based on rarity """ drops_i = [] for k, v in dictionary.items(): quantity = dropper(v['rarity']) if quantity: drops_i.append(Item(name=k, quantity=quantity, **v)) return drops_i class MapSquare: def __init__(self, name="", square_type=None): square_types = ["forest", "mountains", "desert", "city", "swamp", "ocean"] self.square_type = square_type or square_types[random.randint(0, len(square_types) - 1)] self.name = name self.unique_mob_names = [] self.unique_building_names = [] self.unique_house_names = [] mobs = [] items = [] buildings = [] def generate_items(self): self.items = drop_item(add_dicts_together(items["master"], items[self.square_type])) def generate_buildings(self, p): self.buildings = 
drop_building(add_dicts_together(buildings["master"], buildings[self.square_type]), p) def generate_mobs(self, p): self.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square_type]), p) def clean_up_map(self): """ Remove items with quantity of zero from the map inventory""" self.items = [i for i in self.items if i.quantity != 0] @staticmethod def map_picture(the_map, p): """With the player's location in the center, draw a 5 x 5 map with map square type and coordinates in each square""" xy = (p.location[0] - 2, p.location[1] + 2) map_coords = [] for y in range(0, 5): row = [(xy[0] + x, xy[1] - y) for x in range(0, 5)] map_coords.append(row) pretty_map = [] for r in map_coords: row = [] for coordinates in r: if coordinates in the_map.keys(): if p.quest and p.job and p.quest[1] == coordinates and p.job.location == coordinates: star = '*$ ' elif p.quest and p.quest[1] == coordinates: star = ' * ' elif p.job and p.job.location == coordinates: star = ' $ ' else: star = ' ' row.append("|{!s:9}{}|".format(the_map[coordinates].square_type, star)) else: row.append("|{!s:12}|".format(' ')) pretty_map.append(row) for row in pretty_map: print(''.join(row)) class Player: def __init__(self, name, location): self.name = name self.location = location self.square = None self.money = 0 self.quest = None self.job = None self.phase = "day" self.equipped_weapon = None self.major_armor = None self.minor_armor = None self.building_local = None self.inventory = [] self.skills = {} self.health = 100 self.greeting_count = 0 self.body_count = 0 self.assassination_count = 0 self.hit_list = [] self.death_count = 0 # TODO increase insurance cost every death? self.food_count = 0 self.run_away_count = 0 self.speed_bonus = False self.game_won = False def game_over(self): if self.game_won is False: self.game_won = True print(colored("You have won the game!", "green")) print("You may continue playing to earn more achievements if you wish.") if self.run_away_count == 0: print("Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.") if self.run_away_count > 100: print("Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.") def clean_up_inventory(self): """ Remove items with quantity of zero from the map inventory""" self.inventory = [i for i in self.inventory if i.quantity != 0] def phase_change(self, the_map): self.phase = 'day' if self.phase == 'night' else 'night' for k, square in the_map.items(): if self.location != k: square.generate_items() for b in square.buildings: if b.ware_list: b.wares = drop_item(b.ware_list) while not b.wares: b.wares = drop_item(b.ware_list) if b.name not in ('a castle', 'a volcanic base'): jobs = {} buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type]) for key, v in buiding_dict.items(): if key == b.name and v.get('jobs'): for name, values in v['jobs'].items(): jobs[name] = values b.jobs = b.drop_job(jobs) if self.phase == 'day': self.speed_bonus = False for mob in square.mobs: mob.health = 100 mob.irritation_level = 0 mob.quest = None if self.quest is None else mob.quest if not square.mobs: square.mobs = drop_mob(add_dicts_together(wild_mobs["master"], wild_mobs[self.square.square_type]), self, limit=len(names), square=square) def formatted_inventory(self): formatted = [] for item in self.inventory: if item.quantity > 1: formatted.append(f"{int_to_words(item.quantity)} {item.plural}") else: 
class Player:
    def __init__(self, name, location):
        self.name = name
        self.location = location
        self.square = None
        self.money = 0
        self.quest = None
        self.job = None
        self.phase = "day"
        self.equipped_weapon = None
        self.major_armor = None
        self.minor_armor = None
        self.building_local = None
        self.inventory = []
        self.skills = {}
        self.health = 100
        self.greeting_count = 0
        self.body_count = 0
        self.assassination_count = 0
        self.hit_list = []
        self.death_count = 0  # TODO increase insurance cost every death?
        self.food_count = 0
        self.run_away_count = 0
        self.speed_bonus = False
        self.game_won = False

    def game_over(self):
        if self.game_won is False:
            self.game_won = True
            print(colored("You have won the game!", "green"))
            print("You may continue playing to earn more achievements if you wish.")
            if self.run_away_count == 0:
                print("Congratulations, you have earned the True Bravery achievement, "
                      "having won the game without ever running away from a fight.")
            if self.run_away_count > 100:
                print("Congratulations, you have earned the True Cowardice achievement, "
                      "having won the game after running away from over 100 battles.")

    def clean_up_inventory(self):
        """Remove items with a quantity of zero from the player's inventory."""
        self.inventory = [i for i in self.inventory if i.quantity != 0]

    def phase_change(self, the_map):
        self.phase = 'day' if self.phase == 'night' else 'night'
        for k, square in the_map.items():
            if self.location != k:
                square.generate_items()
                for b in square.buildings:
                    if b.ware_list:
                        b.wares = drop_item(b.ware_list)
                        while not b.wares:
                            b.wares = drop_item(b.ware_list)
                    if b.name not in ('a castle', 'a volcanic base'):
                        jobs = {}
                        building_dict = add_dicts_together(buildings['master'], buildings[square.square_type])
                        for key, v in building_dict.items():
                            if key == b.name and v.get('jobs'):
                                for name, values in v['jobs'].items():
                                    jobs[name] = values
                        b.jobs = b.drop_job(jobs)
            if self.phase == 'day':
                self.speed_bonus = False
                for mob in square.mobs:
                    mob.health = 100
                    mob.irritation_level = 0
                    mob.quest = None if self.quest is None else mob.quest
                if not square.mobs:
                    # Repopulate an empty square, drawing from the player's current square type.
                    square.mobs = drop_mob(add_dicts_together(wild_mobs["master"],
                                                              wild_mobs[self.square.square_type]),
                                           self, limit=len(names), square=square)

    def formatted_inventory(self):
        formatted = []
        for item in self.inventory:
            if item.quantity > 1:
                formatted.append(f"{int_to_words(item.quantity)} {item.plural}")
            else:
                formatted.append(item.name)
        if formatted:
            return comma_separated(formatted)
        else:
            return "nothing"

    def pretty_inventory(self):
        w = self.equipped_weapon
        major = self.major_armor.defense if self.major_armor else 0
        minor = self.minor_armor.defense if self.minor_armor else 0
        armor_defense = (major + minor) * 5
        armors = [self.major_armor.name if self.major_armor else None,
                  self.minor_armor.name if self.minor_armor else None]
        inventory = {'inventory_items': f"You have {self.formatted_inventory()} in your inventory.",
                     'weapon': f"You are wielding {int_to_words(w.quantity)} "
                               f"{remove_little_words(w.name) if w.quantity == 1 else w.plural}." if w else None,
                     'armor': f"You are wearing {' and '.join(x for x in armors if x)}, "
                              f"giving you a {armor_defense}% reduction in incoming damage."
                              if self.minor_armor or self.major_armor else None}
        return '\n'.join(v for v in inventory.values() if v)

    def status(self):
        skills = [f"{k}: {v}%." for k, v in self.skills.items()]
        job = f"You have a job as a {self.job.name}." if self.job else None
        quest = "You have a quest." if self.quest else None
        if job and quest:
            job_string = "\n".join([job, quest])
        elif job or quest:
            job_string = job if job else quest
        else:
            job_string = "You do not have a job, and you are not contributing to society."
        status_string = {'health': f'Currently, you have {self.health} health.',
                         'location': f'You are located on map coordinates {self.location}, '
                                     f'which is {self.square.square_type}.',
                         'building_local': f'You are inside {self.building_local.name}.'
                                           if self.building_local else None,
                         'skills': '\n'.join(skills) if skills else "You don't have any skills.",
                         'money': f"You have ${self.money} in your wallet.",
                         'job': job_string}
        return '\n'.join(v for v in status_string.values() if v)

    def statistics(self):
        print(f"You have killed {self.body_count} mobs.")
        print(f"You have run away from {self.run_away_count} battles.")
        print(f"You have eaten {self.food_count} items.")
        print(f"You have performed {self.assassination_count} assassinations.")
        print(f"You have talked to mobs {self.greeting_count} times.")

    def view_hit_list(self):
        if self.hit_list:
            print(f"If you ever run across these shady characters, be sure to take "
                  f"their names off your list: {comma_separated(self.hit_list)}")
        else:
            print("Looks like you don't know of anyone who needs to be dead.")

    def increase_skill(self, skill, increase):
        try:
            self.skills[skill] += increase
        except KeyError:
            self.skills[skill] = increase
        print(f"You have increased your mastery of {skill} by {increase}% "
              f"for a total of {self.skills[skill]}%.")


class Item:
    def __init__(self, name, quantity, plural, category=None, perishable=None, flammable=None,
                 rarity=None, price=None, weapon_rating=None, defense=None):
        self.name = name
        self.quantity = quantity
        self.plural = plural
        self.category = category or None
        self.perishable = perishable or None
        self.flammable = flammable or None
        self.rarity = rarity or None
        self.price = price or None
        self.weapon_rating = weapon_rating or None
        self.defense = defense or None

    def copy(self):
        # price is passed through as well, so a copy keeps its value.
        return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,
                    perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,
                    price=self.price, weapon_rating=self.weapon_rating, defense=self.defense)
class Building(object):
    def __init__(self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):
        self.name = name
        self.p = p
        self.quantity = 1
        self.plural = plural
        self.category = category or None
        self.rarity = rarity or None
        self.ware_list = ware_list
        self.wares = self.drop_wares()
        self.mobs = drop_mob(mobs, p) if mobs else None
        self.jobs = self.drop_job(jobs) if jobs else None
        if self.name in ('a castle', 'a volcanic base'):
            self.boss_mobs_and_jobs()

    def drop_wares(self):
        if self.ware_list:
            wares = drop_item(self.ware_list)
            while not wares:
                wares = drop_item(self.ware_list)
            return wares
        else:
            return []

    def drop_job(self, jobs):
        drops_i = []
        for k, v in jobs.items():
            if odds(2):
                drops_i.append(Job(name=k, location=self.p.location, **v))
        return drops_i

    def boss_mobs_and_jobs(self):
        """Outfit a boss mob with top-tier gear and attach the endgame job."""
        boss_major_armors = [
            Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1,
                 category='major armor', rarity='super rare', defense=5),
            Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1,
                 category='major armor', defense=5, rarity='super rare'),
            Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears',
                 quantity=1, category='major armor', defense=5, rarity='super rare')]
        boss_minor_armors = [
            Item('wings of an angel', plural='wings of angels', quantity=1,
                 rarity='super rare', category='minor armor', defense=5),
            Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1,
                 rarity='super rare', category='minor armor', defense=5),
            Item('a pair of boots that were made for walkin', plural='pairs of boots that were made for walkin',
                 quantity=1, rarity='super rare', category='minor armor', defense=5)]
        boss_weapons = [
            Item('an apache helicopter', plural='apache helicopters', rarity='super rare',
                 weapon_rating=6, quantity=1),
            Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),
            Item('an army of attacking wizards', plural='armies of attacking wizards',
                 weapon_rating=6, quantity=1, rarity='super rare')]
        boss_names = ["the Terrifying Dragon of Soul Slaying",
                      "the Great Salamander of Darkness",
                      "the Squirrel of Destiny"]
        boss_name = random.choice(boss_names)
        boss = Mob(boss_name, self.p, plural=boss_name, rarity='super rare')
        boss.health = 500
        boss.equipped_weapon = random.choice(boss_weapons)
        boss.major_armor = random.choice(boss_major_armors)
        boss.minor_armor = random.choice(boss_minor_armors)
        boss.irritation_level = 10
        self.mobs = [boss]
        if self.name == 'a castle':
            self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]
        if self.name == 'a volcanic base':
            self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]


class Job:
    def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):
        self.name = name
        self.location = location
        self.skills_needed = skills_needed or None
        self.salary = salary or 0
        self.skills_learned = skills_learned or None
        self.inventory_needed = inventory_needed or None
        self.application_attempts = 0
class Mob:
    def __init__(self, name, p, plural, rarity, inventory=None):
        self.name = name
        self.p = p
        self.plural = plural
        self.quantity = 1
        self.rarity = rarity
        self.skills = self.skills()
        self.quest = None
        self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))
        self.health = 100
        self.equipped_weapon = self.equip()
        major = [x for x in self.inventory if x.category == 'major armor']
        minor = [x for x in self.inventory if x.category == 'minor armor']
        self.major_armor = major[0] if major else None
        self.minor_armor = minor[0] if minor else None
        self.irritation_level = 0

    def equip(self):
        # Pull the best-rated weapon out of the inventory, skipping entries
        # that have no weapon_rating attribute.
        nice_weapons = []
        for i in self.inventory:
            try:
                if i.weapon_rating:
                    nice_weapons.append(i)
            except AttributeError:
                pass
        nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)
        if nice_weapons:
            self.inventory.remove(nice_weapons[0])
            return nice_weapons[0]
        else:
            return None

    @staticmethod
    def skills():
        """Pick the skills for a mob; these determine what a player can get from completing a quest."""
        all_skills = ["strength", "patience", "cleanliness", "leadership", "communication",
                      "science", "math", "engineering", "intelligence", "driving"]
        return random.sample(all_skills, 2)

    def generate_quest(self):
        """Inventory-based quest: bring the mob x of an item to learn a skill.
        Occasionally offers an assassination target instead."""
        if odds(3):
            quest_items = add_dicts_together(items["master"], items[self.p.square.square_type])
            quest_item = random.choice(list(quest_items.keys()))
            i = Item(quest_item, 0, **quest_items[quest_item])
            self.inventory.append(i)
            quantity = {'super rare': '1', 'rare': '2', 'uncommon': '3', 'common': '6', 'super common': '15'}
            q = quantity[i.rarity]
            self.quest = i, int(q), f"{self.p.name}, if you bring " \
                                    f"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, " \
                                    f"I will teach you a valuable skill."
            return
        elif odds(5):
            mobs = []
            for biome, building in buildings.items():
                for b, attributes in building.items():
                    if attributes.get('mobs'):
                        for k in attributes['mobs'].keys():
                            mobs.append(k)
            for biome, mob in wild_mobs.items():
                for k in mob.keys():
                    mobs.append(k)
            target = f"{random.choice(mobs)} named {random.choice(names)}"
            print(f"Well, we'll keep this off the record, but I can arrange for some money to find its way "
                  f"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...")
            self.p.hit_list.append(target)
            return False
        else:
            return None
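
# Editor-added usage sketch: a minimal demo of wiring a square and a player
# together. The real game presumably has its own entry point elsewhere; the
# names below ("demo", "Tester") are illustrative assumptions.
if __name__ == "__main__":
    square = MapSquare(name="demo")
    player = Player(name="Tester", location=(0, 0))
    player.square = square
    square.generate_items()
    square.generate_buildings(player)
    square.generate_mobs(player)
    print(f"A {square.square_type} square with {len(square.buildings)} buildings "
          f"and {len(square.mobs)} mobs.")
    print(player.status())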
p.quest[1] == coordinates and p.job.location == coordinates:\n star = '*$ '\n elif p.quest and p.quest[1] == coordinates:\n star = ' * '\n elif p.job and p.job.location == coordinates:\n star = ' $ '\n else:\n star = ' '\n row.append(\"|{!s:9}{}|\".format(the_map[coordinates].square_type, star))\n else:\n row.append(\"|{!s:12}|\".format(' '))\n pretty_map.append(row)\n for row in pretty_map:\n print(''.join(row))\n\n\nclass Player:\n def __init__(self, name, location):\n self.name = name\n self.location = location\n self.square = None\n self.money = 0\n self.quest = None\n self.job = None\n self.phase = \"day\"\n self.equipped_weapon = None\n self.major_armor = None\n self.minor_armor = None\n self.building_local = None\n self.inventory = []\n self.skills = {}\n self.health = 100\n self.greeting_count = 0\n self.body_count = 0\n self.assassination_count = 0\n self.hit_list = []\n self.death_count = 0\n # TODO increase insurance cost every death?\n self.food_count = 0\n self.run_away_count = 0\n self.speed_bonus = False\n self.game_won = False\n\n def game_over(self):\n if self.game_won is False:\n self.game_won = True\n print(colored(\"You have won the game!\", \"green\"))\n print(\"You may continue playing to earn more achievements if you wish.\")\n if self.run_away_count == 0:\n print(\"Congratulations, you have achieved the True Bravery achievement, having won the game without ever running away from a fight.\")\n if self.run_away_count > 100:\n print(\"Congratulations, you have achieved the True Cowardice achievement, having won the game after running away from over 100 battles.\")\n\n def clean_up_inventory(self):\n \"\"\" Remove items with quantity of zero from the map inventory\"\"\"\n self.inventory = [i for i in self.inventory if i.quantity != 0]\n\n def phase_change(self, the_map):\n self.phase = 'day' if self.phase == 'night' else 'night'\n for k, square in the_map.items():\n if self.location != k:\n square.generate_items()\n for b in square.buildings:\n if b.ware_list:\n b.wares = drop_item(b.ware_list)\n while not b.wares:\n b.wares = drop_item(b.ware_list)\n if b.name not in ('a castle', 'a volcanic base'):\n jobs = {}\n buiding_dict = add_dicts_together(buildings['master'], buildings[square.square_type])\n for key, v in buiding_dict.items():\n if key == b.name and v.get('jobs'):\n for name, values in v['jobs'].items():\n jobs[name] = values\n b.jobs = b.drop_job(jobs)\n if self.phase == 'day':\n self.speed_bonus = False\n for mob in square.mobs:\n mob.health = 100\n mob.irritation_level = 0\n mob.quest = None if self.quest is None else mob.quest\n if not square.mobs:\n square.mobs = drop_mob(add_dicts_together(wild_mobs[\"master\"], wild_mobs[self.square.square_type]),\n self, limit=len(names), square=square)\n\n def formatted_inventory(self):\n formatted = []\n for item in self.inventory:\n\n if item.quantity > 1:\n formatted.append(f\"{int_to_words(item.quantity)} {item.plural}\")\n else:\n formatted.append(item.name)\n if formatted:\n return comma_separated(formatted)\n else:\n return \"nothing\"\n\n def pretty_inventory(self):\n w = self.equipped_weapon\n major = self.major_armor.defense if self.major_armor else 0\n minor = self.minor_armor.defense if self.minor_armor else 0\n armor_defense = (major + minor) * 5\n\n armors = [self.major_armor.name if self.major_armor else None, self.minor_armor.name if self.minor_armor else None]\n\n inventory = {'inventory_items': f\"You have {self.formatted_inventory()} in your inventory.\",\n 'weapon': f\"You are wielding 
{int_to_words(w.quantity)} \"\n f\"{remove_little_words(w.name) if w.quantity == 1 else w.plural}.\" if w else None,\n 'armor': f\"You are wearing {' and '.join(x for x in armors if x)}, \"\n f\"giving you a {armor_defense}% reduction in incoming damage.\" if self.minor_armor or self.major_armor else None}\n return '\\n'.join(v for v in inventory.values() if v)\n\n def status(self):\n skills = [f\"{k}: {v}%.\" for k, v in self.skills.items()]\n\n job = f\"You have a job as a {self.job.name}.\" if self.job else None\n quest = \"You have a quest.\" if self.quest else None\n if job and quest:\n job_string = \"\\n\".join([job, quest])\n elif job or quest:\n job_string = job if job else quest\n else:\n job_string = \"You do not have a job, and you are not contributing to society.\"\n\n status_string = {\n 'health': f'Currently, you have {self.health} health.',\n 'location': f'You are located on map coordinates {self.location}, '\n f'which is {self.square.square_type}.',\n 'building_local': f'You are inside {self.building_local.name}.' if self.building_local else None,\n 'skills': '\\n'.join(skills) if skills else \"You don't have any skills.\",\n 'money': f\"You have ${self.money} in your wallet.\",\n 'job': job_string}\n\n return '\\n'.join(v for v in status_string.values() if v)\n\n def statistics(self):\n print(f\"You have killed {self.body_count} mobs.\")\n print(f\"You have ran away from {self.run_away_count} battles.\")\n print(f\"You have eaten {self.food_count} items.\")\n print(f\"You have performed {self.assassination_count} assassinations.\")\n print(f\"You have talked to mobs {self.greeting_count} times.\")\n\n def view_hit_list(self):\n if self.hit_list:\n print(f\"If you ever run across these shady characters, be sure to take their names off your list: {comma_separated(self.hit_list)}\")\n else:\n print(\"Looks like you don't know of anyone who needs to be dead.\")\n\n def increase_skill(self, skill, increase):\n try:\n self.skills[skill] += increase\n except KeyError:\n self.skills[skill] = increase\n print(f\"You have increased your mastery of {skill} by {increase}% for a total of {self.skills[skill]}%.\")\n\n\nclass Item:\n def __init__(self, name, quantity, plural, category=None, perishable=None,\n flammable=None, rarity=None, price=None, weapon_rating=None, defense=None):\n self.name = name\n self.quantity = quantity\n self.plural = plural\n self.category = category or None\n self.perishable = perishable or None\n self.flammable = flammable or None\n self.rarity = rarity or None\n self.price = price or None\n self.weapon_rating = weapon_rating or None\n self.defense = defense or None\n\n def copy(self):\n return Item(name=self.name, quantity=self.quantity, plural=self.plural, category=self.category,\n perishable=self.perishable, flammable=self.flammable, rarity=self.rarity,\n weapon_rating=self.weapon_rating, defense=self.defense)\n\n\nclass Building(object):\n def __init__(self, name, p, plural, category=None, rarity=None, ware_list=None, mobs=None, jobs=None):\n self.name = name\n self.p = p\n self.quantity = 1\n self.plural = plural\n self.category = category or None\n self.rarity = rarity or None\n self.ware_list = ware_list\n self.wares = self.drop_wares()\n self.mobs = drop_mob(mobs, p) if mobs else None\n self.jobs = self.drop_job(jobs) if jobs else None\n\n if self.name in ('a castle', 'a volcanic base'):\n self.boss_mobs_and_jobs()\n\n def drop_wares(self):\n if self.ware_list:\n wares = drop_item(self.ware_list)\n while not wares:\n wares = 
drop_item(self.ware_list)\n return wares\n else:\n return []\n\n def drop_job(self, jobs):\n drops_i = []\n for k, v in jobs.items():\n if odds(2):\n drops_i.append(Job(name=k, location=self.p.location, **v))\n return drops_i\n\n def boss_mobs_and_jobs(self):\n boss_major_armors = [Item('a coat of impervious dragon scales', plural='coats of dragon scales', quantity=1, category='major armor', rarity='super rare', defense=5),\n Item('an enchanted leather duster', plural='enchanted leather dusters', quantity=1, category='major armor', defense=5, rarity='super rare'),\n Item('a coat of actual live grizzly bears', plural='coats of actual live grizzly bears', quantity=1, category='major armor', defense=5, rarity='super rare')]\n boss_minor_armors = [Item('wings of an angel', plural='wings of angels', quantity=1, rarity='super rare', category='minor armor', defense=5),\n Item('an OSHA approved hard hat', plural='OSHA approved hard hats', quantity=1, rarity='super rare', category='minor armor', defense=5),\n Item('a pair boots that were made for walkin', plural='pairs of boots that were made for walkin', quantity=1, rarity='super rare', category='minor armor', defense=5)]\n boss_weapons = [Item('an apache helicopter', plural='apache helicopters', rarity='super rare', weapon_rating=6, quantity=1),\n Item('a trebuchet', plural='trebuchets', weapon_rating=6, quantity=1, rarity='super rare'),\n Item('an army of attacking wizards', plural='armies of attacking wizards', weapon_rating=6, quantity=1, rarity='super rare')]\n boss_names = [\"the Terrifying Dragon of Soul Slaying\", \"the Great Salamander of Darkness\", \"the Squirrel of Destiny\", ]\n random.shuffle(boss_names)\n random.shuffle(boss_weapons)\n random.shuffle(boss_major_armors)\n random.shuffle(boss_minor_armors)\n\n boss = Mob(boss_names[0], self.p, plural=boss_names[0], rarity='super rare')\n boss.health = 500\n boss.equipped_weapon = boss_weapons[0]\n boss.major_armor = boss_major_armors[0]\n boss.minor_armor = boss_minor_armors[0]\n boss.irritation_level = 10\n self.mobs = [boss]\n if self.name == 'a castle':\n self.jobs = [Job('king of the realm', location=self.p.location, salary=1100)]\n if self.name == 'a volcanic base':\n self.jobs = [Job('evil overlord', location=self.p.location, salary=1100)]\n\n\nclass Job:\n def __init__(self, name, location, skills_needed=None, salary=0, skills_learned=None, inventory_needed=None):\n self.name = name\n self.location = location\n self.skills_needed = skills_needed or None\n self.salary = salary or 0\n self.skills_learned = skills_learned or None\n self.inventory_needed = inventory_needed or None\n self.application_attempts = 0\n\n\nclass Mob:\n def __init__(self, name, p, plural, rarity, inventory=None):\n self.name = name\n self.p = p\n self.plural = plural\n self.quantity = 1\n self.rarity = rarity\n\n self.skills = self.skills()\n self.quest = None\n\n self.inventory = inventory or drop_item(add_dicts_together(items['master'], items[p.square.square_type]))\n self.health = 100\n self.equipped_weapon = self.equip()\n major = [x for x in self.inventory if x.category == 'major armor']\n minor = [x for x in self.inventory if x.category == 'minor armor']\n self.major_armor = major[0] if major else None\n self.minor_armor = minor[0] if minor else None\n self.irritation_level = 0\n\n def equip(self):\n nice_weapons = []\n for i in self.inventory:\n try:\n if i.weapon_rating:\n nice_weapons.append(i)\n except AttributeError:\n pass\n nice_weapons.sort(key=lambda x: x.weapon_rating, reverse=True)\n if 
nice_weapons:\n self.inventory.remove(nice_weapons[0])\n return nice_weapons[0]\n else:\n return None\n\n @staticmethod\n def skills():\n \"\"\" Pick the skills for a mob, these determine what a player can get from completing a quest \"\"\"\n all_skills = [\"strength\", \"patience\", \"cleanliness\", \"leadership\", \"communication\",\n \"science\", \"math\", \"engineering\", \"intelligence\", \"driving\"]\n\n random.shuffle(all_skills)\n return all_skills[0:2]\n\n def generate_quest(self):\n \"\"\"\n inventory based\n bring me x of an object to learn a skill\n \"\"\"\n\n if odds(3):\n\n quest_items = add_dicts_together(items[\"master\"], items[self.p.square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n\n quantity = {'super rare': '1',\n 'rare': '2',\n 'uncommon': '3',\n 'common': '6',\n 'super common': '15'}\n q = quantity[i.rarity]\n\n self.quest = i, int(q), f\"{self.p.name}, if you bring \" \\\n f\"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, \" \\\n f\"I will teach you a valuable skill.\"\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n target = f\"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}\"\n print(f\"Well, we'll keep this off the record, but I can arrange for some money to find its way \"\n f\"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\")\n self.p.hit_list.append(target)\n return False\n\n else:\n return None\n", "step-ids": [ 23, 31, 32, 38, 42 ] }
[ 23, 31, 32, 38, 42 ]
""" 统计飞船信息 """ class GameStats: def __init__(self, setting): self.setting = setting self.ships_left = self.setting.ship_limit self.game_active = True
normal
{ "blob_id": "3ab26612111e3df59f41f5b5e0bf23398e015a8a", "index": 1595, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass GameStats:\n <mask token>\n", "step-3": "<mask token>\n\n\nclass GameStats:\n\n def __init__(self, setting):\n self.setting = setting\n self.ships_left = self.setting.ship_limit\n self.game_active = True\n", "step-4": "\"\"\"\n统计飞船信息\n\"\"\"\n\n\nclass GameStats:\n def __init__(self, setting):\n self.setting = setting\n self.ships_left = self.setting.ship_limit\n self.game_active = True\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# NOTE: this class shadows Python's builtin BaseException
class BaseException(Exception):
    def __init__(self, message=""):
        super(BaseException, self).__init__(message)
        self.message = message
normal
{ "blob_id": "2ee1539e051677ad38ab7727ff5edefb1aebd015", "index": 9946, "step-1": "<mask token>\n", "step-2": "class BaseException(Exception):\n <mask token>\n", "step-3": "class BaseException(Exception):\n\n def __init__(self, message=''):\n super(BaseException, self).__init__()\n self.message = message\n", "step-4": "class BaseException(Exception):\n def __init__(self, message=\"\"):\n super(BaseException, self).__init__()\n self.message = message\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import inspect
import re
import openquake.hazardlib.source as oqsrc

# List of valid attributes for an area source
AREAS_ATTRIBUTES = set(['source_id', 'name', 'tectonic_region_type', 'mfd',
                        'rupture_mesh_spacing',
                        'magnitude_scaling_relationship',
                        'rupture_aspect_ratio', 'temporal_occurrence_model',
                        'upper_seismogenic_depth', 'lower_seismogenic_depth',
                        'nodal_plane_distribution', 'hypocenter_distribution',
                        'polygon', 'area_discretization'])
AREAS_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'source_type'])

# List of valid attributes for a simple source
SIMPLE_FAULT_ATTRIBUTES = set(['source_id', 'name', 'tectonic_region_type',
                               'mfd', 'rupture_mesh_spacing',
                               'magnitude_scaling_relationship',
                               'rupture_aspect_ratio',
                               'temporal_occurrence_model',
                               'upper_seismogenic_depth',
                               'lower_seismogenic_depth', 'fault_trace',
                               'dip', 'rake', 'hypo_list', 'sliprate'])
SIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'dip', 'rake',
                                'hypo_list', 'slip_list'])
SIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'source_type'])

# This adds support for shapefiles created by the OpenQuake-engine
SIMPLE_FAULT_ATTRIBUTES |= set([''])

# Create the set of valid source types
SOURCE_TYPES = set()
for name, obj in inspect.getmembers(oqsrc):
    if inspect.isclass(obj):
        if not re.search('Rupture', name):
            SOURCE_TYPES.add(name)


class OQtSource(object):
    """
    A container for information necessary to build and/or characterise an
    earthquake source

    :parameter str source_id:
        The ID of the source
    :parameter str source_type:
        Source type i.e. Object name amongst the ones admitted in the
        OpenQuake Hazardlib.

    """
    def __init__(self, *args, **kwargs):
        # Checks
        if len(args):
            self.source_id = args[0]
            if len(args) > 1:
                self.source_type = args[1]
        if len(kwargs):
            self.__dict__.update(kwargs)
        # Check mandatory attributes: ID
        if 'source_id' not in self.__dict__:
            raise ValueError('Source must have an ID')
        elif not isinstance(self.source_id, str):
            raise ValueError('ID must be a string')
        # Check mandatory fields: SOURCE TYPE
        if 'source_type' not in self.__dict__:
            raise ValueError('Source must have a type')
        if self.source_type not in SOURCE_TYPES:
            raise ValueError('Unrecognized source type: %s' % self.source_type)
        # Select the attribute set that matches the declared source type
        if self.source_type == 'AreaSource':
            attribute_set = AREAS_ATTRIBUTES
        elif self.source_type == 'SimpleFaultSource':
            attribute_set = SIMPLE_FAULT_ATTRIBUTES
        else:
            raise ValueError('Unsupported source type')
        # Check attributes
        for key in self.__dict__:
            if key not in attribute_set:
                print('Attribute set', attribute_set)
                msg = 'Parameter %s not compatible with this source' % key
                raise ValueError(msg)

    def get_info(self):
        for key in self.__dict__:
            print('%30s:' % key, getattr(self, key))
normal
{ "blob_id": "8adf8cfc72d5af955bf7509d3573a9bcc7c0845e", "index": 7537, "step-1": "<mask token>\n\n\nclass OQtSource(object):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n if len(args):\n self.source_id = args[0]\n if len(args) > 1:\n self.source_type = args[1]\n if len(kwargs):\n self.__dict__.update(kwargs)\n if 'source_id' not in self.__dict__:\n raise ValueError('Source must have an ID')\n elif not isinstance(self.source_id, str):\n raise ValueError('ID must be a string')\n if 'source_type' not in self.__dict__:\n raise ValueError('Source must have a type')\n if self.source_type not in SOURCE_TYPES:\n raise ValueError('Unrecognized source type: %s' % self.source_type)\n if 'source_type' in self.__dict__:\n attribute_set = AREAS_ATTRIBUTES\n elif 'source_type' in self.__dict__:\n attribute_set = SIMPLE_FAULT_ATTRIBUTES\n else:\n raise ValueError('Unsupported source type')\n for key in self.__dict__:\n if key not in attribute_set:\n print('Attribute set', attribute_set)\n msg = 'Parameter %s not compatible with this source' % key\n raise ValueError(msg)\n <mask token>\n", "step-2": "<mask token>\n\n\nclass OQtSource(object):\n \"\"\"\n A container for information necessary to build and/or characterise an\n earthquake source\n\n :parameter str source_id:\n The ID of the source\n :parameter str source_type:\n Source type i.e. Object name amongst the ones admitted in the\n OpenQuake Hazardlib.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if len(args):\n self.source_id = args[0]\n if len(args) > 1:\n self.source_type = args[1]\n if len(kwargs):\n self.__dict__.update(kwargs)\n if 'source_id' not in self.__dict__:\n raise ValueError('Source must have an ID')\n elif not isinstance(self.source_id, str):\n raise ValueError('ID must be a string')\n if 'source_type' not in self.__dict__:\n raise ValueError('Source must have a type')\n if self.source_type not in SOURCE_TYPES:\n raise ValueError('Unrecognized source type: %s' % self.source_type)\n if 'source_type' in self.__dict__:\n attribute_set = AREAS_ATTRIBUTES\n elif 'source_type' in self.__dict__:\n attribute_set = SIMPLE_FAULT_ATTRIBUTES\n else:\n raise ValueError('Unsupported source type')\n for key in self.__dict__:\n if key not in attribute_set:\n print('Attribute set', attribute_set)\n msg = 'Parameter %s not compatible with this source' % key\n raise ValueError(msg)\n\n def get_info(self):\n for key in self.__dict__:\n print('%30s:' % key, getattr(self, key))\n", "step-3": "<mask token>\nAREAS_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'source_type'])\n<mask token>\nSIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'dip', 'rake',\n 'hypo_list', 'slip_list'])\nSIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'source_type'])\nSIMPLE_FAULT_ATTRIBUTES |= set([''])\n<mask token>\nfor name, obj in inspect.getmembers(oqsrc):\n if inspect.isclass(obj):\n if not re.search('Rupture', name):\n SOURCE_TYPES.add(name)\n\n\nclass OQtSource(object):\n \"\"\"\n A container for information necessary to build and/or characterise an\n earthquake source\n\n :parameter str source_id:\n The ID of the source\n :parameter str source_type:\n Source type i.e. 
Object name amongst the ones admitted in the\n OpenQuake Hazardlib.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if len(args):\n self.source_id = args[0]\n if len(args) > 1:\n self.source_type = args[1]\n if len(kwargs):\n self.__dict__.update(kwargs)\n if 'source_id' not in self.__dict__:\n raise ValueError('Source must have an ID')\n elif not isinstance(self.source_id, str):\n raise ValueError('ID must be a string')\n if 'source_type' not in self.__dict__:\n raise ValueError('Source must have a type')\n if self.source_type not in SOURCE_TYPES:\n raise ValueError('Unrecognized source type: %s' % self.source_type)\n if 'source_type' in self.__dict__:\n attribute_set = AREAS_ATTRIBUTES\n elif 'source_type' in self.__dict__:\n attribute_set = SIMPLE_FAULT_ATTRIBUTES\n else:\n raise ValueError('Unsupported source type')\n for key in self.__dict__:\n if key not in attribute_set:\n print('Attribute set', attribute_set)\n msg = 'Parameter %s not compatible with this source' % key\n raise ValueError(msg)\n\n def get_info(self):\n for key in self.__dict__:\n print('%30s:' % key, getattr(self, key))\n", "step-4": "import inspect\nimport re\nimport openquake.hazardlib.source as oqsrc\nAREAS_ATTRIBUTES = set(['source_id', 'name', 'tectonic_region_type', 'mfd',\n 'rupture_mesh_spacing', 'magnitude_scaling_relationship',\n 'rupture_aspect_ratio', 'temporal_occurrence_model',\n 'upper_seismogenic_depth', 'lower_seismogenic_depth',\n 'nodal_plane_distribution', 'hypocenter_distribution', 'polygon',\n 'area_discretization'])\nAREAS_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'source_type'])\nSIMPLE_FAULT_ATTRIBUTES = set(['source_id', 'name', 'tectonic_region_type',\n 'mfd', 'rupture_mesh_spacing', 'magnitude_scaling_relationship',\n 'rupture_aspect_ratio', 'temporal_occurrence_model',\n 'upper_seismogenic_depth', 'lower_seismogenic_depth', 'fault_trace',\n 'dip', 'rake', 'hypo_list', 'sliprate'])\nSIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'dip', 'rake',\n 'hypo_list', 'slip_list'])\nSIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval', 'gr_bval', 'source_type'])\nSIMPLE_FAULT_ATTRIBUTES |= set([''])\nSOURCE_TYPES = set()\nfor name, obj in inspect.getmembers(oqsrc):\n if inspect.isclass(obj):\n if not re.search('Rupture', name):\n SOURCE_TYPES.add(name)\n\n\nclass OQtSource(object):\n \"\"\"\n A container for information necessary to build and/or characterise an\n earthquake source\n\n :parameter str source_id:\n The ID of the source\n :parameter str source_type:\n Source type i.e. 
Object name amongst the ones admitted in the\n OpenQuake Hazardlib.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if len(args):\n self.source_id = args[0]\n if len(args) > 1:\n self.source_type = args[1]\n if len(kwargs):\n self.__dict__.update(kwargs)\n if 'source_id' not in self.__dict__:\n raise ValueError('Source must have an ID')\n elif not isinstance(self.source_id, str):\n raise ValueError('ID must be a string')\n if 'source_type' not in self.__dict__:\n raise ValueError('Source must have a type')\n if self.source_type not in SOURCE_TYPES:\n raise ValueError('Unrecognized source type: %s' % self.source_type)\n if 'source_type' in self.__dict__:\n attribute_set = AREAS_ATTRIBUTES\n elif 'source_type' in self.__dict__:\n attribute_set = SIMPLE_FAULT_ATTRIBUTES\n else:\n raise ValueError('Unsupported source type')\n for key in self.__dict__:\n if key not in attribute_set:\n print('Attribute set', attribute_set)\n msg = 'Parameter %s not compatible with this source' % key\n raise ValueError(msg)\n\n def get_info(self):\n for key in self.__dict__:\n print('%30s:' % key, getattr(self, key))\n", "step-5": "import inspect\nimport re\nimport openquake.hazardlib.source as oqsrc\n\n# List of valid attributes for an area source\nAREAS_ATTRIBUTES = set(['source_id', \n\t\t\t'name', \n\t\t\t'tectonic_region_type', \n\t\t\t'mfd',\n 'rupture_mesh_spacing',\n 'magnitude_scaling_relationship',\n 'rupture_aspect_ratio',\n 'temporal_occurrence_model',\n 'upper_seismogenic_depth',\n 'lower_seismogenic_depth',\n 'nodal_plane_distribution',\n 'hypocenter_distribution',\n 'polygon',\n 'area_discretization'])\n \nAREAS_ATTRIBUTES |= set(['gr_aval', \n\t\t\t 'gr_bval', \n\t\t\t 'source_type'])\n\n# List of valid attributes for a simple source\nSIMPLE_FAULT_ATTRIBUTES = set(['source_id',\n 'name',\n 'tectonic_region_type',\n 'mfd',\n 'rupture_mesh_spacing',\n 'magnitude_scaling_relationship',\n 'rupture_aspect_ratio',\n 'temporal_occurrence_model',\n 'upper_seismogenic_depth',\n 'lower_seismogenic_depth',\n 'fault_trace',\n 'dip', \n 'rake', \n 'hypo_list', \n 'sliprate'])\n \nSIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval', \n 'gr_bval', \n 'dip',\n 'rake',\n 'hypo_list',\n 'slip_list'])\n\nSIMPLE_FAULT_ATTRIBUTES |= set(['gr_aval',\n 'gr_bval',\n 'source_type'])\n\n# This adds support for shapefiles created by the OpenQuake-engine\nSIMPLE_FAULT_ATTRIBUTES |= set([''])\n\n# Create the set of valid source types\nSOURCE_TYPES = set()\nfor name, obj in inspect.getmembers(oqsrc):\n if inspect.isclass(obj):\n if not re.search('Rupture', name):\n SOURCE_TYPES.add(name)\n\nclass OQtSource(object):\n \"\"\"\n A container for information necessary to build and/or characterise an\n earthquake source\n\n :parameter str source_id:\n The ID of the source\n :parameter str source_type:\n Source type i.e. 
Object name amongst the ones admitted in the\n OpenQuake Hazardlib.\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n # Checks\n if len(args):\n self.source_id = args[0]\n if len(args) > 1:\n self.source_type = args[1]\n if len(kwargs):\n self.__dict__.update(kwargs)\n # Check mandatory attributes: ID\n if 'source_id' not in self.__dict__:\n raise ValueError('Source must have an ID')\n elif not isinstance(self.source_id, str):\n raise ValueError('ID must be a string')\n # Check mandatory fields: SOURCE TYPE\n if 'source_type' not in self.__dict__:\n raise ValueError('Source must have a type')\n if self.source_type not in SOURCE_TYPES:\n raise ValueError('Unrecognized source type: %s' % self.source_type)\n if 'source_type' in self.__dict__:\n attribute_set = AREAS_ATTRIBUTES\n elif 'source_type' in self.__dict__:\n attribute_set = SIMPLE_FAULT_ATTRIBUTES\n else:\n raise ValueError('Unsupported source type')\n # Check attributes\n for key in self.__dict__:\n if key not in attribute_set:\n print ('Attribute set', attribute_set)\n msg = 'Parameter %s not compatible with this source' % (key)\n raise ValueError(msg)\n\n def get_info(self):\n for key in self.__dict__:\n print ('%30s:' % (key), getattr(self, key))\n", "step-ids": [ 2, 4, 5, 7, 8 ] }
[ 2, 4, 5, 7, 8 ]
import datetime import calendar import re def cardinal(ordinal): return int(''.join([char for char in ordinal if char.isdigit()])) def meetup_day(year, month, day_of_week, ordinal): days = { 0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday' } possible_days = [] number_of_days = calendar.monthrange(year, month)[1] days_of_month = [datetime.date(year, month, 1) + datetime.timedelta(days=x) for x in range(0, number_of_days)] for day in days_of_month: if days[day.weekday()] == day_of_week: possible_days.append(day.day) if ordinal == 'teenth': for x in possible_days: if 10 < x < 20: day_of_month = x elif ordinal == 'last': day_of_month = possible_days[-1] else: day_of_month = possible_days[cardinal(ordinal)-1] return datetime.date(year, month, day_of_month)
normal
{ "blob_id": "d4b1b6bdf125f2791c219b7db579c234eda0a73c", "index": 9220, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {(0): 'Monday', (1): 'Tuesday', (2): 'Wednesday', (3):\n 'Thursday', (4): 'Friday', (5): 'Saturday', (6): 'Sunday'}\n possible_days = []\n number_of_days = calendar.monthrange(year, month)[1]\n days_of_month = [(datetime.date(year, month, 1) + datetime.timedelta(\n days=x)) for x in range(0, number_of_days)]\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal) - 1]\n return datetime.date(year, month, day_of_month)\n", "step-4": "import datetime\nimport calendar\nimport re\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {(0): 'Monday', (1): 'Tuesday', (2): 'Wednesday', (3):\n 'Thursday', (4): 'Friday', (5): 'Saturday', (6): 'Sunday'}\n possible_days = []\n number_of_days = calendar.monthrange(year, month)[1]\n days_of_month = [(datetime.date(year, month, 1) + datetime.timedelta(\n days=x)) for x in range(0, number_of_days)]\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal) - 1]\n return datetime.date(year, month, day_of_month)\n", "step-5": "import datetime\nimport calendar\nimport re\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {\n 0: 'Monday',\n 1: 'Tuesday',\n 2: 'Wednesday',\n 3: 'Thursday',\n 4: 'Friday',\n 5: 'Saturday',\n 6: 'Sunday'\n }\n\n possible_days = []\n\n number_of_days = calendar.monthrange(year, month)[1]\n\n days_of_month = [datetime.date(year, month, 1) + datetime.timedelta(days=x) for x in range(0, number_of_days)]\n\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal)-1]\n\n return datetime.date(year, month, day_of_month)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import re match = re.search(r'pi+', 'piiig') print 'found', match.group() == "piii"
normal
{ "blob_id": "82083f16c18db35193fa2aa45bc28c5201962f90", "index": 6704, "step-1": "\n\nimport re\n\n\nmatch = re.search(r'pi+', 'piiig')\nprint 'found', match.group() == \"piii\"\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import os import json from page import Page from random import choice from os.path import join, expanduser from file_handler import f_read, f_readlines, open_local import config class LetterPage(Page): def __init__(self, page_num,n): super(LetterPage, self).__init__(page_num) self.title = "Letters" self.in_index = False self.n = n self.tagline = "Email [email protected] and your letter will appear here" self.letters = "" def background(self): self.letters = f_read("emails") if config.NAME == "KLBFAX" and self.n==1 and config.has_gmail_login(): import gmail details = f_readlines("gmail") g = gmail.login(details[0],details[1]) unread = g.inbox().mail(unread=True) for mail in unread: mail.fetch() lines = "".join(mail.body.split("\r")).split("\n") if lines[0] == "EVENT" and "[email protected]" in mail.fr: try: with open_local('events','a') as f: for line in lines: if line!="EVENT": f.write(line+"\n") mail.read() except: pass elif lines[0] == "CARD" and "[email protected]" in mail.fr: with open('/home/pi/cards/'+lines[1],"w") as f: f.write("\n".join(lines[2:])) mail.read() elif "POINTS" in lines[0].upper() and "[email protected]" in mail.fr: from points import add_points length = 1 points_to_give = 0 while length<=len(lines[2]): try: if lines[2][:length]!="-": points_to_give = int(lines[2][:length]) length += 1 except: break add_points(lines[1].split("=")[0],points_to_give) mail.read() else: newletter = "" for line in lines: if line!="": while len(line)>79: newletter += line[:79]+"\n" line=line[79:] newletter+=line+"\n" self.letters=newletter+"\n"+"from "+mail.fr+"\n\n"+self.letters mail.read() self.letters = self.letters.split("\n") if len(self.letters)>1000: self.letters = self.letters[:1000] with open_local("emails","w") as f: f.write("\n".join(self.letters)) else: self.letters = self.letters.split("\n") def generate_content(self): letters = self.letters[20*(self.n-1):20*self.n] letters = "\n".join(letters) try: letters = unicode(letters,'latin1') except: letters = str(letters) self.add_title("Have your say",font="size4") a = str(self.n)+"/21" self.move_cursor(x=90-len(a)) self.add_text(a, fg="BLUE", bg="YELLOW") self.move_cursor(x=0) self.start_random_bg_color() for line in letters.split("\n"): line = line.rstrip("\n") if line == "": self.end_bg_color() self.start_random_bg_color() self.add_text(line,fg="BLACK") self.add_newline() self.end_bg_color() if self.n==21: self.add_text("~ END OF LETTERS ~") else: self.add_text("The letters continue on page "+str(200+self.n)) letters_page1 = LetterPage("200",1) letters_page1.in_index = True letters_page1.index_num = "200-220" letters_page2 = LetterPage("201",2) letters_page3 = LetterPage("202",3) letters_page4 = LetterPage("203",4) letters_page5 = LetterPage("204",5) letters_page6 = LetterPage("205",6) letters_page7 = LetterPage("206",7) letters_page8 = LetterPage("207",8) letters_page9 = LetterPage("208",9) letters_page10 = LetterPage("209",10) letters_page11 = LetterPage("210",11) letters_page12 = LetterPage("211",12) letters_page13 = LetterPage("212",13) letters_page14 = LetterPage("213",14) letters_page15 = LetterPage("214",15) letters_page16 = LetterPage("215",16) letters_page17 = LetterPage("216",17) letters_page18 = LetterPage("217",18) letters_page19 = LetterPage("218",19) letters_page20 = LetterPage("219",20) letters_page21 = LetterPage("220",21)
normal
{ "blob_id": "e714fe0e27ec9ea5acb3120a4d2114d3d7674fcf", "index": 5601, "step-1": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email [email protected] and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and '[email protected]' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and '[email protected]' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and '[email protected]' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email [email protected] and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and '[email protected]' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and '[email protected]' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and '[email protected]' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n 
self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email [email protected] and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and '[email protected]' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and '[email protected]' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and '[email protected]' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n 
self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\nletters_page1 = LetterPage('200', 1)\nletters_page1.in_index = True\nletters_page1.index_num = '200-220'\nletters_page2 = LetterPage('201', 2)\nletters_page3 = LetterPage('202', 3)\nletters_page4 = LetterPage('203', 4)\nletters_page5 = LetterPage('204', 5)\nletters_page6 = LetterPage('205', 6)\nletters_page7 = LetterPage('206', 7)\nletters_page8 = LetterPage('207', 8)\nletters_page9 = LetterPage('208', 9)\nletters_page10 = LetterPage('209', 10)\nletters_page11 = LetterPage('210', 11)\nletters_page12 = LetterPage('211', 12)\nletters_page13 = LetterPage('212', 13)\nletters_page14 = LetterPage('213', 14)\nletters_page15 = LetterPage('214', 15)\nletters_page16 = LetterPage('215', 16)\nletters_page17 = LetterPage('216', 17)\nletters_page18 = LetterPage('217', 18)\nletters_page19 = LetterPage('218', 19)\nletters_page20 = LetterPage('219', 20)\nletters_page21 = LetterPage('220', 21)\n", "step-4": "import os\nimport json\nfrom page import Page\nfrom random import choice\nfrom os.path import join, expanduser\nfrom file_handler import f_read, f_readlines, open_local\nimport config\n\n\nclass LetterPage(Page):\n\n def __init__(self, page_num, n):\n super(LetterPage, self).__init__(page_num)\n self.title = 'Letters'\n self.in_index = False\n self.n = n\n self.tagline = (\n 'Email [email protected] and your letter will appear here')\n self.letters = ''\n\n def background(self):\n self.letters = f_read('emails')\n if config.NAME == 'KLBFAX' and self.n == 1 and config.has_gmail_login(\n ):\n import gmail\n details = f_readlines('gmail')\n g = gmail.login(details[0], details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = ''.join(mail.body.split('\\r')).split('\\n')\n if lines[0\n ] == 'EVENT' and '[email protected]' in mail.fr:\n try:\n with open_local('events', 'a') as f:\n for line in lines:\n if line != 'EVENT':\n f.write(line + '\\n')\n mail.read()\n except:\n pass\n elif lines[0\n ] == 'CARD' and '[email protected]' in mail.fr:\n with open('/home/pi/cards/' + lines[1], 'w') as f:\n f.write('\\n'.join(lines[2:]))\n mail.read()\n elif 'POINTS' in lines[0].upper(\n ) and '[email protected]' in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length <= len(lines[2]):\n try:\n if lines[2][:length] != '-':\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split('=')[0], points_to_give)\n mail.read()\n else:\n newletter = ''\n for line in lines:\n if line != '':\n while len(line) > 79:\n newletter += line[:79] + '\\n'\n line = line[79:]\n newletter += line + '\\n'\n self.letters = (newletter + '\\n' + 'from ' + mail.fr +\n '\\n\\n' + self.letters)\n mail.read()\n self.letters = self.letters.split('\\n')\n if len(self.letters) > 1000:\n self.letters = self.letters[:1000]\n with open_local('emails', 'w') as f:\n f.write('\\n'.join(self.letters))\n else:\n self.letters = self.letters.split('\\n')\n\n def generate_content(self):\n letters = self.letters[20 * (self.n - 1):20 * self.n]\n letters = '\\n'.join(letters)\n try:\n letters = unicode(letters, 'latin1')\n except:\n letters = str(letters)\n self.add_title('Have your say', font='size4')\n a = str(self.n) + '/21'\n self.move_cursor(x=90 - len(a))\n self.add_text(a, fg='BLUE', bg='YELLOW')\n self.move_cursor(x=0)\n 
self.start_random_bg_color()\n for line in letters.split('\\n'):\n line = line.rstrip('\\n')\n if line == '':\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line, fg='BLACK')\n self.add_newline()\n self.end_bg_color()\n if self.n == 21:\n self.add_text('~ END OF LETTERS ~')\n else:\n self.add_text('The letters continue on page ' + str(200 + self.n))\n\n\nletters_page1 = LetterPage('200', 1)\nletters_page1.in_index = True\nletters_page1.index_num = '200-220'\nletters_page2 = LetterPage('201', 2)\nletters_page3 = LetterPage('202', 3)\nletters_page4 = LetterPage('203', 4)\nletters_page5 = LetterPage('204', 5)\nletters_page6 = LetterPage('205', 6)\nletters_page7 = LetterPage('206', 7)\nletters_page8 = LetterPage('207', 8)\nletters_page9 = LetterPage('208', 9)\nletters_page10 = LetterPage('209', 10)\nletters_page11 = LetterPage('210', 11)\nletters_page12 = LetterPage('211', 12)\nletters_page13 = LetterPage('212', 13)\nletters_page14 = LetterPage('213', 14)\nletters_page15 = LetterPage('214', 15)\nletters_page16 = LetterPage('215', 16)\nletters_page17 = LetterPage('216', 17)\nletters_page18 = LetterPage('217', 18)\nletters_page19 = LetterPage('218', 19)\nletters_page20 = LetterPage('219', 20)\nletters_page21 = LetterPage('220', 21)\n", "step-5": "import os\nimport json\nfrom page import Page\nfrom random import choice\nfrom os.path import join, expanduser\nfrom file_handler import f_read, f_readlines, open_local\nimport config\n\nclass LetterPage(Page):\n def __init__(self, page_num,n):\n super(LetterPage, self).__init__(page_num)\n self.title = \"Letters\"\n self.in_index = False\n self.n = n\n self.tagline = \"Email [email protected] and your letter will appear here\"\n self.letters = \"\"\n\n def background(self):\n self.letters = f_read(\"emails\")\n if config.NAME == \"KLBFAX\" and self.n==1 and config.has_gmail_login():\n import gmail\n details = f_readlines(\"gmail\")\n\n g = gmail.login(details[0],details[1])\n unread = g.inbox().mail(unread=True)\n for mail in unread:\n mail.fetch()\n lines = \"\".join(mail.body.split(\"\\r\")).split(\"\\n\")\n if lines[0] == \"EVENT\" and \"[email protected]\" in mail.fr:\n try:\n with open_local('events','a') as f:\n for line in lines:\n if line!=\"EVENT\":\n f.write(line+\"\\n\")\n mail.read()\n except:\n pass\n elif lines[0] == \"CARD\" and \"[email protected]\" in mail.fr:\n with open('/home/pi/cards/'+lines[1],\"w\") as f:\n f.write(\"\\n\".join(lines[2:]))\n mail.read()\n elif \"POINTS\" in lines[0].upper() and \"[email protected]\" in mail.fr:\n from points import add_points\n length = 1\n points_to_give = 0\n while length<=len(lines[2]):\n try:\n if lines[2][:length]!=\"-\":\n points_to_give = int(lines[2][:length])\n length += 1\n except:\n break\n add_points(lines[1].split(\"=\")[0],points_to_give)\n mail.read()\n \n else:\n newletter = \"\"\n for line in lines:\n if line!=\"\":\n while len(line)>79:\n newletter += line[:79]+\"\\n\"\n line=line[79:]\n newletter+=line+\"\\n\"\n \n self.letters=newletter+\"\\n\"+\"from \"+mail.fr+\"\\n\\n\"+self.letters\n mail.read()\n self.letters = self.letters.split(\"\\n\")\n if len(self.letters)>1000:\n self.letters = self.letters[:1000]\n with open_local(\"emails\",\"w\") as f:\n f.write(\"\\n\".join(self.letters))\n else:\n self.letters = self.letters.split(\"\\n\")\n\n\n def generate_content(self):\n letters = self.letters[20*(self.n-1):20*self.n]\n letters = \"\\n\".join(letters)\n try:\n letters = unicode(letters,'latin1')\n except:\n letters = str(letters)\n\n\n 
self.add_title(\"Have your say\",font=\"size4\")\n a = str(self.n)+\"/21\"\n self.move_cursor(x=90-len(a))\n self.add_text(a, fg=\"BLUE\", bg=\"YELLOW\")\n self.move_cursor(x=0)\n self.start_random_bg_color()\n for line in letters.split(\"\\n\"):\n line = line.rstrip(\"\\n\")\n if line == \"\":\n self.end_bg_color()\n self.start_random_bg_color()\n self.add_text(line,fg=\"BLACK\")\n self.add_newline()\n self.end_bg_color()\n if self.n==21:\n self.add_text(\"~ END OF LETTERS ~\")\n else:\n self.add_text(\"The letters continue on page \"+str(200+self.n))\n\nletters_page1 = LetterPage(\"200\",1)\nletters_page1.in_index = True\nletters_page1.index_num = \"200-220\"\nletters_page2 = LetterPage(\"201\",2)\nletters_page3 = LetterPage(\"202\",3)\nletters_page4 = LetterPage(\"203\",4)\nletters_page5 = LetterPage(\"204\",5)\nletters_page6 = LetterPage(\"205\",6)\nletters_page7 = LetterPage(\"206\",7)\nletters_page8 = LetterPage(\"207\",8)\nletters_page9 = LetterPage(\"208\",9)\nletters_page10 = LetterPage(\"209\",10)\nletters_page11 = LetterPage(\"210\",11)\nletters_page12 = LetterPage(\"211\",12)\nletters_page13 = LetterPage(\"212\",13)\nletters_page14 = LetterPage(\"213\",14)\nletters_page15 = LetterPage(\"214\",15)\nletters_page16 = LetterPage(\"215\",16)\nletters_page17 = LetterPage(\"216\",17)\nletters_page18 = LetterPage(\"217\",18)\nletters_page19 = LetterPage(\"218\",19)\nletters_page20 = LetterPage(\"219\",20)\nletters_page21 = LetterPage(\"220\",21)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'Brice Chou'

import os
import lib
import sys
import time
import getopt
import training

try:
    import cv2
    import h5py
except Exception as e:
    error_info = 'Please install h5py/cv2 tools first. Error: {}.\n'.format(e)
    print('\033[0;31m%s\033[0m' % error_info)
    quit()


class Usage(Exception):
    def __init__(self, msg):
        self.msg = msg


def run():
    # Set the window name
    window_name = __author__

    # Get a reference to webcam #-1 (the last one)
    video_capture = cv2.VideoCapture(-1)

    # Initialize some variables
    unknown_folder_path = os.path.abspath('unknown')
    i = lib.get_file_max_number(unknown_folder_path)
    filerd = h5py.File('database/training_encodings.hdf5', 'r')

    # Image encodings mode
    encodings_mode = 'large'

    # Temporary list to save the predicted result names
    face_names = []

    # Save the screen locations and encodings to find a person
    screen_locations = []
    screen_encodings = []

    # Save the training data from the database
    training_names = []
    training_eigenvalues = []

    process_this_frame = True

    for key in filerd.keys():
        training_names.append(filerd[key].name.split('/')[-1])
        training_eigenvalues.append(filerd[key].value)

    filerd.close()

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/2 size
        # for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings
            # in the current frame of video
            screen_locations = lib.face_locations(small_frame, 1,
                                                  'hog')
            screen_encodings = lib.face_encodings(small_frame, None,
                                                  1, encodings_mode)
            face_names = []

            # How many faces are in the screen
            detected_face_length = len(screen_locations)
            info = 'We detected \033[0;32m{}\033[0m faces in the screen.\n'
            print(info.format(detected_face_length))
            if detected_face_length >= 1:
                for screen_encoding in screen_encodings:
                    # Compare the encodings and get the face's name
                    name = lib.compare_faces(training_eigenvalues,
                                             training_names,
                                             screen_encoding, 0.31)
                    face_names.append(name)

                    # Auto save the unknown images
                    if '' == name:
                        img_file_path = '{}/{}.jpg'.format(
                            unknown_folder_path, i)
                        cv2.imwrite(img_file_path, frame)
                        i += 1
                        time.sleep(0.15)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(screen_locations, face_names):
            # The frame we detected in was scaled to 1/2 size
            top *= 2
            right *= 2
            bottom *= 2
            left *= 2

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            if '' != name:
                # Draw a label with a name below the face
                # cv2.cv.CV_FILLED
                cv2.rectangle(frame, (left - 60, bottom + 30),
                              (right + 60, bottom - 10), (0, 0, 255),
                              cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left - 50, bottom + 20),
                            font, 1, (255, 255, 255), 1)

        # Display the resulting image
        cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
        # cv2.cv.CV_WINDOW_FULLSCREEN
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow(window_name, frame)

        key = cv2.waitKey(1)
        if key == ord('s'):
            label = 'cache/{}.jpg'.format(i)
            cv2.imwrite(label, frame)
            i += 1
        elif key == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()


def main(argv=None):
    if argv is None:
        argv = sys.argv
    try:
        try:
            argv_list = argv[1:]
            opts, args = getopt.getopt(argv_list, 'h', ['help'])
            arg = argv_list[0]
            if 'run' == arg:
                run()
            elif 'save' == arg:
                training.save()
            elif 'move' == arg:
training.move() elif 'detect' == arg: training.detect() elif 'catch' == arg: if 2 == len(argv_list): training.catch(argv_list[1]) else: training.catch() elif 'rotate' == arg: if 2 == len(argv_list): training.rotate(amount=int(argv_list[1])) else: training.rotate() except getopt.error, msg: raise Usage(msg) except Usage, err: print >>sys.stderr, err.msg print >>sys.stderr, 'for help use --help' return 2 if __name__ == '__main__': lib.initial_project_folder() sys.exit(main())
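# Example invocations (a sketch: the file name main.py is assumed, and the
# project-local `lib`/`training` modules plus an attached webcam must exist):
#   python main.py run        # live recognition window; 's' saves a frame, 'q' quits
#   python main.py catch Tom  # collect samples for the label 'Tom'
#   python main.py rotate 90  # rotate training images by 90 degrees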
normal
{ "blob_id": "398263b65fd98003f27020e46ae38e913dc5dd45", "index": 323, "step-1": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'Brice Chou'\n\nimport os\nimport lib\nimport sys\nimport time\nimport getopt\nimport training\n\ntry:\n import cv2\n import h5py\nexcept Exception as e:\n error_info = 'Please install h5py/cv2 tools first. Error: {}.\\n'.format(e)\n print('\\033[0;31m%s\\033[0m' % error_info)\n quit()\n\n\nclass Usage(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n\ndef run():\n # Set the window name\n window_name = __author__\n\n # Get a reference to webcam #-1 (the last one)\n video_capture = cv2.VideoCapture(-1)\n\n # Initialize some variables\n unknown_folder_path = os.path.abspath('unknown')\n i = lib.get_file_max_number(unknown_folder_path)\n filerd = h5py.File('database/training_encodings.hdf5', 'r')\n\n # Image encodings mode\n encodings_mode = 'large'\n\n # Temp to save predict result name\n face_names = []\n\n # Save the screen locations and encodings to find a person\n screen_locations = []\n screen_encodings = []\n\n # Save the training data from database\n training_names = []\n training_eigenvalues = []\n\n process_this_frame = True\n\n for key in filerd.keys():\n training_names.append(filerd[key].name.split('/')[-1])\n training_eigenvalues.append(filerd[key].value)\n\n filerd.close()\n\n while True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Resize frame of video to 1/4 size\n # for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings\n # in the current frame of video\n screen_locations = lib.face_locations(small_frame, 1,\n 'hog')\n screen_encodings = lib.face_encodings(small_frame, None,\n 1, encodings_mode)\n face_names = []\n\n # How manay faces in the screen\n detected_face_length = len(screen_locations)\n info = 'We detected \\033[0;32m{}\\033[0m faces in the screen.\\n'\n print(info.format(detected_face_length))\n if detected_face_length >= 1:\n for screen_encoding in screen_encodings:\n # Compare the locations and get the face's name\n name = lib.compare_faces(training_eigenvalues,\n training_names,\n screen_encoding, 0.31)\n face_names.append(name)\n\n # Auto save the unknown images\n if '' == name:\n img_file_path = '{}/{}.jpg'.format(\n unknown_folder_path, i)\n cv2.imwrite(img_file_path, frame)\n i += 1\n time.sleep(0.15)\n\n process_this_frame = not process_this_frame\n\n # Display the results\n for (top, right, bottom, left), name in zip(screen_locations, face_names):\n # We detected in was scaled to 1/2 size\n top *= 2\n right *= 2\n bottom *= 2\n left *= 2\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n if '' != name:\n # Draw a label with a name below the face\n # # cv2.cv.CV_FILLED\n cv2.rectangle(frame, (left - 60, bottom + 30),\n (right + 60, bottom - 10), (0, 0, 255),\n cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left - 50, bottom + 20),\n font, 1, (255, 255, 255), 1)\n\n # Display the resulting image\n cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)\n # cv2.cv.CV_WINDOW_FULLSCREEN\n cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,\n cv2.WINDOW_FULLSCREEN)\n cv2.imshow(window_name, frame)\n\n key = cv2.waitKey(1)\n if key == ord('s'):\n label = 'cache/{}.jpg'.format(i)\n cv2.imwrite(label, frame)\n i += 1\n elif key == 
ord('q'):\n break\n\n # Release handle to the webcam\n video_capture.release()\n cv2.destroyAllWindows()\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n try:\n try:\n argv_list = argv[1:]\n opts, args = getopt.getopt(argv_list, 'h', ['help'])\n arg = argv_list[0]\n if 'run' == arg:\n run()\n elif 'save' == arg:\n training.save()\n elif 'move' == arg:\n training.move()\n elif 'detect' == arg:\n training.detect()\n elif 'catch' == arg:\n if 2 == len(argv_list):\n training.catch(argv_list[1])\n else:\n training.catch()\n elif 'rotate' == arg:\n if 2 == len(argv_list):\n training.rotate(amount=int(argv_list[1]))\n else:\n training.rotate()\n except getopt.error, msg:\n raise Usage(msg)\n except Usage, err:\n print >>sys.stderr, err.msg\n print >>sys.stderr, 'for help use --help'\n return 2\n\n\nif __name__ == '__main__':\n lib.initial_project_folder()\n sys.exit(main())\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
var blackList = []string{
    // global
    "document", "window", "top", "parent", "global", "this",
    // func
    "console", "alert", "log", "promise", "fetch", "eval", "import",
    // char
    "<", ">", "`", "\\*", "&", "#", "%", "\\\\",
    // key
    "if", "set", "get", "with", "yield", "async", "wait", "func", "for", "error", "string",
    // string
    "href", "location", "url", "cookie", "src",
}
normal
{ "blob_id": "f502290cc8ffa9571454a214497aff1d1c5e1c9f", "index": 8285, "step-1": "var blackList = []string{\n\t// global\n\t\"document\", \"window\", \"top\", \"parent\", \"global\", \"this\",\n\t//func\n\t\"console\", \"alert\", \"log\", \"promise\", \"fetch\", \"eval\", \"import\",\n\t//char\n\t\"<\", \">\", \"`\", \"\\\\*\", \"&\", \"#\", \"%\", \"\\\\\\\\\",\n\t//key\n\t\"if\", \"set\", \"get\", \"with\", \"yield\", \"async\", \"wait\", \"func\", \"for\", \"error\", \"string\",\n\t//string\n\t\"href\", \"location\", \"url\", \"cookie\", \"src\",\n}", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# 5. Write a program to implement polymorphism.

class Honda:

    def __init__(self, name, color):
        self.name = name
        self.color = color

    def display(self):
        print("Honda car name is : ", self.name, " and color is : ", self.color)


class Audi:

    def __init__(self, name, color):
        self.name = name
        self.color = color

    def display(self):
        print("Audi car name is : ", self.name, " and color is : ", self.color)


HondaCar = Honda("Honda City", "White")
AudiCar = Audi("A6", "Black")

for car in (HondaCar, AudiCar):
    car.display()
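# The loop above relies on duck typing: any class exposing display() can join
# it unchanged. A hypothetical third brand, for illustration only:
class Tesla:

    def __init__(self, name, color):
        self.name = name
        self.color = color

    def display(self):
        print("Tesla car name is : ", self.name, " and color is : ", self.color)


for car in (HondaCar, AudiCar, Tesla("Model 3", "Red")):
    car.display()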
normal
{ "blob_id": "92f59612b2697db155da1bdc625fdabc115867b0", "index": 9600, "step-1": "class Honda:\n <mask token>\n <mask token>\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Audi car name is : ', self.name, ' and color is : ', self.color)\n\n\n<mask token>\n", "step-2": "class Honda:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Honda car name is : ', self.name, ' and color is : ', self.color\n )\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Audi car name is : ', self.name, ' and color is : ', self.color)\n\n\n<mask token>\n", "step-3": "class Honda:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Honda car name is : ', self.name, ' and color is : ', self.color\n )\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Audi car name is : ', self.name, ' and color is : ', self.color)\n\n\n<mask token>\nfor car in (HondaCar, AudiCar):\n car.display()\n", "step-4": "class Honda:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Honda car name is : ', self.name, ' and color is : ', self.color\n )\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print('Audi car name is : ', self.name, ' and color is : ', self.color)\n\n\nHondaCar = Honda('Honda City', 'White')\nAudiCar = Audi('A6', 'Black')\nfor car in (HondaCar, AudiCar):\n car.display()\n", "step-5": "# 5. Write a program to implement polymorphism.\n\nclass Honda:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print(\"Honda car name is : \", self.name, \" and color is : \", self.color)\n\n\nclass Audi:\n\n def __init__(self, name, color):\n self.name = name\n self.color = color\n\n def display(self):\n print(\"Audi car name is : \", self.name, \" and color is : \", self.color)\n\n\nHondaCar = Honda(\"Honda City\", \"White\")\nAudiCar = Audi(\"A6\", \"Black\")\n\nfor car in (HondaCar, AudiCar):\n car.display()\n\n", "step-ids": [ 4, 6, 7, 8, 9 ] }
[ 4, 6, 7, 8, 9 ]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 14 20:35:10 2020

@author: Johanna
"""
import numpy as np

###############################################################################
# Complex Visibility Functions
###############################################################################

def compute_vis(X, F):
    vis = np.matmul(X, np.transpose(F)).astype(np.complex64)
    return vis

def compute_vis_grad(vis, Z, F):
    Z_vis = compute_vis(Z, F)
    grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)
    return grad.real

def chisq_vis(vis, Z, F, sigma):
    '''
    Compute mean chi-squared of visibilities of Z.
    '''
    samples = compute_vis(Z, F)
    chisq = np.sum(np.abs((samples - vis)/sigma)**2)/(2*len(vis))
    return chisq

###############################################################################
# Visibility Amplitude Functions
###############################################################################

def compute_amp(X, F):
    ''' Given an image X and DFT matrix F, compute and return its
    visibility amplitude. '''
    amp = np.abs(np.dot(F, X))
    return amp

def compute_amp_grad(amp, Z, A, sigma):
    '''
    Compute gradient of visibility amplitude.
    '''
    i1 = np.dot(A, Z)
    amp_samples = np.abs(i1)

    pp = ((amp - amp_samples) * amp_samples) / (sigma**2) / i1
    out = (-2.0/len(amp)) * np.real(np.dot(pp, A))
    return out

def chisq_amp(amp, Z, F, sigma):
    ''' Compute and return chi-squared of amplitude between X and Z. '''
    amp_Z = compute_amp(Z, F)
    chisq = np.sum(np.abs((amp - amp_Z)/sigma)**2)/len(amp)
    return chisq

###############################################################################
# Closure Phase Functions
###############################################################################

def compute_cphase(X, F_cphase):
    ''' Given an image X and the DFT matrices from three baselines,
    compute and return its closure phase. '''
    # Get fourier matrices of each baseline
    A1 = F_cphase[:, :, 0]
    A2 = F_cphase[:, :, 1]
    A3 = F_cphase[:, :, 2]

    X = np.array(X)

    # Compute observed closure phase of image
    vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
    vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
    vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)

    cphase = np.angle(vis1 * vis2 * vis3)

    return cphase

def compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):
    '''
    Compute gradient of closure phase chi-squared

    cphase : closure phase of true image
    Z : predicted image vector
    F_cphase : 3 DFT matrices from three baselines in a closure triangle
    '''
    # Get fourier matrices of each baseline
    A1 = F_cphase[:, :, 0]
    A2 = F_cphase[:, :, 1]
    A3 = F_cphase[:, :, 2]

    i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)
    i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)
    i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)
    cphase_samples = np.angle(i1 * i2 * i3)

    pref = np.sin(cphase - cphase_samples)/(sigma**2)
    pt1 = pref/i1
    pt2 = pref/i2
    pt3 = pref/i3
    out = -(2.0/len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +
                                       np.dot(pt3, A3))

    return out.reshape(npix**2)

def chisq_cphase(cphase, Z, F_cphase, sigma_cphase):
    """Closure Phase reduced chi-squared loss."""
    cphase_samples = compute_cphase(Z, F_cphase)
    chisq = (2.0/len(cphase)) * np.sum((1.0 - np.cos(cphase - cphase_samples))/(sigma_cphase**2))
    return chisq

###############################################################################
# Closure Amplitude Functions
###############################################################################

def compute_camp(X, Amatrices):
    '''
    Compute closure amplitude of image vector X.
    '''
    i1 = np.dot(Amatrices[0], X)
    i2 = np.dot(Amatrices[1], X)
    i3 = np.dot(Amatrices[2], X)
    i4 = np.dot(Amatrices[3], X)

    camp = np.abs((i1 * i2)/(i3 * i4))
    return camp

def compute_camp_grad(camp, Z, Amatrices, sigma):
    """
    The gradient of the closure amplitude chi-squared

    camp: Closure amplitudes of true image
    Z: Predicted image vector
    Amatrices: DFT matrices of four baselines
    """
    i1 = np.dot(Amatrices[0], Z)
    i2 = np.dot(Amatrices[1], Z)
    i3 = np.dot(Amatrices[2], Z)
    i4 = np.dot(Amatrices[3], Z)
    camp_samples = np.abs((i1 * i2)/(i3 * i4))

    pp = ((camp - camp_samples) * camp_samples)/(sigma**2)
    pt1 = pp/i1
    pt2 = pp/i2
    pt3 = -pp/i3
    pt4 = -pp/i4
    out = (np.dot(pt1, Amatrices[0]) +
           np.dot(pt2, Amatrices[1]) +
           np.dot(pt3, Amatrices[2]) +
           np.dot(pt4, Amatrices[3]))

    return (-2.0/len(camp)) * np.real(out)

def chisq_camp(camp, Z, Amatrices, sigma):
    """Closure Amplitudes reduced chi-squared loss."""
    i1 = np.dot(Amatrices[0], Z)
    i2 = np.dot(Amatrices[1], Z)
    i3 = np.dot(Amatrices[2], Z)
    i4 = np.dot(Amatrices[3], Z)
    camp_samples = np.abs((i1 * i2)/(i3 * i4))

    chisq = np.sum(np.abs((camp - camp_samples)/sigma)**2)/len(camp)
    return chisq

###############################################################################
# Log Closure Amplitude Functions
###############################################################################

def compute_lgcamp(X, Amatrices):
    ''' Compute log closure amplitude of image vector X '''
    a1 = np.abs(np.dot(Amatrices[0], X))
    a2 = np.abs(np.dot(Amatrices[1], X))
    a3 = np.abs(np.dot(Amatrices[2], X))
    a4 = np.abs(np.dot(Amatrices[3], X))

    lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)
    return lgcamp

def compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):
    """The gradient of the Log closure amplitude chi-squared"""
    i1 = np.dot(Amatrices[0], Z)
    i2 = np.dot(Amatrices[1], Z)
    i3 = np.dot(Amatrices[2], Z)
    i4 = np.dot(Amatrices[3], Z)
    lgcamp_samples = (np.log(np.abs(i1)) +
                      np.log(np.abs(i2)) -
                      np.log(np.abs(i3)) -
                      np.log(np.abs(i4)))

    pp = (lgcamp - lgcamp_samples) / (sigma**2)
    pt1 = pp / i1
    pt2 = pp / i2
    pt3 = -pp / i3
    pt4 = -pp / i4
    out = (np.dot(pt1, Amatrices[0]) +
           np.dot(pt2, Amatrices[1]) +
           np.dot(pt3, Amatrices[2]) +
           np.dot(pt4, Amatrices[3]))

    return (-2.0/len(lgcamp)) * np.real(out)

def chisq_lgcamp(lgcamp, X, Amatrices, sigma):
    """Log Closure Amplitudes reduced chi-squared"""
    a1 = np.abs(np.dot(Amatrices[0], X))
    a2 = np.abs(np.dot(Amatrices[1], X))
    a3 = np.abs(np.dot(Amatrices[2], X))
    a4 = np.abs(np.dot(Amatrices[3], X))

    samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)
    chisq = np.sum(np.abs((lgcamp - samples)/sigma)**2) / (len(lgcamp))
    return chisq
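# A minimal self-check sketch of the functions above (the random F and X are
# placeholders for a real DFT matrix and image vector; shapes are assumed):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    npix, n_vis = 8, 5
    F = rng.normal(size=(n_vis, npix**2)) + 1j*rng.normal(size=(n_vis, npix**2))
    X = rng.random(npix**2)
    sigma = np.ones(n_vis)
    vis = compute_vis(X, F)
    # chi-squared of the true image against its own visibilities is ~0
    print(chisq_vis(vis, X, F, sigma))
    print(chisq_amp(compute_amp(X, F), X, F, sigma))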
normal
{ "blob_id": "ea3217be80b6d1d3a400139bc4a91870cd2f1d87", "index": 5118, "step-1": "<mask token>\n\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\n\n<mask token>\n\n\ndef compute_amp_grad(amp, Z, A, sigma):\n \"\"\" \n Compute gradient of visibility amplitude.\n \"\"\"\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1\n out = -2.0 / len(amp) * np.real(np.dot(pp, A))\n return out\n\n\ndef chisq_amp(amp, Z, F, sigma):\n \"\"\" Compute and return chi-squared of amplitude between X and Z. \"\"\"\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)\n return chisq\n\n\ndef compute_cphase(X, F_cphase):\n \"\"\" Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n X = np.array(X)\n vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase = np.angle(vis1 * vis2 * vis3)\n return cphase\n\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n \"\"\" \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n pref = np.sin(cphase - cphase_samples) / sigma ** 2\n pt1 = pref / i1\n pt2 = pref / i2\n pt3 = pref / i3\n out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +\n np.dot(pt3, A3))\n return out.reshape(npix ** 2)\n\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -\n cphase_samples)) / sigma_cphase ** 2)\n return chisq\n\n\ndef compute_camp(X, Amatrices):\n \"\"\"\n Compute closure amplitude of image vector X.\n \"\"\"\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n camp = np.abs(i1 * i2 / (i3 * i4))\n return camp\n\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n pp = (camp - camp_samples) * camp_samples / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(camp) * np.real(out)\n\n\n<mask token>\n\n\ndef compute_lgcamp(X, Amatrices):\n \"\"\" 
Compute log closure amplitude of image vector X \"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\n\n<mask token>\n\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)\n return chisq\n", "step-2": "<mask token>\n\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\n\n<mask token>\n\n\ndef compute_amp_grad(amp, Z, A, sigma):\n \"\"\" \n Compute gradient of visibility amplitude.\n \"\"\"\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1\n out = -2.0 / len(amp) * np.real(np.dot(pp, A))\n return out\n\n\ndef chisq_amp(amp, Z, F, sigma):\n \"\"\" Compute and return chi-squared of amplitude between X and Z. \"\"\"\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)\n return chisq\n\n\ndef compute_cphase(X, F_cphase):\n \"\"\" Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n X = np.array(X)\n vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase = np.angle(vis1 * vis2 * vis3)\n return cphase\n\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n \"\"\" \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n pref = np.sin(cphase - cphase_samples) / sigma ** 2\n pt1 = pref / i1\n pt2 = pref / i2\n pt3 = pref / i3\n out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +\n np.dot(pt3, A3))\n return out.reshape(npix ** 2)\n\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -\n cphase_samples)) / sigma_cphase ** 2)\n return chisq\n\n\ndef compute_camp(X, Amatrices):\n \"\"\"\n Compute closure amplitude of image vector X.\n \"\"\"\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n camp = np.abs(i1 * i2 / (i3 * i4))\n return camp\n\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude 
chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n pp = (camp - camp_samples) * camp_samples / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(camp) * np.real(out)\n\n\ndef chisq_camp(camp, Z, Amatrices, sigma):\n \"\"\"Closure Amplitudes reduced chi-squared loss.\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n chisq = np.sum(np.abs((camp - camp_samples) / sigma) ** 2) / len(camp)\n return chisq\n\n\ndef compute_lgcamp(X, Amatrices):\n \"\"\" Compute log closure amplitude of image vector X \"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\n\ndef compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):\n \"\"\"The gradient of the Log closure amplitude chi-squared\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.\n abs(i3)) - np.log(np.abs(i4))\n pp = (lgcamp - lgcamp_samples) / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(lgcamp) * np.real(out)\n\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)\n return chisq\n", "step-3": "<mask token>\n\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\n\ndef chisq_vis(vis, Z, F, sigma):\n \"\"\" \n Compute mean chi-squared of visibilities of Z.\n \"\"\"\n samples = compute_vis(Z, F)\n chisq = np.sum(np.abs((samples - vis) / sigma) ** 2) / (2 * len(vis))\n return chisq\n\n\ndef compute_amp(X, F):\n \"\"\" Given an image X and DFT matrix F, compute and return its \n visibility amplitude. \"\"\"\n amp = np.abs(np.dot(F, X))\n return amp\n\n\ndef compute_amp_grad(amp, Z, A, sigma):\n \"\"\" \n Compute gradient of visibility amplitude.\n \"\"\"\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1\n out = -2.0 / len(amp) * np.real(np.dot(pp, A))\n return out\n\n\ndef chisq_amp(amp, Z, F, sigma):\n \"\"\" Compute and return chi-squared of amplitude between X and Z. 
\"\"\"\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)\n return chisq\n\n\ndef compute_cphase(X, F_cphase):\n \"\"\" Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n X = np.array(X)\n vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase = np.angle(vis1 * vis2 * vis3)\n return cphase\n\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n \"\"\" \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n pref = np.sin(cphase - cphase_samples) / sigma ** 2\n pt1 = pref / i1\n pt2 = pref / i2\n pt3 = pref / i3\n out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +\n np.dot(pt3, A3))\n return out.reshape(npix ** 2)\n\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -\n cphase_samples)) / sigma_cphase ** 2)\n return chisq\n\n\ndef compute_camp(X, Amatrices):\n \"\"\"\n Compute closure amplitude of image vector X.\n \"\"\"\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n camp = np.abs(i1 * i2 / (i3 * i4))\n return camp\n\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n pp = (camp - camp_samples) * camp_samples / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(camp) * np.real(out)\n\n\ndef chisq_camp(camp, Z, Amatrices, sigma):\n \"\"\"Closure Amplitudes reduced chi-squared loss.\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n chisq = np.sum(np.abs((camp - camp_samples) / sigma) ** 2) / len(camp)\n return chisq\n\n\ndef compute_lgcamp(X, Amatrices):\n \"\"\" Compute log closure amplitude of image vector X \"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\n\ndef compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):\n \"\"\"The gradient of the Log closure 
amplitude chi-squared\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.\n abs(i3)) - np.log(np.abs(i4))\n pp = (lgcamp - lgcamp_samples) / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(lgcamp) * np.real(out)\n\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)\n return chisq\n", "step-4": "<mask token>\nimport numpy as np\n\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\n\ndef chisq_vis(vis, Z, F, sigma):\n \"\"\" \n Compute mean chi-squared of visibilities of Z.\n \"\"\"\n samples = compute_vis(Z, F)\n chisq = np.sum(np.abs((samples - vis) / sigma) ** 2) / (2 * len(vis))\n return chisq\n\n\ndef compute_amp(X, F):\n \"\"\" Given an image X and DFT matrix F, compute and return its \n visibility amplitude. \"\"\"\n amp = np.abs(np.dot(F, X))\n return amp\n\n\ndef compute_amp_grad(amp, Z, A, sigma):\n \"\"\" \n Compute gradient of visibility amplitude.\n \"\"\"\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n pp = (amp - amp_samples) * amp_samples / sigma ** 2 / i1\n out = -2.0 / len(amp) * np.real(np.dot(pp, A))\n return out\n\n\ndef chisq_amp(amp, Z, F, sigma):\n \"\"\" Compute and return chi-squared of amplitude between X and Z. \"\"\"\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z) / sigma) ** 2) / len(amp)\n return chisq\n\n\ndef compute_cphase(X, F_cphase):\n \"\"\" Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. 
\"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n X = np.array(X)\n vis1 = np.matmul(X.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase = np.angle(vis1 * vis2 * vis3)\n return cphase\n\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n \"\"\" \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n \"\"\"\n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n i1 = np.matmul(Z.reshape((1, -1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1, -1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1, -1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n pref = np.sin(cphase - cphase_samples) / sigma ** 2\n pt1 = pref / i1\n pt2 = pref / i2\n pt3 = pref / i3\n out = -(2.0 / len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) +\n np.dot(pt3, A3))\n return out.reshape(npix ** 2)\n\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq = 2.0 / len(cphase) * np.sum((1.0 - np.cos(cphase -\n cphase_samples)) / sigma_cphase ** 2)\n return chisq\n\n\ndef compute_camp(X, Amatrices):\n \"\"\"\n Compute closure amplitude of image vector X.\n \"\"\"\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n camp = np.abs(i1 * i2 / (i3 * i4))\n return camp\n\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n pp = (camp - camp_samples) * camp_samples / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(camp) * np.real(out)\n\n\ndef chisq_camp(camp, Z, Amatrices, sigma):\n \"\"\"Closure Amplitudes reduced chi-squared loss.\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs(i1 * i2 / (i3 * i4))\n chisq = np.sum(np.abs((camp - camp_samples) / sigma) ** 2) / len(camp)\n return chisq\n\n\ndef compute_lgcamp(X, Amatrices):\n \"\"\" Compute log closure amplitude of image vector X \"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\n\ndef compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):\n \"\"\"The gradient of the Log closure amplitude chi-squared\"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n lgcamp_samples = np.log(np.abs(i1)) + np.log(np.abs(i2)) - np.log(np.\n abs(i3)) - np.log(np.abs(i4))\n pp = 
(lgcamp - lgcamp_samples) / sigma ** 2\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = np.dot(pt1, Amatrices[0]) + np.dot(pt2, Amatrices[1]) + np.dot(pt3,\n Amatrices[2]) + np.dot(pt4, Amatrices[3])\n return -2.0 / len(lgcamp) * np.real(out)\n\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples) / sigma) ** 2) / len(lgcamp)\n return chisq\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 14 20:35:10 2020\n\n@author: Johanna\n\"\"\"\nimport numpy as np\n\n###############################################################################\n# Complex Visibility Functions\n###############################################################################\n\ndef compute_vis(X, F):\n vis = np.matmul(X, np.transpose(F)).astype(np.complex64)\n return vis\n\ndef compute_vis_grad(vis, Z, F):\n Z_vis = compute_vis(Z, F)\n grad = -np.matmul(np.conjugate(F.T), vis - Z_vis)\n return grad.real\n\ndef chisq_vis(vis, Z, F, sigma):\n ''' \n Compute mean chi-squared of visibilities of Z.\n '''\n samples = compute_vis(Z, F)\n chisq = np.sum(np.abs((samples-vis)/sigma)**2)/(2*len(vis))\n return chisq\n\n###############################################################################\n# Visibility Amplitude Functions\n###############################################################################\n \ndef compute_amp(X, F):\n ''' Given an image X and DFT matrix F, compute and return its \n visibility amplitude. '''\n amp = np.abs(np.dot(F, X))\n return amp\n\ndef compute_amp_grad(amp, Z, A, sigma):\n ''' \n Compute gradient of visibility amplitude.\n '''\n i1 = np.dot(A, Z)\n amp_samples = np.abs(i1)\n\n pp = ((amp - amp_samples) * amp_samples) / (sigma**2) / i1\n out = (-2.0/len(amp)) * np.real(np.dot(pp, A))\n return out\n\ndef chisq_amp(amp, Z, F, sigma):\n ''' Compute and return chi-squared of amplitude between X and Z. '''\n amp_Z = compute_amp(Z, F)\n chisq = np.sum(np.abs((amp - amp_Z)/sigma)**2)/len(amp)\n return chisq \n\n###############################################################################\n# Closure Phase Functions\n###############################################################################\n\ndef compute_cphase(X, F_cphase):\n ''' Given an image X and the DFT matrices from three baselines,\n compute and return its closure phase. 
'''\n # Get fourier matrices of each baseline \n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n \n X = np.array(X)\n \n # Compute observed closure phase of image\n vis1 = np.matmul(X.reshape((1,-1)), np.transpose(A1)).astype(np.complex64)\n vis2 = np.matmul(X.reshape((1,-1)), np.transpose(A2)).astype(np.complex64)\n vis3 = np.matmul(X.reshape((1,-1)), np.transpose(A3)).astype(np.complex64)\n \n cphase = np.angle(vis1 * vis2 * vis3) \n \n return cphase\n\ndef compute_cphase_grad(cphase, Z, F_cphase, sigma, npix):\n ''' \n Compute gradient of closure phase chi-squared\n \n cphase : closure phase of true image \n Z : predicted image vector\n F_cphase : 3 DFT matrices from three baselines in a closure triangle\n '''\n # Get fourier matrices of each baseline \n A1 = F_cphase[:, :, 0]\n A2 = F_cphase[:, :, 1]\n A3 = F_cphase[:, :, 2]\n \n i1 = np.matmul(Z.reshape((1,-1)), np.transpose(A1)).astype(np.complex64)\n i2 = np.matmul(Z.reshape((1,-1)), np.transpose(A2)).astype(np.complex64)\n i3 = np.matmul(Z.reshape((1,-1)), np.transpose(A3)).astype(np.complex64)\n cphase_samples = np.angle(i1 * i2 * i3)\n \n pref = np.sin(cphase - cphase_samples)/(sigma**2)\n pt1 = pref/i1\n pt2 = pref/i2\n pt3 = pref/i3\n out = -(2.0/len(cphase)) * np.imag(np.dot(pt1, A1) + np.dot(pt2, A2) + np.dot(pt3, A3))\n \n return out.reshape(npix**2)\n\ndef chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n \"\"\"Closure Phase reduced chi-squared loss.\"\"\"\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq= (2.0/len(cphase)) * np.sum((1.0 - np.cos(cphase-cphase_samples))/(sigma_cphase**2))\n return chisq \n \n###############################################################################\n# Closure Amplitude Functions\n###############################################################################\n \ndef compute_camp(X, Amatrices):\n '''\n Compute closure amplitude of image vector X.\n '''\n i1 = np.dot(Amatrices[0], X)\n i2 = np.dot(Amatrices[1], X)\n i3 = np.dot(Amatrices[2], X)\n i4 = np.dot(Amatrices[3], X)\n \n camp = np.abs((i1 * i2)/(i3 * i4))\n return camp\n\ndef compute_camp_grad(camp, Z, Amatrices, sigma):\n \"\"\"\n The gradient of the closure amplitude chi-squared\n \n camp: Closure amplitudes of true image\n Z: Predicted image vector\n Amatrices: DFT matrices of four baselines\n \"\"\"\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs((i1 * i2)/(i3 * i4))\n\n pp = ((camp - camp_samples) * camp_samples)/(sigma**2)\n pt1 = pp/i1\n pt2 = pp/i2\n pt3 = -pp/i3\n pt4 = -pp/i4\n out = (np.dot(pt1, Amatrices[0]) +\n np.dot(pt2, Amatrices[1]) +\n np.dot(pt3, Amatrices[2]) +\n np.dot(pt4, Amatrices[3]))\n\n return (-2.0/len(camp)) * np.real(out)\n \ndef chisq_camp(camp, Z, Amatrices, sigma):\n \"\"\"Closure Amplitudes reduced chi-squared loss.\"\"\"\n\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n camp_samples = np.abs((i1 * i2)/(i3 * i4))\n\n chisq = np.sum(np.abs((camp - camp_samples)/sigma)**2)/len(camp)\n return chisq \n\n \n###############################################################################\n# Log Closure Amplitude Functions\n###############################################################################\n \ndef compute_lgcamp(X, Amatrices):\n ''' Compute log closure amplitude of image vector X '''\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = 
np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n \n lgcamp = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n return lgcamp\n\ndef compute_lgcamp_grad(lgcamp, Z, Amatrices, sigma):\n \"\"\"The gradient of the Log closure amplitude chi-squared\"\"\"\n\n i1 = np.dot(Amatrices[0], Z)\n i2 = np.dot(Amatrices[1], Z)\n i3 = np.dot(Amatrices[2], Z)\n i4 = np.dot(Amatrices[3], Z)\n lgcamp_samples = (np.log(np.abs(i1)) +\n np.log(np.abs(i2)) - \n np.log(np.abs(i3)) -\n np.log(np.abs(i4)))\n\n pp = (lgcamp - lgcamp_samples) / (sigma**2)\n pt1 = pp / i1\n pt2 = pp / i2\n pt3 = -pp / i3\n pt4 = -pp / i4\n out = (np.dot(pt1, Amatrices[0]) +\n np.dot(pt2, Amatrices[1]) +\n np.dot(pt3, Amatrices[2]) +\n np.dot(pt4, Amatrices[3]))\n\n return (-2.0/len(lgcamp)) * np.real(out)\n\ndef chisq_lgcamp(lgcamp, X, Amatrices, sigma):\n \"\"\"Log Closure Amplitudes reduced chi-squared\"\"\"\n\n a1 = np.abs(np.dot(Amatrices[0], X))\n a2 = np.abs(np.dot(Amatrices[1], X))\n a3 = np.abs(np.dot(Amatrices[2], X))\n a4 = np.abs(np.dot(Amatrices[3], X))\n\n samples = np.log(a1) + np.log(a2) - np.log(a3) - np.log(a4)\n chisq = np.sum(np.abs((lgcamp - samples)/sigma)**2) / (len(lgcamp))\n return chisq \n\n\n\n\n\n\n\n\n\n\n\n\n", "step-ids": [ 11, 13, 15, 16, 17 ] }
[ 11, 13, 15, 16, 17 ]
#!/usr/bin/env python3

def main():
    A1, A2, A3 = map(int, input().split())
    A = A1 + A2 + A3
    if A >= 22:
        ans = 'bust'
    else:
        ans = 'win'
    print(ans)

if __name__ == "__main__":
    main()
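# Worked examples of the 22-point threshold:
#   input "5 7 9"  -> sum 21 -> prints 'win'
#   input "13 7 2" -> sum 22 -> prints 'bust'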
normal
{ "blob_id": "753e062940e0580d7d33c88c1165977142dcd202", "index": 8060, "step-1": "<mask token>\n", "step-2": "def main():\n A1, A2, A3 = map(int, input().split())\n A = A1 + A2 + A3\n if A >= 22:\n ans = 'bust'\n else:\n ans = 'win'\n print(ans)\n\n\n<mask token>\n", "step-3": "def main():\n A1, A2, A3 = map(int, input().split())\n A = A1 + A2 + A3\n if A >= 22:\n ans = 'bust'\n else:\n ans = 'win'\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "#!/usr/bin/env python3\n\ndef main():\n A1, A2, A3 = map(int, input().split())\n A=A1+A2+A3\n if A >=22:\n ans='bust'\n else:\n ans='win'\n print(ans)\n \nif __name__ == \"__main__\":\n main()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
def max_product(n):
    lst, lstnums, res, num = [], [], [], 1
    for i in range(0, n+1):
        lstnums.append(i)
        for j in str(i):
            num *= int(j)
        lst.append(num)
        num = 1

    maxlst = max(lst)
    for i in range(len(lst)):
        if lst[i] == maxlst:
            res.append(lstnums[i])

    return res
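# A quick sanity check: over 0..12 the digit products peak at 9, so this
# should print [9].
print(max_product(12))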
normal
{ "blob_id": "c804391cc199a242d1b54ece8487ef74065a40ad", "index": 840, "step-1": "\ndef max_product(n):\n lst, lstnums, res, num = [], [], [], 1\n for i in range(0, n+1):\n lstnums.append(i)\n for j in str(i):\n num *= int(j)\n lst.append(num)\n num = 1\n​\n maxlst = max(lst)\n for i in range(len(lst)):\n if lst[i] == maxlst:\n res.append(lstnums[i])\n​\n return res\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import numpy as np
import sys
import os
import os.path
import json
import optparse
import time
import pandas as pd

# Split the inference set according to hor_pred and generate a .npy file for
# each selected prediction horizon.
#
# Takes values from the label column according to the prediction horizon,
# then uses the indices of the selected samples to pick their associated
# images. pandas is needed for the initial label selection; after that we
# only generate one .npy per hor_pred, sized according to the split value.
# Both are parsed from the JSON config.


###################
# PARSE CONFIG ####
###################
def addOptions(parser):
    parser.add_option("--NNfile", default="",
                      help="Config json file for the data to pass to the model")

parser = optparse.OptionParser()
addOptions(parser)
(options, args) = parser.parse_args()
if not options.NNfile:
    print("No configuration file specified", file=sys.stderr)
    sys.exit(1)

with open(options.NNfile, 'r') as cfg_file:
    cfg_data = json.load(cfg_file)
days_info_file = cfg_data['days_info']
days_info = pd.read_csv(days_info_file)
day_length = days_info['length_day'][0]
days = days_info['number_train_days'][0]
tg = cfg_data['time_granularity']
hor_pred = cfg_data['hor_pred']
forecast_prediction = []
cut_1 = cfg_data['cut']
img_rows = cfg_data['img_rows']
img_cols = cfg_data['img_cols']
orig_folder = cfg_data['orig_folder']
dest_folder = cfg_data['dest_folder']


##################
# DATA LOAD ######
##################
print('Loading images...\n')
load_start = time.time()
x_original = np.load("x_train.npy")
print(x_original.shape)
print(len(x_original))
print('Loading tags...\n')
y_original = pd.read_csv(orig_folder + '/Y_tr_val.csv')
load_end = time.time()
load_time = load_end - load_start
load_min = int(load_time / 60)
load_sec = load_time % 60
print('Dataframes loaded in {} minutes {} seconds! Splitting for train and validation...\n'.format(load_min, load_sec))

####################
# INDEX SELECTION ##
####################
# Since we configured our matrices with an offset, we have to adjust to
# "jump" to the sample we want to actually predict.

for hp in hor_pred:
    if hp.endswith("min"):
        hor_pred_indices = int(int(hp.replace('min', '')) * 60 / tg)
    if hp.endswith("s"):
        hor_pred_indices = int(int(hp.replace('s', '')) / tg)
    forecast_prediction.append(hp)

    y_t = y_original  # y_train and y are equal here
    y_t_index = y_t.index  # returns an index array
    # Don't get values for the previous or next day:
    y_t_index_valid = y_t_index[(y_t_index % day_length) < (day_length - hor_pred_indices)]
    y_t_indices_lost = len(y_t_index) - len(y_t_index_valid)
    print('Indices computed. {} indices lost.\n'.format(y_t_indices_lost))
    print('Building y matrix with valid indices...\n')
    y_t = np.ravel(y_original.iloc[y_t_index_valid + hor_pred_indices])
    print('Building y matrix removing invalid indices for persistence model...\n')
    # Converts the selected DataFrame rows into a flat numpy array
    y_pred_persistence = np.ravel(y_original.iloc[y_t_index_valid])
    print('Building X matrix...Same thing as before...\n')
    # Pick the same indices we used for the labels
    x_t = x_original[y_t_index_valid]
    x_t = x_t.reshape(x_t.shape[0], img_rows, img_cols, 1)

    # Split:
    cut = int(cut_1 * len(x_t))
    x_train, x_test = x_t[:cut, :], x_t[cut:, :]
    y_train, y_test = y_t[:cut], y_t[cut:]
    # print(x_train.shape, x_test.shape)
    # print(y_train.shape, y_test.shape)
    # Labels: the actual values the model should predict for each sample

    name = "set_hp_" + str(hp) + "_" + str(cut_1) + "total" + ".npy"
    name2 = "tags_hp_" + str(hp) + "_" + str(cut_1) + "total" + ".npy"

    # For each prediction horizon, generate an array for inference
    np.save(name, x_train)
    np.save(name2, y_train)

    print('Generated {} images array.\n'.format(x_train.shape))
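# A quick load-back check (hypothetical horizon '30s' with cut 0.8; adjust
# the file names to whatever the loop above actually produced):
#   x = np.load('set_hp_30s_0.8total.npy')
#   y = np.load('tags_hp_30s_0.8total.npy')
#   assert len(x) == len(y)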
normal
{ "blob_id": "83a92c0b645b9a2a483a01c19a47ab5c296ccbd9", "index": 6907, "step-1": "<mask token>\n\n\ndef addOptions(parser):\n parser.add_option('--NNfile', default='', help=\n 'Config json file for the data to pass to the model')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef addOptions(parser):\n parser.add_option('--NNfile', default='', help=\n 'Config json file for the data to pass to the model')\n\n\n<mask token>\naddOptions(parser)\n<mask token>\nif not options.NNfile:\n print(sys.stderr, 'No configuration file specified\\n')\n sys.exit(1)\nwith open(options.NNfile, 'r') as cfg_file:\n cfg_data = json.load(cfg_file)\n<mask token>\nprint('Loading images...\\n')\n<mask token>\nprint(x_original.shape)\nprint(len(x_original))\nprint('Loading tags...\\n')\n<mask token>\nprint(\n 'Dataframes loaded in {} minutes {} seconds! Splitting for train and validation...\\n'\n .format(load_min, load_sec))\nfor hp in hor_pred:\n if hp.endswith('min'):\n hor_pred_indices = int(int(hp.replace('min', '')) * 60 / tg)\n if hp.endswith('s'):\n hor_pred_indices = int(int(hp.replace('s', '')) / tg)\n forecast_prediction.append(hp)\n y_t = y_original\n y_t_index = y_t.index\n y_t_index_valid = y_t_index[y_t_index % day_length < day_length -\n hor_pred_indices]\n y_t_indices_lost = len(y_t_index) - len(y_t_index_valid)\n print('Indices computed. {} indices lost \\n.'.format(y_t_indices_lost))\n print('Building randomized y matrix with valid indices...\\n')\n y_t = np.ravel(y_original.iloc[y_t_index_valid + hor_pred_indices])\n print(\n 'Building y matrix removing invalid indices for persistence model...\\n'\n )\n y_pred_persistence = np.ravel(y_original.iloc[y_t_index_valid])\n print('Building X matrix...Same thing as before...\\n')\n x_t = x_original[y_t_index_valid]\n x_t = x_t.reshape(x_t.shape[0], img_rows, img_cols, 1)\n cut = int(cut_1 * len(x_t))\n x_train, x_test = x_t[:cut, :], x_t[cut:, :]\n y_train, y_test = y_t[:cut], y_t[cut:]\n name = 'set_hp_' + str(hp) + '_' + str(cut_1) + 'total' + '.npy'\n name2 = 'tags_hp_' + str(hp) + '_' + str(cut_1) + 'total' + '.npy'\n np.save(name, x_train)\n np.save(name2, y_train)\n print('Generated {} images array \\n.'.format(x_train.shape))\n", "step-3": "<mask token>\n\n\ndef addOptions(parser):\n parser.add_option('--NNfile', default='', help=\n 'Config json file for the data to pass to the model')\n\n\nparser = optparse.OptionParser()\naddOptions(parser)\noptions, args = parser.parse_args()\nif not options.NNfile:\n print(sys.stderr, 'No configuration file specified\\n')\n sys.exit(1)\nwith open(options.NNfile, 'r') as cfg_file:\n cfg_data = json.load(cfg_file)\ndays_info_file = cfg_data['days_info']\ndays_info = pd.read_csv(days_info_file)\nday_length = days_info['length_day'][0]\ndays = days_info['number_train_days'][0]\ntg = cfg_data['time_granularity']\nhor_pred = cfg_data['hor_pred']\nforecast_prediction = []\ncut_1 = cfg_data['cut']\nimg_rows = cfg_data['img_rows']\nimg_cols = cfg_data['img_cols']\norig_folder = cfg_data['orig_folder']\ndest_folder = cfg_data['dest_folder']\nprint('Loading images...\\n')\nload_start = time.time()\nx_original = np.load('x_train.npy')\nprint(x_original.shape)\nprint(len(x_original))\nprint('Loading tags...\\n')\ny_original = pd.read_csv(orig_folder + '/Y_tr_val.csv')\nload_end = time.time()\nload_time = load_end - load_start\nload_min = int(load_time / 60)\nload_sec = load_time % 60\nprint(\n 'Dataframes loaded in {} minutes {} seconds! 
Splitting for train and validation...\\n'\n .format(load_min, load_sec))\nfor hp in hor_pred:\n if hp.endswith('min'):\n hor_pred_indices = int(int(hp.replace('min', '')) * 60 / tg)\n if hp.endswith('s'):\n hor_pred_indices = int(int(hp.replace('s', '')) / tg)\n forecast_prediction.append(hp)\n y_t = y_original\n y_t_index = y_t.index\n y_t_index_valid = y_t_index[y_t_index % day_length < day_length -\n hor_pred_indices]\n y_t_indices_lost = len(y_t_index) - len(y_t_index_valid)\n print('Indices computed. {} indices lost \\n.'.format(y_t_indices_lost))\n print('Building randomized y matrix with valid indices...\\n')\n y_t = np.ravel(y_original.iloc[y_t_index_valid + hor_pred_indices])\n print(\n 'Building y matrix removing invalid indices for persistence model...\\n'\n )\n y_pred_persistence = np.ravel(y_original.iloc[y_t_index_valid])\n print('Building X matrix...Same thing as before...\\n')\n x_t = x_original[y_t_index_valid]\n x_t = x_t.reshape(x_t.shape[0], img_rows, img_cols, 1)\n cut = int(cut_1 * len(x_t))\n x_train, x_test = x_t[:cut, :], x_t[cut:, :]\n y_train, y_test = y_t[:cut], y_t[cut:]\n name = 'set_hp_' + str(hp) + '_' + str(cut_1) + 'total' + '.npy'\n name2 = 'tags_hp_' + str(hp) + '_' + str(cut_1) + 'total' + '.npy'\n np.save(name, x_train)\n np.save(name2, y_train)\n print('Generated {} images array \\n.'.format(x_train.shape))\n", "step-4": "import numpy as np\nimport sys\nimport os\nimport os.path\nimport json\nimport optparse\nimport time\nimport pandas as pd\n\n\ndef addOptions(parser):\n parser.add_option('--NNfile', default='', help=\n 'Config json file for the data to pass to the model')\n\n\nparser = optparse.OptionParser()\naddOptions(parser)\noptions, args = parser.parse_args()\nif not options.NNfile:\n print(sys.stderr, 'No configuration file specified\\n')\n sys.exit(1)\nwith open(options.NNfile, 'r') as cfg_file:\n cfg_data = json.load(cfg_file)\ndays_info_file = cfg_data['days_info']\ndays_info = pd.read_csv(days_info_file)\nday_length = days_info['length_day'][0]\ndays = days_info['number_train_days'][0]\ntg = cfg_data['time_granularity']\nhor_pred = cfg_data['hor_pred']\nforecast_prediction = []\ncut_1 = cfg_data['cut']\nimg_rows = cfg_data['img_rows']\nimg_cols = cfg_data['img_cols']\norig_folder = cfg_data['orig_folder']\ndest_folder = cfg_data['dest_folder']\nprint('Loading images...\\n')\nload_start = time.time()\nx_original = np.load('x_train.npy')\nprint(x_original.shape)\nprint(len(x_original))\nprint('Loading tags...\\n')\ny_original = pd.read_csv(orig_folder + '/Y_tr_val.csv')\nload_end = time.time()\nload_time = load_end - load_start\nload_min = int(load_time / 60)\nload_sec = load_time % 60\nprint(\n 'Dataframes loaded in {} minutes {} seconds! Splitting for train and validation...\\n'\n .format(load_min, load_sec))\nfor hp in hor_pred:\n if hp.endswith('min'):\n hor_pred_indices = int(int(hp.replace('min', '')) * 60 / tg)\n if hp.endswith('s'):\n hor_pred_indices = int(int(hp.replace('s', '')) / tg)\n forecast_prediction.append(hp)\n y_t = y_original\n y_t_index = y_t.index\n y_t_index_valid = y_t_index[y_t_index % day_length < day_length -\n hor_pred_indices]\n y_t_indices_lost = len(y_t_index) - len(y_t_index_valid)\n print('Indices computed. 
{} indices lost \\n.'.format(y_t_indices_lost))\n print('Building randomized y matrix with valid indices...\\n')\n y_t = np.ravel(y_original.iloc[y_t_index_valid + hor_pred_indices])\n print(\n 'Building y matrix removing invalid indices for persistence model...\\n'\n )\n y_pred_persistence = np.ravel(y_original.iloc[y_t_index_valid])\n print('Building X matrix...Same thing as before...\\n')\n x_t = x_original[y_t_index_valid]\n x_t = x_t.reshape(x_t.shape[0], img_rows, img_cols, 1)\n cut = int(cut_1 * len(x_t))\n x_train, x_test = x_t[:cut, :], x_t[cut:, :]\n y_train, y_test = y_t[:cut], y_t[cut:]\n name = 'set_hp_' + str(hp) + '_' + str(cut_1) + 'total' + '.npy'\n name2 = 'tags_hp_' + str(hp) + '_' + str(cut_1) + 'total' + '.npy'\n np.save(name, x_train)\n np.save(name2, y_train)\n print('Generated {} images array \\n.'.format(x_train.shape))\n", "step-5": "import numpy as np\nimport sys\nimport os\nimport os.path\nimport json\nimport optparse\nimport time\nimport pandas as pd\n\n #Randomize and split the inference set according to hor_pred\n #Generate .npy file for each hp selected\n\n #Coge valores aleatorios de la columna de etiquetas en función del horizonte de predicción. \n #Coge los índices de las muestras seleccionadas y los usa para seleccionar las imágenes que \n ##tienen asociadas.\n #Tenemos que tener pandas para la seleccion primera de las etiquetas, luego solo generamos un \n ##.npy con ese hor_pred y con la cantidad que queramos en función del valor del split\n ####PARSEAR CON EL JSON\n\n\n###################\n# PARSE CONNFIG #####\n##################\ndef addOptions(parser):\n parser.add_option(\"--NNfile\", default=\"\",\n help=\"Config json file for the data to pass to the model\")\nparser = optparse.OptionParser()\naddOptions(parser)\n(options, args) = parser.parse_args()\nif not options.NNfile:\n print(sys.stderr, \"No configuration file specified\\n\")\n sys.exit(1)\n\nwith open(options.NNfile, 'r') as cfg_file:\n cfg_data = json.load(cfg_file)\ndays_info_file = cfg_data['days_info']\ndays_info = pd.read_csv(days_info_file)\nday_length = days_info['length_day'][0]\ndays = days_info['number_train_days'][0]\ntg = cfg_data['time_granularity']\nhor_pred = cfg_data['hor_pred']\nforecast_prediction = []\ncut_1 = cfg_data['cut']\nimg_rows = cfg_data['img_rows']\nimg_cols = cfg_data['img_cols']\norig_folder = cfg_data['orig_folder']\ndest_folder = cfg_data['dest_folder']\n\n\n##################\n# DATA LOAD ######\n###################\nprint('Loading images...\\n')\nload_start = time.time()\nx_original = np.load(\"x_train.npy\")\nprint(x_original.shape)\nprint(len(x_original))\nprint('Loading tags...\\n')\ny_original = pd.read_csv(orig_folder + '/Y_tr_val.csv')\nload_end = time.time()\nload_time = load_end - load_start\nload_min = int(load_time / 60)\nload_sec = load_time % 60\nprint('Dataframes loaded in {} minutes {} seconds! 
Splitting for train and validation...\\n'.format(load_min, load_sec))\n\n#################\n# RANDOMIZATION##\n#################\n# Since we configured our matrices with an offset we have to adjust to \"jump\" to the sample we want to actually predict\n\nfor hp in hor_pred:\n if hp.endswith(\"min\"):\n hor_pred_indices = int(int(hp.replace('min', '')) * 60 / tg)\n if hp.endswith(\"s\"):\n hor_pred_indices = int(int(hp.replace('s', '')) / tg)\n forecast_prediction.append(hp)\n \n y_t = y_original # y_train and y are equal\n y_t_index = y_t.index # returns an array of indices\n # Don't get values for the previous or next day:\n y_t_index_valid = y_t_index[(y_t_index % day_length) < (day_length - hor_pred_indices)] \n y_t_indices_lost = len(y_t_index) - len(y_t_index_valid)\n print('Indices computed. {} indices lost.\\n'.format(y_t_indices_lost))\n print('Building randomized y matrix with valid indices...\\n')\n y_t = np.ravel(y_original.iloc[y_t_index_valid + hor_pred_indices])\n print('Building y matrix removing invalid indices for persistence model...\\n')\n y_pred_persistence = np.ravel(y_original.iloc[y_t_index_valid]) # a DataFrame row converted into a numpy array\n print('Building X matrix...Same thing as before...\\n')\n # like our randomization, just picking the same indices\n x_t = x_original[y_t_index_valid] \n x_t = x_t.reshape(x_t.shape[0], img_rows, img_cols, 1)\n \n #Split: \n cut = int(cut_1*len(x_t))\n x_train, x_test = x_t[:cut,:], x_t[cut:,:]\n y_train, y_test = y_t[:cut], y_t[cut:]\n #print(x_train.shape, x_test.shape) \n #print(y_train.shape, y_test.shape) #Labels (real values it should predict for each sample)\n \n name = \"set_hp_\" + str(hp) + \"_\" + str(cut_1) + \"total\" + \".npy\"\n name2 = \"tags_hp_\" + str(hp) + \"_\" + str(cut_1) + \"total\" + \".npy\"\n\n #For each prediction horizon generate an array for inference\n np.save(name, x_train)\n np.save(name2, y_train)\n\n print('Generated {} images array.\\n'.format(x_train.shape))\n", "step-ids": [
[ 1, 2, 3, 4, 5 ]
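Every step variant in the record above hinges on one indexing trick: a horizon label such as '5min' is converted into an index offset, and only samples whose shifted target still lands inside the same day are kept. Below is a minimal, self-contained sketch of that logic; tg, day_length and the label series are toy values here (the real script reads them from its JSON config).

import numpy as np
import pandas as pd

tg = 60          # seconds between consecutive samples (toy value)
day_length = 10  # samples per day (toy value)
hp = '5min'      # prediction horizon label

# '5min' ahead at 60 s granularity -> shift the labels by 5 indices.
hor_pred_indices = int(int(hp.replace('min', '')) * 60 / tg)

y_original = pd.Series(np.arange(30.0))  # three toy "days" of labels
idx = y_original.index

# Keep only indices whose shifted target stays inside the same day.
valid = idx[idx % day_length < day_length - hor_pred_indices]
y_target = np.ravel(y_original.iloc[valid + hor_pred_indices])  # shifted labels
y_persistence = np.ravel(y_original.iloc[valid])                # persistence baseline

print(len(idx) - len(valid), 'indices lost')  # 15: the last 5 samples of each toy day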
#-*- coding:utf-8 -*-

from xml.etree import ElementTree
from xml.etree.ElementTree import Element

import os

_exception = None


class xmlSp:
    def addNode(self, parentNode, childNode):
        parentNode.append(childNode)

    def createChildNode(self, key, value, propertyMap={}):
        element = Element(key, propertyMap)
        element.text = value
        return element

    def fetchXmlNodeTree(self, xmlPathOrXmlStr):  # Load XML in one of two ways: from a file path or from an XML string.
        if xmlPathOrXmlStr == "":
            return None
        elif os.path.isfile(xmlPathOrXmlStr):  # is an xml path
            return ElementTree.parse(xmlPathOrXmlStr)
        else:  # is an xml string
            return ElementTree.fromstring(xmlPathOrXmlStr)

    def fetchSingleNode(self, nodeTree, xpathOrKey):  # If several nodes share the same name, return the first one.
        if xpathOrKey is None or xpathOrKey == "":
            return None
        elif len(xpathOrKey.split('/')) > 1:  # is an xpath
            return nodeTree.find(xpathOrKey)  # find() is faster than findall() followed by taking the first match
        else:  # is a plain tag name
            nodeList = list(nodeTree.iter(xpathOrKey))
            if not nodeList:
                return None
            return nodeList[0]

    def fetchSingleNodeValue(self, nodeTree, xpathOrKey):  # If several nodes share the same name, return the text of the first one.
        node = self.fetchSingleNode(nodeTree, xpathOrKey)
        if node is None:
            return ""
        return node.text

    def fetchNodeList(self, nodeTree, xpathOrKey):
        if xpathOrKey is None or xpathOrKey == "":
            return None
        elif len(xpathOrKey.split('/')) > 1:  # is an xpath
            return nodeTree.findall(xpathOrKey)
        else:  # is a plain tag name
            return list(nodeTree.iter(xpathOrKey))

    def fetchNodeValueList(self, nodeTree, xpathOrKey, key=""):  # If xpathOrKey is an xpath, key must not be empty; otherwise an empty list is returned.
        if xpathOrKey is None or xpathOrKey == "":
            return None
        nodeValueList = []
        nodeList = self.fetchNodeList(nodeTree, xpathOrKey)
        for node in nodeList:
            if node.tag == xpathOrKey:
                nodeValueList.append(node.text)
        return nodeValueList

    def format(self, sourceXmlPath, destXmlPath, charset='UTF-8'):
        global _exception
        _exception = None
        if os.path.exists(sourceXmlPath):
            try:
                fileRead = open(sourceXmlPath, 'r', encoding=charset)
                fileWrite = open(destXmlPath, 'w', encoding=charset)
                lines = fileRead.read()
                nodeList = []
                self.__writeXmlStruct(lines, nodeList, fileWrite)
                fileRead.close()
                fileWrite.close()
                return True
            except BaseException as error:
                _exception = error
                return False
        else:
            _exception = BaseException('File does not exist!')
            return False

    def __writeXmlStruct(self, xmlStr, nodeList, fileWrite):
        xmlStr = xmlStr.replace('\n', '')
        xmlStruct1 = self.__analyNodeFlag(xmlStr)
        if xmlStruct1 is None:
            return
        xmlNode1 = xmlStruct1[0]
        xmlRestStr1 = xmlStruct1[1]
        if xmlNode1 == '':  # no tag left to consume; stop the recursion
            return
        xmlStruct2 = self.__analyNodeFlag(xmlRestStr1)
        xmlNode2 = xmlStruct2[0]
        xmlRestStr2 = xmlStruct2[1]
        xmlInnerTextEnd = xmlRestStr1.find(xmlNode2)
        xmlInnerText = xmlRestStr1[:xmlInnerTextEnd]
        isPair = self.__checkNodeFlagIsPair(xmlNode1, xmlNode2)
        nodeName1 = self.__fetchNodeNameFromStr(xmlNode1)
        nodeName2 = self.__fetchNodeNameFromStr(xmlNode2)
        if nodeName1 not in nodeList:
            nodeList.append(nodeName1)
        if nodeName2 not in nodeList:
            nodeList.append(nodeName2)
        nodeName1Floor = nodeList.index(nodeName1, 0)
        nodeName2Floor = nodeList.index(nodeName2, 0)
        space = '    '  # one indentation unit per nesting level
        if len(xmlNode1) > 0:
            if isPair:
                xmlNode1 = space * nodeName1Floor + xmlNode1
            fileWrite.write(xmlNode1 + '\n')
        if len(xmlInnerText) > 0:
            if isPair:
                xmlInnerText = space * (nodeName1Floor + 1) + xmlInnerText
            fileWrite.write(xmlInnerText + '\n')
        if len(xmlNode2) > 0:
            xmlNode2 = space * nodeName2Floor + xmlNode2
            fileWrite.write(xmlNode2 + '\n')
        self.__writeXmlStruct(xmlRestStr2, nodeList, fileWrite)

    def __analyNodeFlag(self, sourceStr):
        global _exception
        _exception = None
        try:
            nodeBegin = sourceStr.find('<')
            nodeEnd = str(sourceStr).find('>')
            if nodeBegin >= 0 and nodeEnd > 0:
                node = sourceStr[nodeBegin:nodeEnd + 1]
                nodeInnerText = sourceStr[nodeEnd + 1:]
                return [node, nodeInnerText]
            else:
                return ["", sourceStr]
        except BaseException as error:
            _exception = error
            return None

    def __checkNodeFlagIsPair(self, nodeFlag1, nodeFlag2):
        if len(nodeFlag1) > 0 and len(nodeFlag2) > 0:
            nodeFlag1 = nodeFlag1[1:-1].replace('/', '')
            nodeFlag2 = nodeFlag2[1:-1].replace('/', '')
            if nodeFlag1 == nodeFlag2:
                return True
        return False

    def __fetchNodeNameFromStr(self, str):
        str = str[1:-1]
        nodeName = str.replace('/', '')
        return nodeName

    def modifyNodeValue(self, node, newValue, isAppend=False):
        if node is None:
            return False
        else:
            try:
                if isAppend:
                    node.text += newValue
                else:
                    node.text = newValue
                return True
            except:
                return False

    def writeXml(self, nodeTree, outPath, charset="utf-8"):
        global _exception
        _exception = None
        try:
            nodeTree.write(outPath, encoding=charset)
            return True
        except BaseException as error:
            _exception = error
            return False

#import os
#if __name__ == '__main__':
#    myxml = xmlSp()
#    formatResult = myxml.format("1.txt","2.txt")
#    if not formatResult:
#        print(_exception)
#    else:
#        os.remove("1.txt")
#        os.rename('2.txt','1.txt')
##    xmlPath= "..\\article\\articleList.xml";
##    nodeTree = myxml.fetchXmlNodeTree(xmlPath)
##    #nodeTree=
##    #myxml.fetchXmlNodeTree("<artilceList><article><id>aaaa</id></article></artilceList>")
##    #node=myxml.fetchSingleNode(nodeTree,'article/id')
##    #if len(node)<=0:
##    #    print("empty")
##    #print(node)
##    #nodeList = myxml.fetchNodeList(nodeTree,'id')
##    #myxml.modifyNodeValue(nodeList[0],'bbbb')
##    #myxml.writeXml(nodeTree,xmlPath)
##    #rootNode=myxml.fetchSingleNode(nodeTree,'articleList')
##    #idNode=myxml.createChildNode('id','aaabbbb')
##    #nameNode=myxml.createChildNode('name','aaabbbb')
##    #parentNode=myxml.createChildNode('article','')
##    #myxml.addNode(parentNode,idNode)
##    #myxml.addNode(parentNode,nameNode)
##    #myxml.addNode(rootNode,parentNode)
##    #myxml.writeXml(nodeTree,'aaa.xml')
##    #for node in nodeList:
##    #    print("node:%s" %node)
##    #nodeValueSet=fetchNodeValueSet(nodeTree,'article/id')
##    #for nodeValue in nodeValueSet:
##    #    print ("nodeValue:%s" %nodeValue)
#import os
#os.system("PAUSE")
normal
{ "blob_id": "0470f98247f8f835c0c052b01ddd7f1f7a515ab5", "index": 5509, "step-1": "<mask token>\n\n\nclass xmlSp:\n\n def addNode(self, parentNode, childNode):\n parentNode.append(childNode)\n\n def createChildNode(self, key, value, propertyMap={}):\n element = Element(key, propertyMap)\n element.text = value\n return element\n <mask token>\n\n def fetchSingleNode(self, nodeTree, xpathOrKey):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n elif len(xpathOrKey.split('/')) > 1:\n return nodeTree.find(xpathOrKey)\n else:\n nodeList = nodeTree.getiterator(xpathOrKey)\n if nodeList == None or len(nodeList) <= 0:\n return nodeList\n else:\n return nodeList[0]\n\n def fetchSingleNodeValue(self, nodeTree, xpathOrKey):\n node = self.fetchSingleNode(nodeTree, xpathOrKey)\n if node == None or len(node) <= 0 or node == '':\n return ''\n else:\n return node.text\n\n def fetchNodeList(self, nodeTree, xpathOrKey):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n elif len(xpathOrKey.split('/')) > 1:\n return nodeTree.findall(xpathOrKey)\n else:\n return nodeTree.getiterator(xpathOrKey)\n\n def fetchNodeValueList(self, nodeTree, xpathOrKey, key=''):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n else:\n nodeValueList = []\n nodeList = self.fetchNodeList(nodeTree, xpathOrKey)\n for node in nodeList:\n if node.tag == xpathOrKey:\n nodeValueList.append(node.text)\n return nodeValueList\n\n def format(self, sourceXmlPath, destXmlPath, charset='UTF-8'):\n global _exception\n _exception = None\n if os.path.exists(sourceXmlPath):\n try:\n fileRead = open(sourceXmlPath, 'r', encoding=charset)\n fileWrite = open(destXmlPath, 'w', encoding=charset)\n lines = fileRead.read()\n nodeList = []\n self.__writeXmlStruct(lines, nodeList, fileWrite)\n fileRead.close()\n fileWrite.close()\n return True\n except BaseException as error:\n _exception = error\n return False\n else:\n _exception = BaseException('File not exist!')\n return False\n <mask token>\n\n def __analyNodeFlag(self, sourceStr):\n global _exception\n _exception = None\n try:\n nodeBegin = sourceStr.find('<')\n nodeEnd = str(sourceStr).find('>')\n if nodeBegin >= 0 and nodeEnd > 0:\n node = sourceStr[nodeBegin:nodeEnd + 1]\n nodeInnerText = sourceStr[nodeEnd + 1:]\n return [node, nodeInnerText]\n else:\n return ['', sourceStr]\n except BaseException as error:\n _exception = error\n return None\n\n def __checkNodeFlagIsPair(self, nodeFlag1, nodeFlag2):\n if len(nodeFlag1) > 0 and len(nodeFlag2) > 0:\n nodeFlag1 = nodeFlag1[1:len(nodeFlag1) - 2]\n nodeFlag2 = nodeFlag2[1:len(nodeFlag2) - 2]\n nodeFlag1 = nodeFlag1.replace('/', '')\n nodeFlag2 = nodeFlag2.replace('/', '')\n if nodeFlag1 == nodeFlag2:\n return True\n return False\n <mask token>\n\n def modifyNodeValue(self, node, newValue, isAppend=False):\n if node == None:\n return False\n else:\n try:\n if isAppend:\n node.text += newValue\n else:\n node.text = newValue\n return True\n except:\n return False\n\n def writeXml(self, nodeTree, outPath, charset='utf-8'):\n global _exception\n _exception = None\n try:\n nodeTree.write(outPath, encoding=charset)\n return True\n except BaseException as error:\n _exception = error\n return False\n", "step-2": "<mask token>\n\n\nclass xmlSp:\n\n def addNode(self, parentNode, childNode):\n parentNode.append(childNode)\n\n def createChildNode(self, key, value, propertyMap={}):\n element = Element(key, propertyMap)\n element.text = value\n return element\n <mask token>\n\n def fetchSingleNode(self, nodeTree, xpathOrKey):\n if 
xpathOrKey == None or xpathOrKey == '':\n return None\n elif len(xpathOrKey.split('/')) > 1:\n return nodeTree.find(xpathOrKey)\n else:\n nodeList = nodeTree.getiterator(xpathOrKey)\n if nodeList == None or len(nodeList) <= 0:\n return nodeList\n else:\n return nodeList[0]\n\n def fetchSingleNodeValue(self, nodeTree, xpathOrKey):\n node = self.fetchSingleNode(nodeTree, xpathOrKey)\n if node == None or len(node) <= 0 or node == '':\n return ''\n else:\n return node.text\n\n def fetchNodeList(self, nodeTree, xpathOrKey):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n elif len(xpathOrKey.split('/')) > 1:\n return nodeTree.findall(xpathOrKey)\n else:\n return nodeTree.getiterator(xpathOrKey)\n\n def fetchNodeValueList(self, nodeTree, xpathOrKey, key=''):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n else:\n nodeValueList = []\n nodeList = self.fetchNodeList(nodeTree, xpathOrKey)\n for node in nodeList:\n if node.tag == xpathOrKey:\n nodeValueList.append(node.text)\n return nodeValueList\n\n def format(self, sourceXmlPath, destXmlPath, charset='UTF-8'):\n global _exception\n _exception = None\n if os.path.exists(sourceXmlPath):\n try:\n fileRead = open(sourceXmlPath, 'r', encoding=charset)\n fileWrite = open(destXmlPath, 'w', encoding=charset)\n lines = fileRead.read()\n nodeList = []\n self.__writeXmlStruct(lines, nodeList, fileWrite)\n fileRead.close()\n fileWrite.close()\n return True\n except BaseException as error:\n _exception = error\n return False\n else:\n _exception = BaseException('File not exist!')\n return False\n <mask token>\n\n def __analyNodeFlag(self, sourceStr):\n global _exception\n _exception = None\n try:\n nodeBegin = sourceStr.find('<')\n nodeEnd = str(sourceStr).find('>')\n if nodeBegin >= 0 and nodeEnd > 0:\n node = sourceStr[nodeBegin:nodeEnd + 1]\n nodeInnerText = sourceStr[nodeEnd + 1:]\n return [node, nodeInnerText]\n else:\n return ['', sourceStr]\n except BaseException as error:\n _exception = error\n return None\n\n def __checkNodeFlagIsPair(self, nodeFlag1, nodeFlag2):\n if len(nodeFlag1) > 0 and len(nodeFlag2) > 0:\n nodeFlag1 = nodeFlag1[1:len(nodeFlag1) - 2]\n nodeFlag2 = nodeFlag2[1:len(nodeFlag2) - 2]\n nodeFlag1 = nodeFlag1.replace('/', '')\n nodeFlag2 = nodeFlag2.replace('/', '')\n if nodeFlag1 == nodeFlag2:\n return True\n return False\n\n def __fetchNodeNameFromStr(self, str):\n str = str[1:len(str) - 1]\n nodeName = str.replace('/', '')\n return nodeName\n\n def modifyNodeValue(self, node, newValue, isAppend=False):\n if node == None:\n return False\n else:\n try:\n if isAppend:\n node.text += newValue\n else:\n node.text = newValue\n return True\n except:\n return False\n\n def writeXml(self, nodeTree, outPath, charset='utf-8'):\n global _exception\n _exception = None\n try:\n nodeTree.write(outPath, encoding=charset)\n return True\n except BaseException as error:\n _exception = error\n return False\n", "step-3": "<mask token>\n\n\nclass xmlSp:\n\n def addNode(self, parentNode, childNode):\n parentNode.append(childNode)\n\n def createChildNode(self, key, value, propertyMap={}):\n element = Element(key, propertyMap)\n element.text = value\n return element\n\n def fetchXmlNodeTree(self, xmlPathOrXmlStr):\n if xmlPathOrXmlStr == '':\n return None\n elif os.path.isfile(xmlPathOrXmlStr):\n return ElementTree.parse(xmlPathOrXmlStr)\n else:\n return ElementTree.fromstring(xmlPathOrXmlStr)\n\n def fetchSingleNode(self, nodeTree, xpathOrKey):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n elif 
len(xpathOrKey.split('/')) > 1:\n return nodeTree.find(xpathOrKey)\n else:\n nodeList = nodeTree.getiterator(xpathOrKey)\n if nodeList == None or len(nodeList) <= 0:\n return nodeList\n else:\n return nodeList[0]\n\n def fetchSingleNodeValue(self, nodeTree, xpathOrKey):\n node = self.fetchSingleNode(nodeTree, xpathOrKey)\n if node == None or len(node) <= 0 or node == '':\n return ''\n else:\n return node.text\n\n def fetchNodeList(self, nodeTree, xpathOrKey):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n elif len(xpathOrKey.split('/')) > 1:\n return nodeTree.findall(xpathOrKey)\n else:\n return nodeTree.getiterator(xpathOrKey)\n\n def fetchNodeValueList(self, nodeTree, xpathOrKey, key=''):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n else:\n nodeValueList = []\n nodeList = self.fetchNodeList(nodeTree, xpathOrKey)\n for node in nodeList:\n if node.tag == xpathOrKey:\n nodeValueList.append(node.text)\n return nodeValueList\n\n def format(self, sourceXmlPath, destXmlPath, charset='UTF-8'):\n global _exception\n _exception = None\n if os.path.exists(sourceXmlPath):\n try:\n fileRead = open(sourceXmlPath, 'r', encoding=charset)\n fileWrite = open(destXmlPath, 'w', encoding=charset)\n lines = fileRead.read()\n nodeList = []\n self.__writeXmlStruct(lines, nodeList, fileWrite)\n fileRead.close()\n fileWrite.close()\n return True\n except BaseException as error:\n _exception = error\n return False\n else:\n _exception = BaseException('File not exist!')\n return False\n\n def __writeXmlStruct(self, xmlStr, nodeList, fileWrite):\n xmlStr = xmlStr.replace('\\n', '')\n xmlStruct1 = self.__analyNodeFlag(xmlStr)\n if xmlStruct1 != None:\n xmlNode1 = xmlStruct1[0]\n xmlRestStr1 = xmlStruct1[1]\n xmlStruct2 = self.__analyNodeFlag(xmlRestStr1)\n xmlNode2 = xmlStruct2[0]\n xmlRestStr2 = xmlStruct2[1]\n xmlInnerTextEnd = xmlRestStr1.find(xmlNode2)\n xmlInnerText = xmlRestStr1[:xmlInnerTextEnd]\n isPair = self.__checkNodeFlagIsPair(xmlNode1, xmlNode2)\n nodeName1 = self.__fetchNodeNameFromStr(xmlNode1)\n nodeName2 = self.__fetchNodeNameFromStr(xmlNode2)\n if not nodeName1 in nodeList:\n nodeList.append(nodeName1)\n if not nodeName2 in nodeList:\n nodeList.append(nodeName2)\n nodeName1Floor = nodeList.index(nodeName1, 0)\n nodeName2Floor = nodeList.index(nodeName2, 0)\n space = ''\n if len(xmlNode1) > 0:\n if isPair:\n for index in range(nodeName1Floor):\n xmlNode1 = space + xmlNode1\n fileWrite.write(xmlNode1 + '\\n')\n if len(xmlInnerText) > 0:\n if isPair:\n for index in range(nodeName1Floor + 1):\n xmlInnerText = space + xmlInnerText\n fileWrite.write(xmlInnerText + '\\n')\n if len(xmlNode2) > 0:\n for index in range(nodeName2Floor):\n xmlNode2 = space + xmlNode2\n fileWrite.write(xmlNode2 + '\\n')\n self.__writeXmlStruct(xmlRestStr2, nodeList, fileWrite)\n\n def __analyNodeFlag(self, sourceStr):\n global _exception\n _exception = None\n try:\n nodeBegin = sourceStr.find('<')\n nodeEnd = str(sourceStr).find('>')\n if nodeBegin >= 0 and nodeEnd > 0:\n node = sourceStr[nodeBegin:nodeEnd + 1]\n nodeInnerText = sourceStr[nodeEnd + 1:]\n return [node, nodeInnerText]\n else:\n return ['', sourceStr]\n except BaseException as error:\n _exception = error\n return None\n\n def __checkNodeFlagIsPair(self, nodeFlag1, nodeFlag2):\n if len(nodeFlag1) > 0 and len(nodeFlag2) > 0:\n nodeFlag1 = nodeFlag1[1:len(nodeFlag1) - 2]\n nodeFlag2 = nodeFlag2[1:len(nodeFlag2) - 2]\n nodeFlag1 = nodeFlag1.replace('/', '')\n nodeFlag2 = nodeFlag2.replace('/', '')\n if nodeFlag1 == nodeFlag2:\n return 
True\n return False\n\n def __fetchNodeNameFromStr(self, str):\n str = str[1:len(str) - 1]\n nodeName = str.replace('/', '')\n return nodeName\n\n def modifyNodeValue(self, node, newValue, isAppend=False):\n if node == None:\n return False\n else:\n try:\n if isAppend:\n node.text += newValue\n else:\n node.text = newValue\n return True\n except:\n return False\n\n def writeXml(self, nodeTree, outPath, charset='utf-8'):\n global _exception\n _exception = None\n try:\n nodeTree.write(outPath, encoding=charset)\n return True\n except BaseException as error:\n _exception = error\n return False\n", "step-4": "<mask token>\n_exception = None\n<mask token>\n\n\nclass xmlSp:\n\n def addNode(self, parentNode, childNode):\n parentNode.append(childNode)\n\n def createChildNode(self, key, value, propertyMap={}):\n element = Element(key, propertyMap)\n element.text = value\n return element\n\n def fetchXmlNodeTree(self, xmlPathOrXmlStr):\n if xmlPathOrXmlStr == '':\n return None\n elif os.path.isfile(xmlPathOrXmlStr):\n return ElementTree.parse(xmlPathOrXmlStr)\n else:\n return ElementTree.fromstring(xmlPathOrXmlStr)\n\n def fetchSingleNode(self, nodeTree, xpathOrKey):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n elif len(xpathOrKey.split('/')) > 1:\n return nodeTree.find(xpathOrKey)\n else:\n nodeList = nodeTree.getiterator(xpathOrKey)\n if nodeList == None or len(nodeList) <= 0:\n return nodeList\n else:\n return nodeList[0]\n\n def fetchSingleNodeValue(self, nodeTree, xpathOrKey):\n node = self.fetchSingleNode(nodeTree, xpathOrKey)\n if node == None or len(node) <= 0 or node == '':\n return ''\n else:\n return node.text\n\n def fetchNodeList(self, nodeTree, xpathOrKey):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n elif len(xpathOrKey.split('/')) > 1:\n return nodeTree.findall(xpathOrKey)\n else:\n return nodeTree.getiterator(xpathOrKey)\n\n def fetchNodeValueList(self, nodeTree, xpathOrKey, key=''):\n if xpathOrKey == None or xpathOrKey == '':\n return None\n else:\n nodeValueList = []\n nodeList = self.fetchNodeList(nodeTree, xpathOrKey)\n for node in nodeList:\n if node.tag == xpathOrKey:\n nodeValueList.append(node.text)\n return nodeValueList\n\n def format(self, sourceXmlPath, destXmlPath, charset='UTF-8'):\n global _exception\n _exception = None\n if os.path.exists(sourceXmlPath):\n try:\n fileRead = open(sourceXmlPath, 'r', encoding=charset)\n fileWrite = open(destXmlPath, 'w', encoding=charset)\n lines = fileRead.read()\n nodeList = []\n self.__writeXmlStruct(lines, nodeList, fileWrite)\n fileRead.close()\n fileWrite.close()\n return True\n except BaseException as error:\n _exception = error\n return False\n else:\n _exception = BaseException('File not exist!')\n return False\n\n def __writeXmlStruct(self, xmlStr, nodeList, fileWrite):\n xmlStr = xmlStr.replace('\\n', '')\n xmlStruct1 = self.__analyNodeFlag(xmlStr)\n if xmlStruct1 != None:\n xmlNode1 = xmlStruct1[0]\n xmlRestStr1 = xmlStruct1[1]\n xmlStruct2 = self.__analyNodeFlag(xmlRestStr1)\n xmlNode2 = xmlStruct2[0]\n xmlRestStr2 = xmlStruct2[1]\n xmlInnerTextEnd = xmlRestStr1.find(xmlNode2)\n xmlInnerText = xmlRestStr1[:xmlInnerTextEnd]\n isPair = self.__checkNodeFlagIsPair(xmlNode1, xmlNode2)\n nodeName1 = self.__fetchNodeNameFromStr(xmlNode1)\n nodeName2 = self.__fetchNodeNameFromStr(xmlNode2)\n if not nodeName1 in nodeList:\n nodeList.append(nodeName1)\n if not nodeName2 in nodeList:\n nodeList.append(nodeName2)\n nodeName1Floor = nodeList.index(nodeName1, 0)\n nodeName2Floor = 
nodeList.index(nodeName2, 0)\n space = ''\n if len(xmlNode1) > 0:\n if isPair:\n for index in range(nodeName1Floor):\n xmlNode1 = space + xmlNode1\n fileWrite.write(xmlNode1 + '\\n')\n if len(xmlInnerText) > 0:\n if isPair:\n for index in range(nodeName1Floor + 1):\n xmlInnerText = space + xmlInnerText\n fileWrite.write(xmlInnerText + '\\n')\n if len(xmlNode2) > 0:\n for index in range(nodeName2Floor):\n xmlNode2 = space + xmlNode2\n fileWrite.write(xmlNode2 + '\\n')\n self.__writeXmlStruct(xmlRestStr2, nodeList, fileWrite)\n\n def __analyNodeFlag(self, sourceStr):\n global _exception\n _exception = None\n try:\n nodeBegin = sourceStr.find('<')\n nodeEnd = str(sourceStr).find('>')\n if nodeBegin >= 0 and nodeEnd > 0:\n node = sourceStr[nodeBegin:nodeEnd + 1]\n nodeInnerText = sourceStr[nodeEnd + 1:]\n return [node, nodeInnerText]\n else:\n return ['', sourceStr]\n except BaseException as error:\n _exception = error\n return None\n\n def __checkNodeFlagIsPair(self, nodeFlag1, nodeFlag2):\n if len(nodeFlag1) > 0 and len(nodeFlag2) > 0:\n nodeFlag1 = nodeFlag1[1:len(nodeFlag1) - 2]\n nodeFlag2 = nodeFlag2[1:len(nodeFlag2) - 2]\n nodeFlag1 = nodeFlag1.replace('/', '')\n nodeFlag2 = nodeFlag2.replace('/', '')\n if nodeFlag1 == nodeFlag2:\n return True\n return False\n\n def __fetchNodeNameFromStr(self, str):\n str = str[1:len(str) - 1]\n nodeName = str.replace('/', '')\n return nodeName\n\n def modifyNodeValue(self, node, newValue, isAppend=False):\n if node == None:\n return False\n else:\n try:\n if isAppend:\n node.text += newValue\n else:\n node.text = newValue\n return True\n except:\n return False\n\n def writeXml(self, nodeTree, outPath, charset='utf-8'):\n global _exception\n _exception = None\n try:\n nodeTree.write(outPath, encoding=charset)\n return True\n except BaseException as error:\n _exception = error\n return False\n", "step-5": "#-*- coding:utf-8 -*-\n\nfrom xml.etree import ElementTree\nfrom xml.etree.ElementTree import Element \n\n_exception = None\n\nimport os\nclass xmlSp: \n def addNode(self,parentNode,childNode): \n parentNode.append(childNode) \n \n def createChildNode(self,key,value,propertyMap={}):\n element = Element(key,propertyMap) \n element.text = value \n return element\n\n def fetchXmlNodeTree(self,xmlPathOrXmlStr):#Load xml has 2 ways.First:load xml string.Second:load xml file. \n if(xmlPathOrXmlStr == \"\"):\n return None\n elif(os.path.isfile(xmlPathOrXmlStr)):#is xmlPath\n return ElementTree.parse(xmlPathOrXmlStr)\n else:#is xmlStr\n return ElementTree.fromstring(xmlPathOrXmlStr) \n\n def fetchSingleNode(self,nodeTree,xpathOrKey):#If the node that is same name is more,return first node. \n if xpathOrKey == None or xpathOrKey == \"\":\n return None\n elif len(xpathOrKey.split('/')) > 1:#is xpath \n return nodeTree.find(xpathOrKey)#find is faster than findall then return first\n else:#is key\n nodeList = nodeTree.getiterator(xpathOrKey)\n if nodeList == None or len(nodeList) <= 0:\n return nodeList\n else:\n return nodeList[0]\n\n def fetchSingleNodeValue(self,nodeTree,xpathOrKey):#If the node that is same name is more,return first node. 
\n node = self.fetchSingleNode(nodeTree,xpathOrKey)\n if node == None or len(node) <= 0 or node == \"\":\n return \"\"\n else:\n return node.text\n\n def fetchNodeList(self,nodeTree,xpathOrKey):\n if xpathOrKey == None or xpathOrKey == \"\":\n return None\n elif len(xpathOrKey.split('/')) > 1:#is xpath\n return nodeTree.findall(xpathOrKey)\n else:#is key\n return nodeTree.getiterator(xpathOrKey)\n\n def fetchNodeValueList(self,nodeTree,xpathOrKey,key=\"\"):#If xpathOrKey is xpath,key must be not empty.Otherwise return empty set \n if xpathOrKey == None or xpathOrKey == \"\":\n return None\n else:\n nodeValueList = [] \n nodeList = self.fetchNodeList(nodeTree,xpathOrKey)\n for node in nodeList:\n if node.tag == xpathOrKey:\n nodeValueList.append(node.text)\n return nodeValueList \n\n def format(self,sourceXmlPath,destXmlPath,charset='UTF-8'): \n global _exception \n _exception = None\n if os.path.exists(sourceXmlPath):\n try:\n fileRead = open(sourceXmlPath,'r',encoding=charset)\n fileWrite = open(destXmlPath,'w',encoding=charset) \n lines = fileRead.read() \n nodeList=[] \n self.__writeXmlStruct(lines,nodeList,fileWrite) \n fileRead.close()\n fileWrite.close() \n return True\n except BaseException as error:\n _exception = error\n return False\n else:\n _exception = BaseException('File not exist!')\n return False\n def __writeXmlStruct(self,xmlStr,nodeList,fileWrite): \n xmlStr=xmlStr.replace('\\n','') \n xmlStruct1=self.__analyNodeFlag(xmlStr) \n if xmlStruct1!=None:\n xmlNode1=xmlStruct1[0]\n xmlRestStr1=xmlStruct1[1]\n xmlStruct2=self.__analyNodeFlag(xmlRestStr1)\n xmlNode2=xmlStruct2[0]\n xmlRestStr2=xmlStruct2[1]\n xmlInnerTextEnd=xmlRestStr1.find(xmlNode2)\n xmlInnerText=xmlRestStr1[:xmlInnerTextEnd]\n isPair=self.__checkNodeFlagIsPair(xmlNode1,xmlNode2)\n nodeName1=self.__fetchNodeNameFromStr(xmlNode1)\n nodeName2=self.__fetchNodeNameFromStr(xmlNode2)\n if not (nodeName1 in nodeList):\n nodeList.append(nodeName1)\n if not (nodeName2 in nodeList):\n nodeList.append(nodeName2)\n nodeName1Floor=nodeList.index(nodeName1,0)\n nodeName2Floor=nodeList.index(nodeName2,0) \n space=''\n if len(xmlNode1)>0: \n if isPair:\n for index in range(nodeName1Floor):\n xmlNode1=space+xmlNode1 \n fileWrite.write(xmlNode1+'\\n') \n if len(xmlInnerText)>0:\n if isPair:\n for index in range(nodeName1Floor+1):\n xmlInnerText=space+xmlInnerText\n fileWrite.write(xmlInnerText+'\\n')\n if len(xmlNode2)>0: \n for index in range(nodeName2Floor):\n xmlNode2=space+xmlNode2\n fileWrite.write(xmlNode2+'\\n') \n self.__writeXmlStruct(xmlRestStr2,nodeList,fileWrite) \n def __analyNodeFlag(self,sourceStr): \n global _exception\n _exception=None\n try: \n nodeBegin = sourceStr.find('<') \n nodeEnd = str(sourceStr).find('>') \n if nodeBegin >= 0 and nodeEnd > 0:\n node =sourceStr[nodeBegin:nodeEnd+1] \n nodeInnerText=sourceStr[nodeEnd+1:]\n return [node,nodeInnerText]\n else:\n return [\"\",sourceStr]\n except BaseException as error:\n _exception=error\n return None\n def __checkNodeFlagIsPair(self,nodeFlag1,nodeFlag2):\n if len(nodeFlag1)>0 and len(nodeFlag2)>0:\n nodeFlag1=nodeFlag1[1:(len(nodeFlag1)-2)]\n nodeFlag2=nodeFlag2[1:(len(nodeFlag2)-2)]\n nodeFlag1=nodeFlag1.replace('/','')\n nodeFlag2=nodeFlag2.replace('/','')\n if nodeFlag1==nodeFlag2:\n return True \n return False\n\n def __fetchNodeNameFromStr(self,str):\n str=str[1:(len(str)-1)]\n nodeName=str.replace('/','')\n return nodeName\n \n def modifyNodeValue(self,node,newValue, isAppend=False):\n if(node == None):\n return False\n else:\n try:\n if isAppend: \n 
node.text += newValue \n else: \n node.text = newValue \n return True \n except:\n return False\n\n def writeXml(self,nodeTree, outPath,charset=\"utf-8\"): \n global _exception\n _exception=None\n try:\n nodeTree.write(outPath, encoding=charset)\n return True\n except BaseException as error:\n _exception=error\n return False\n\n#import os \n#if __name__ == '__main__': \n# myxml = xmlSp() \n# formatResult = myxml.format(\"1.txt\",\"2.txt\")\n# if not formatResult:\n# print(_exception)\n# else:\n# os.remove(\"1.txt\")\n# os.rename('2.txt','1.txt')\n \n## xmlPath= \"..\\\\article\\\\articleList.xml\";\n## nodeTree = myxml.fetchXmlNodeTree(xmlPath)\n## #nodeTree=\n## #myxml.fetchXmlNodeTree(\"<artilceList><article><id>aaaa</id></article></artilceList>\")\n## #node=myxml.fetchSingleNode(nodeTree,'article/id')\n## #if len(node)<=0:\n## # print(\"empty\")\n## #print(node)\n## #nodeList = myxml.fetchNodeList(nodeTree,'id')\n## #myxml.modifyNodeValue(nodeList[0],'bbbb')\n## #myxml.writeXml(nodeTree,xmlPath)\n## #rootNode=myxml.fetchSingleNode(nodeTree,'articleList')\n## #idNode=myxml.createChildNode('id','aaabbbb')\n## #nameNode=myxml.createChildNode('name','aaabbbb')\n## #parentNode=myxml.createChildNode('article','')\n## #myxml.addNode(parentNode,idNode)\n## #myxml.addNode(parentNode,nameNode)\n## #myxml.addNode(rootNode,parentNode)\n## #myxml.writeXml(nodeTree,'aaa.xml')\n## #for node in nodeList:\n## # print(\"node:%s\" %node)\n## #nodeValueSet=fetchNodeValueSet(nodeTree,'article/id')\n## #for nodeValue in nodeValueSet:\n## # print (\"nodeValue:%s\" %nodeValue)\n#import os\n#os.system(\"PAUSE\")\n", "step-ids": [ 12, 13, 15, 16, 18 ] }
[ 12, 13, 15, 16, 18 ]
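A short usage sketch for the xmlSp helper above. It assumes the class is saved as xml_sp.py somewhere on the import path; the tag names are made up for illustration.

# Hypothetical module name; adjust to wherever the class above lives.
from xml_sp import xmlSp

myxml = xmlSp()
tree = myxml.fetchXmlNodeTree('<articleList><article><id>aaaa</id></article></articleList>')

print(myxml.fetchSingleNodeValue(tree, 'article/id'))  # -> aaaa

# Build and attach a second <article> with its own <id>.
article = myxml.createChildNode('article', '')
myxml.addNode(article, myxml.createChildNode('id', 'bbbb'))
myxml.addNode(tree, article)  # fetchXmlNodeTree returned the root element here

print(myxml.fetchNodeValueList(tree, 'id'))  # -> ['aaaa', 'bbbb']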
#!/usr/bin/env python

import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import numpy as np
from std_msgs.msg import Int32

import math

'''
This node will publish waypoints from the car's current position to some `x` distance ahead.

As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.

Once you have created dbw_node, you will update this node to use the status of traffic lights too.

Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.

TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''

LOOKAHEAD_WPS = 60  # Number of waypoints we will publish. You can change this number
MAX_DECEL = 0.5
MPH_TO_MPS = 0.447
MAX_SPEED = 20  # in MPH


class WaypointUpdater(object):
    def __init__(self):
        #rospy.loginfo('Entered WaypointUpdater init')
        rospy.init_node('waypoint_updater')

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)

        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)

        # TODO: Add other member variables you need below
        self.pose = None
        self.base_waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None
        self.stopline_wp_idx = -1

        #rospy.spin()
        self.loop()

    def loop(self):
        rospy.loginfo('Entered WaypointUpdater loop')
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            if self.pose and self.base_waypoints:
                # Get closest waypoint
                #rospy.loginfo('Value of self pose is %d, %d', self.pose.pose.position.x, self.pose.pose.position.y)
                closest_waypoint_idx = self.get_closest_waypoint_idx()
                self.publish_waypoints(closest_waypoint_idx)
            rate.sleep()

    def get_closest_waypoint_idx(self):
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        closest_idx = self.waypoint_tree.query([x, y], 1)[1]

        # Check if the closest coord is ahead of or behind the vehicle
        #rospy.loginfo('Value of closest_idx is %d', closest_idx)
        closest_coord = self.waypoints_2d[closest_idx]
        prev_coord = self.waypoints_2d[closest_idx - 1]

        closest_vect = np.array(closest_coord)
        previous_vect = np.array(prev_coord)
        pos_vect = np.array([x, y])

        val = np.dot(closest_vect - previous_vect, pos_vect - closest_vect)

        if val > 0:
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx

    def publish_waypoints(self, closest_idx):
        final_lane = self.generate_lane()
        self.final_waypoints_pub.publish(final_lane)

    def generate_lane(self):
        lane = Lane()
        lane.header = self.base_waypoints.header

        closest_idx = self.get_closest_waypoint_idx()
        farthest_idx = closest_idx + LOOKAHEAD_WPS
        base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]

        #rospy.loginfo('Entered generate_lane, farthest_idx: %d', farthest_idx)
        if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
            lane.waypoints = self.limitMaxSpeed(base_waypoints)
        else:
            rospy.loginfo('Stopline index non-default: %d, closest_car index: %d', self.stopline_wp_idx, closest_idx)
            lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)
        return lane

    def limitMaxSpeed(self, waypoints):
        velocity = MAX_SPEED * MPH_TO_MPS
        for i in range(len(waypoints)):
            self.set_waypoint_velocity(waypoints, i, velocity)
        return waypoints

    def decelerate_waypoints(self, waypoints, closest_idx):
        tmp = []
        stopline_wp_idx = self.stopline_wp_idx
        for i, wp in enumerate(waypoints):
            p = Waypoint()
            p.pose = wp.pose

            stop_idx = max(stopline_wp_idx - 5 - closest_idx, 0)  # 5 waypoints back from the stopline so that the front of the car stops at the line
            if i > stop_idx:
                vel = 0
            else:
                #rospy.loginfo('Decelerate waypoints index values: %d, stop_idx: %d', i, stop_idx)
                dist = self.distance(waypoints, i, stop_idx)
                vel = math.sqrt(2 * MAX_DECEL * dist)
                if vel < 1.0:
                    vel = 0.0

            p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
            #rospy.loginfo('Waypoint velocity for index %d is %f', i, p.twist.twist.linear.x)
            tmp.append(p)
        return tmp

    def pose_cb(self, msg):
        #rospy.loginfo('Entered pose_cb')
        self.pose = msg

    def waypoints_cb(self, waypoints):
        self.base_waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
            #rospy.loginfo('First waypoint: %d, %d', waypoints.waypoints[0].pose.pose.position.x, waypoints.waypoints[0].pose.pose.position.y)
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        # TODO: Callback for /traffic_waypoint message. Implement
        self.stopline_wp_idx = msg.data
        #rospy.loginfo('traffic_cb called with stop_idx: %d', self.stopline_wp_idx)

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist


if __name__ == '__main__':
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
normal
{ "blob_id": "9ad92b23b8a02204a86af599e507eb889e5bcec7", "index": 7565, "step-1": "<mask token>\n\n\nclass WaypointUpdater(object):\n\n def __init__(self):\n rospy.init_node('waypoint_updater')\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane,\n queue_size=1)\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.stopline_wp_idx = -1\n self.loop()\n <mask token>\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n closest_vect = np.array(closest_coord)\n previous_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n val = np.dot(closest_vect - previous_vect, pos_vect - closest_vect)\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n <mask token>\n <mask token>\n\n def limitMaxSpeed(self, waypoints):\n velocity = MAX_SPEED * MPH_TO_MPS\n for i in range(len(waypoints)):\n self.set_waypoint_velocity(waypoints, i, velocity)\n return waypoints\n <mask token>\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.\n pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.stopline_wp_idx = msg.data\n <mask token>\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (\n a.z - b.z) ** 2)\n for i in range(wp1, wp2 + 1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose\n .pose.position)\n wp1 = i\n return dist\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass WaypointUpdater(object):\n\n def __init__(self):\n rospy.init_node('waypoint_updater')\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane,\n queue_size=1)\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.stopline_wp_idx = -1\n self.loop()\n\n def loop(self):\n rospy.loginfo('Entered WaypointUpdater loop')\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints:\n closest_waypoint_idx = self.get_closest_waypoint_idx()\n self.publish_waypoints(closest_waypoint_idx)\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n closest_vect = np.array(closest_coord)\n previous_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n val = np.dot(closest_vect - previous_vect, pos_vect 
- closest_vect)\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n def publish_waypoints(self, closest_idx):\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n\n def generate_lane(self):\n lane = Lane()\n lane.header = self.base_waypoints.header\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx\n ]\n if self.stopline_wp_idx == -1 or self.stopline_wp_idx >= farthest_idx:\n lane.waypoints = self.limitMaxSpeed(base_waypoints)\n else:\n rospy.loginfo(\n 'Stopline index non-default: %d, closest_car index: %d',\n self.stopline_wp_idx, closest_idx)\n lane.waypoints = self.decelerate_waypoints(base_waypoints,\n closest_idx)\n return lane\n\n def limitMaxSpeed(self, waypoints):\n velocity = MAX_SPEED * MPH_TO_MPS\n for i in range(len(waypoints)):\n self.set_waypoint_velocity(waypoints, i, velocity)\n return waypoints\n\n def decelerate_waypoints(self, waypoints, closest_idx):\n tmp = []\n stopline_wp_idx = self.stopline_wp_idx\n for i, wp in enumerate(waypoints):\n p = Waypoint()\n p.pose = wp.pose\n stop_idx = max(stopline_wp_idx - 5 - closest_idx, 0)\n if i > stop_idx:\n vel = 0\n else:\n dist = self.distance(waypoints, i, stop_idx)\n vel = math.sqrt(2 * MAX_DECEL * dist)\n if vel < 1.0:\n vel = 0.0\n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n tmp.append(p)\n return tmp\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.\n pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.stopline_wp_idx = msg.data\n\n def obstacle_cb(self, msg):\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (\n a.z - b.z) ** 2)\n for i in range(wp1, wp2 + 1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose\n .pose.position)\n wp1 = i\n return dist\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass WaypointUpdater(object):\n\n def __init__(self):\n rospy.init_node('waypoint_updater')\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane,\n queue_size=1)\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.stopline_wp_idx = -1\n self.loop()\n\n def loop(self):\n rospy.loginfo('Entered WaypointUpdater loop')\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints:\n closest_waypoint_idx = self.get_closest_waypoint_idx()\n self.publish_waypoints(closest_waypoint_idx)\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n closest_vect = 
np.array(closest_coord)\n previous_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n val = np.dot(closest_vect - previous_vect, pos_vect - closest_vect)\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n def publish_waypoints(self, closest_idx):\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n\n def generate_lane(self):\n lane = Lane()\n lane.header = self.base_waypoints.header\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx\n ]\n if self.stopline_wp_idx == -1 or self.stopline_wp_idx >= farthest_idx:\n lane.waypoints = self.limitMaxSpeed(base_waypoints)\n else:\n rospy.loginfo(\n 'Stopline index non-default: %d, closest_car index: %d',\n self.stopline_wp_idx, closest_idx)\n lane.waypoints = self.decelerate_waypoints(base_waypoints,\n closest_idx)\n return lane\n\n def limitMaxSpeed(self, waypoints):\n velocity = MAX_SPEED * MPH_TO_MPS\n for i in range(len(waypoints)):\n self.set_waypoint_velocity(waypoints, i, velocity)\n return waypoints\n\n def decelerate_waypoints(self, waypoints, closest_idx):\n tmp = []\n stopline_wp_idx = self.stopline_wp_idx\n for i, wp in enumerate(waypoints):\n p = Waypoint()\n p.pose = wp.pose\n stop_idx = max(stopline_wp_idx - 5 - closest_idx, 0)\n if i > stop_idx:\n vel = 0\n else:\n dist = self.distance(waypoints, i, stop_idx)\n vel = math.sqrt(2 * MAX_DECEL * dist)\n if vel < 1.0:\n vel = 0.0\n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n tmp.append(p)\n return tmp\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.\n pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.stopline_wp_idx = msg.data\n\n def obstacle_cb(self, msg):\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (\n a.z - b.z) ** 2)\n for i in range(wp1, wp2 + 1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose\n .pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n", "step-4": "<mask token>\nLOOKAHEAD_WPS = 60\nMAX_DECEL = 0.5\nMPH_TO_MPS = 0.447\nMAX_SPEED = 20\n\n\nclass WaypointUpdater(object):\n\n def __init__(self):\n rospy.init_node('waypoint_updater')\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane,\n queue_size=1)\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.stopline_wp_idx = -1\n self.loop()\n\n def loop(self):\n rospy.loginfo('Entered WaypointUpdater loop')\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints:\n closest_waypoint_idx = 
self.get_closest_waypoint_idx()\n self.publish_waypoints(closest_waypoint_idx)\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n closest_vect = np.array(closest_coord)\n previous_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n val = np.dot(closest_vect - previous_vect, pos_vect - closest_vect)\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n def publish_waypoints(self, closest_idx):\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n\n def generate_lane(self):\n lane = Lane()\n lane.header = self.base_waypoints.header\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx\n ]\n if self.stopline_wp_idx == -1 or self.stopline_wp_idx >= farthest_idx:\n lane.waypoints = self.limitMaxSpeed(base_waypoints)\n else:\n rospy.loginfo(\n 'Stopline index non-default: %d, closest_car index: %d',\n self.stopline_wp_idx, closest_idx)\n lane.waypoints = self.decelerate_waypoints(base_waypoints,\n closest_idx)\n return lane\n\n def limitMaxSpeed(self, waypoints):\n velocity = MAX_SPEED * MPH_TO_MPS\n for i in range(len(waypoints)):\n self.set_waypoint_velocity(waypoints, i, velocity)\n return waypoints\n\n def decelerate_waypoints(self, waypoints, closest_idx):\n tmp = []\n stopline_wp_idx = self.stopline_wp_idx\n for i, wp in enumerate(waypoints):\n p = Waypoint()\n p.pose = wp.pose\n stop_idx = max(stopline_wp_idx - 5 - closest_idx, 0)\n if i > stop_idx:\n vel = 0\n else:\n dist = self.distance(waypoints, i, stop_idx)\n vel = math.sqrt(2 * MAX_DECEL * dist)\n if vel < 1.0:\n vel = 0.0\n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n tmp.append(p)\n return tmp\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.\n pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.stopline_wp_idx = msg.data\n\n def obstacle_cb(self, msg):\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (\n a.z - b.z) ** 2)\n for i in range(wp1, wp2 + 1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose\n .pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n", "step-5": "#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom styx_msgs.msg import Lane, Waypoint\nfrom scipy.spatial import KDTree\nimport numpy as np\nfrom std_msgs.msg import Int32\n\nimport math\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\n\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or 
obstacles.\n\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\n\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPS = 60 # Number of waypoints we will publish. You can change this number\nMAX_DECEL = 0.5\nMPH_TO_MPS = 0.447\nMAX_SPEED = 20 # in MPH\n\n\nclass WaypointUpdater(object):\n def __init__(self):\n #rospy.loginfo('Entered WaypointUpdater init')\n rospy.init_node('waypoint_updater')\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n\n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n # TODO: Add other member variables you need below\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.stopline_wp_idx = -1\n\n #rospy.spin()\n self.loop()\n\n def loop(self):\n rospy.loginfo('Entered WaypointUpdater loop')\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints:\n #Get closest waypoint\n #rospy.loginfo('Value of self pose is %d, %d',self.pose.pose.position.x, self.pose.pose.position.y)\n closest_waypoint_idx = self.get_closest_waypoint_idx()\n self.publish_waypoints(closest_waypoint_idx)\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x,y], 1)[1]\n\n #Check if closest coord is ahead of or behind the vehicle\n #rospy.loginfo('Value of closest_idx is %d',closest_idx)\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n\n closest_vect = np.array(closest_coord)\n previous_vect = np.array(prev_coord)\n pos_vect = np.array([x,y])\n\n val = np.dot(closest_vect-previous_vect, pos_vect-closest_vect)\n\n if val > 0:\n closest_idx = (closest_idx+1) % len(self.waypoints_2d)\n return closest_idx\n\n def publish_waypoints(self, closest_idx):\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n\n def generate_lane(self):\n lane = Lane()\n lane.header = self.base_waypoints.header\n\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]\n\n #rospy.loginfo('Entered generate_lane, farthest_idx: %d', farthest_idx)\n if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):\n lane.waypoints = self.limitMaxSpeed(base_waypoints)\n else:\n rospy.loginfo('Stopline index non-default: %d, closest_car index: %d', self.stopline_wp_idx, closest_idx)\n lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx)\n return lane\n\n def limitMaxSpeed(self, waypoints):\n velocity = MAX_SPEED * MPH_TO_MPS\n for i in range(len(waypoints)):\n self.set_waypoint_velocity(waypoints, i, velocity)\n return waypoints\n\n def decelerate_waypoints(self, waypoints, closest_idx):\n tmp = []\n stopline_wp_idx = self.stopline_wp_idx\n for i, wp in enumerate(waypoints):\n p = Waypoint()\n p.pose = wp.pose\n\n stop_idx = 
max(stopline_wp_idx - 5 - closest_idx, 0) # 2 waypoints back from stopline so that the front of the car is at the line\n if i > stop_idx:\n vel = 0\n else:\n #rospy.loginfo('Decelerate waypoints index values: %d, stop_idx: %d', i, stop_idx)\n dist = self.distance(waypoints, i, stop_idx)\n vel = math.sqrt(2 * MAX_DECEL * dist)\n if vel < 1.0:\n vel = 0.0\n\n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n #rospy.loginfo('Waypoint velocity for index %d is %f', i, p.twist.twist.linear.x)\n tmp.append(p)\n return tmp\n\n def pose_cb(self, msg):\n #rospy.loginfo('Entered pose_cb')\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n #rospy.loginfo('First waypoint: %d, %d', waypoints.waypoints[0].pose.pose.position.x, waypoints.waypoints[0].pose.pose.position.y)\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n # TODO: Callback for /traffic_waypoint message. Implement\n \n self.stopline_wp_idx = msg.data\n #rospy.loginfo('traffic_cb called with stop_idx: %d', self.stopline_wp_idx)\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n", "step-ids": [ 10, 15, 16, 17, 19 ] }
[ 10, 15, 16, 17, 19 ]
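The geometric test inside get_closest_waypoint_idx is the interesting part: the KDTree returns the nearest waypoint, and the sign of a dot product between the local track direction and the car-to-waypoint vector tells whether that waypoint is already behind the car. A standalone sketch on a toy straight-line track (the node itself builds the same KDTree from /base_waypoints):

import numpy as np
from scipy.spatial import KDTree

waypoints_2d = [[float(i), 0.0] for i in range(10)]  # toy track along the x axis
tree = KDTree(waypoints_2d)

def closest_waypoint_ahead(x, y):
    idx = tree.query([x, y], 1)[1]
    closest = np.array(waypoints_2d[idx])
    prev = np.array(waypoints_2d[idx - 1])
    pos = np.array([x, y])
    # Positive dot product: the car has already passed `closest`,
    # so step forward to the next waypoint (wrapping at the end).
    if np.dot(closest - prev, pos - closest) > 0:
        idx = (idx + 1) % len(waypoints_2d)
    return idx

print(closest_waypoint_ahead(3.4, 0.2))  # 4: waypoint 3 is nearest but already behind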
#!/usr/bin/env python
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc
beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'
psi = -0.011798841097534662
hkl2Q = array([[-0.64961065,  0.94207344,  0.        ],
               [ 0.66614652,  0.4593441 , -0.80916512],
               [-0.66614652, -0.4593441 , -0.80916512]])
pp = array([-1.22433552,  2.73879582,  0.0612745 ])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))
t_m2p = 0.0038753067573975117
Q = array([ 9.58591698, -3.98508133, -0.08915738])
E = 86.712034833655451
hkl_projection = array([-0.6235806 , -0.08226367,  0.30709024])
urc.run(
    beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,
    Q, E, hkl_projection, Nbuffer=100000)
normal
{ "blob_id": "47c5fb03cb427d5c9f7703e1715e026b6f2c7a35", "index": 4660, "step-1": "<mask token>\n", "step-2": "<mask token>\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-3": "<mask token>\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'\n )\npsi = -0.011798841097534662\nhkl2Q = array([[-0.64961065, 0.94207344, 0.0], [0.66614652, 0.4593441, -\n 0.80916512], [-0.66614652, -0.4593441, -0.80916512]])\npp = array([-1.22433552, 2.73879582, 0.0612745])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.0038753067573975117\nQ = array([9.58591698, -3.98508133, -0.08915738])\nE = 86.71203483365545\nhkl_projection = array([-0.6235806, -0.08226367, 0.30709024])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-4": "import mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'\n )\npsi = -0.011798841097534662\nhkl2Q = array([[-0.64961065, 0.94207344, 0.0], [0.66614652, 0.4593441, -\n 0.80916512], [-0.66614652, -0.4593441, -0.80916512]])\npp = array([-1.22433552, 2.73879582, 0.0612745])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.0038753067573975117\nQ = array([9.58591698, -3.98508133, -0.08915738])\nE = 86.71203483365545\nhkl_projection = array([-0.6235806, -0.08226367, 0.30709024])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-5": "#!/usr/bin/env python\nimport mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'\npsi = -0.011798841097534662\nhkl2Q = array([[-0.64961065, 0.94207344, 0. 
],\n [ 0.66614652, 0.4593441 , -0.80916512],\n [-0.66614652, -0.4593441 , -0.80916512]])\npp = array([-1.22433552, 2.73879582, 0.0612745 ])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))\nt_m2p = 0.0038753067573975117\nQ = array([ 9.58591698, -3.98508133, -0.08915738])\nE = 86.712034833655451\nhkl_projection = array([-0.6235806 , -0.08226367, 0.30709024])\nurc.run(\n beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,\n Q, E, hkl_projection, Nbuffer=100000)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import BlockDeviceHandler
import json
import LocalMachine
import os

""" This module automatically formats the disk based on diskconf.json """

def module_print(text):
    print_text = "[ autoformat disk ] " + str(text)
    print(print_text)

def parse_config_file_from_disk(path, confname="diskconf.json"):
    json_path = str(path) + "/" + str(confname)
    if not os.path.exists(json_path):
        module_print("\tPath does not exist: " + str(json_path))
        return None
    try:
        with open(json_path, "r") as f:
            data = json.load(f)
            module_print("config: " + str(confname) + " => " + str(data))
    except Exception as e:
        module_print("Json parse error: " + str(e))
        return None
    return data

def write_state_config_file_from_disk(path, data, confname="diskconf.json"):
    json_path = str(path) + "/" + str(confname)
    try:
        if os.path.exists(json_path):
            module_print("\tWrite back format state to " + str(json_path))
            with open(json_path, "w") as f:
                if str(data['is_formatted']).lower() == "false":
                    data['is_formatted'] = "True"
                    json.dump(data, f, indent=2)
                    module_print("\t\tSUCCESS")
                else:
                    module_print("State already set")
        else:
            module_print("diskconf does not exist: " + str(json_path))
    except Exception as e:
        module_print("\t\tFAILED")
        module_print("Write back format state to disk failed: " + str(e))

def save_diskconf_file(path, confname="diskconf.json"):
    json_path = str(path) + "/" + str(confname)
    save_path = "/tmp"
    cmd = "sudo cp {} {}".format(json_path, save_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)

def restore_diskconf_file(path, confname="diskconf.json"):
    json_path = str(path) + "/" + str(confname)
    save_path = "/tmp/" + str(confname)
    cmd = "sudo cp {} {}".format(save_path, json_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
    cmd = "sudo rm -f {}".format(save_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)

def safe_format_disk_check_force_mode(json_data, dev):
    dev_data_modified = False
    # disk is not formatted
    dev_data = BlockDeviceHandler.get_device_info_data(dev)
    if json_data['label'] != dev_data['label']:
        dev_data_modified = True
    if json_data['format'] != dev_data['filesystem']:
        dev_data_modified = True

    if str(json_data['is_formatted']).lower() == "false":
        if str(json_data['force']).lower() == "true" and dev_data_modified is False:
            module_print("[i] [format] Block device parameters not changed but force mode is ON")
            return True
        elif dev_data_modified is True:
            module_print("[i] [format] Requested block device parameter(s) changed - format")
            return True
        else:
            module_print("[i] [Skip format] Block device format not needed - label and filesystem not changed")
            return False
    else:
        module_print("[i] [is_formatted:True] Block device already formatted.")
        return False

def format_device_based_on_config_file(dev, premount_path):
    module_print("Format device")
    diskconf_path = premount_path
    data = parse_config_file_from_disk(diskconf_path)
    if data is not None:
        if safe_format_disk_check_force_mode(data, dev):
            module_print("\tSave disk config file before formatting")
            save_diskconf_file(diskconf_path)
            module_print("\tUnmount device before formatting")
            BlockDeviceHandler.unmount_device(dev)
            module_print("\tFormat device")
            BlockDeviceHandler.format_ex4(dev, data['label'])
            module_print("\tMount formatted device")
            mount_point = BlockDeviceHandler.mount_device(dev)
            module_print("\tRestore config file to disk after formatting")
            restore_diskconf_file(mount_point)
            module_print("\tSave back the config file with the new state")
            write_state_config_file_from_disk(mount_point, data)
        else:
            module_print("\tDisk already formatted: {}:{}".format(dev, premount_path))
            module_print("mount device: " + str(dev))
            mount_point = BlockDeviceHandler.mount_device(dev)

def prepare_block_device():
    if BlockDeviceHandler.is_any_device_avaible():
        module_print("Block device exists")
        devices = BlockDeviceHandler.list_connected_devices()
        for dev in devices:
            premount_path = BlockDeviceHandler.premount_device(dev)
            format_device_based_on_config_file(dev, premount_path)
        BlockDeviceHandler.unmount_all_premounted_devices()

if __name__ == "__main__":
    prepare_block_device()
    #BlockDeviceHandler.unmount_all_devices(del_mount_point=True)
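# A minimal sketch of the diskconf.json this module expects, inferred from the
# keys it reads (label, format, is_formatted, force); the concrete values are
# illustrative assumptions, and format_ex4 suggests ext4 is the supported target:
#
#   {
#     "label": "DATA",
#     "format": "ext4",
#     "is_formatted": "False",
#     "force": "False"
#   }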
normal
{ "blob_id": "927470fe0087b17e5fe67a9b8b3cc13a40d8be1a", "index": 7554, "step-1": "<mask token>\n\n\ndef parse_config_file_from_disk(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n if not os.path.exists(json_path):\n module_print('\\tPath not exists: ' + str(json_path))\n return None\n try:\n with open(json_path, 'r') as f:\n data = json.load(f)\n module_print('config: ' + str(confname) + ' => ' + str(data))\n except Exception as e:\n module_print('Json parse error: ' + str(e))\n return None\n return data\n\n\ndef write_state_config_file_from_disk(path, data, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n try:\n if os.path.exists(json_path):\n module_print('\\tWrite back format state to ' + str(json_path))\n with open(json_path, 'w') as f:\n if str(data['is_formatted']).lower() == 'false':\n data['is_formatted'] = 'True'\n json.dump(data, f, indent=2)\n module_print('\\t\\tSUCCESS')\n else:\n module_print('State already set')\n else:\n module_print('diskconf not exists: ' + str(json_path))\n except Exception as e:\n module_print('\\t\\tFAILED')\n module_print('Write back format state to disk failed:' + str(e))\n\n\ndef save_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp'\n cmd = 'sudo cp {} {}'.format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef restore_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp/' + str(confname)\n cmd = 'sudo cp {} {}'.format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = 'sudo rm -f {}'.format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n if str(json_data['is_formatted']).lower() == 'false':\n if str(json_data['force']).lower(\n ) == 'true' and dev_data_modified is False:\n module_print(\n '[i] [format] Block device paramaters not changed but force mode is ON'\n )\n return True\n elif dev_data_modified is True:\n module_print(\n '[i] [format] Requested block device parameter(s) changed - format'\n )\n return True\n else:\n module_print(\n '[i] [Skip format] Blockdevice format not needed - label and system not changed'\n )\n return False\n else:\n module_print('[i] [is_formatted:True] Blockdevice already formatted.')\n return False\n\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print('Format device')\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print('\\tSave disk config file before formatting')\n save_diskconf_file(diskconf_path)\n module_print('\\tUnmount device before formatting')\n BlockDeviceHandler.unmount_device(dev)\n module_print('\\tFormat device')\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print('\\tMount formatted device')\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print('\\tRestore config file to disk after 
formating')\n restore_diskconf_file(mount_point)\n module_print('\\tSave back the the config file with the new state')\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print('\\tDisk already formatted: {}:{}'.format(dev,\n premount_path))\n module_print('mount device: ' + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\n\ndef prepare_block_device():\n if BlockDeviceHandler.is_any_device_avaible():\n module_print('Block device exists')\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef module_print(text):\n print_text = '[ autoformat disk ] ' + str(text)\n print(print_text)\n\n\ndef parse_config_file_from_disk(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n if not os.path.exists(json_path):\n module_print('\\tPath not exists: ' + str(json_path))\n return None\n try:\n with open(json_path, 'r') as f:\n data = json.load(f)\n module_print('config: ' + str(confname) + ' => ' + str(data))\n except Exception as e:\n module_print('Json parse error: ' + str(e))\n return None\n return data\n\n\ndef write_state_config_file_from_disk(path, data, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n try:\n if os.path.exists(json_path):\n module_print('\\tWrite back format state to ' + str(json_path))\n with open(json_path, 'w') as f:\n if str(data['is_formatted']).lower() == 'false':\n data['is_formatted'] = 'True'\n json.dump(data, f, indent=2)\n module_print('\\t\\tSUCCESS')\n else:\n module_print('State already set')\n else:\n module_print('diskconf not exists: ' + str(json_path))\n except Exception as e:\n module_print('\\t\\tFAILED')\n module_print('Write back format state to disk failed:' + str(e))\n\n\ndef save_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp'\n cmd = 'sudo cp {} {}'.format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef restore_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp/' + str(confname)\n cmd = 'sudo cp {} {}'.format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = 'sudo rm -f {}'.format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n if str(json_data['is_formatted']).lower() == 'false':\n if str(json_data['force']).lower(\n ) == 'true' and dev_data_modified is False:\n module_print(\n '[i] [format] Block device paramaters not changed but force mode is ON'\n )\n return True\n elif dev_data_modified is True:\n module_print(\n '[i] [format] Requested block device parameter(s) changed - format'\n )\n return True\n else:\n module_print(\n '[i] [Skip format] Blockdevice format not needed - label and system not changed'\n )\n return False\n 
else:\n module_print('[i] [is_formatted:True] Blockdevice already formatted.')\n return False\n\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print('Format device')\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print('\\tSave disk config file before formatting')\n save_diskconf_file(diskconf_path)\n module_print('\\tUnmount device before formatting')\n BlockDeviceHandler.unmount_device(dev)\n module_print('\\tFormat device')\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print('\\tMount formatted device')\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print('\\tRestore config file to disk after formating')\n restore_diskconf_file(mount_point)\n module_print('\\tSave back the the config file with the new state')\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print('\\tDisk already formatted: {}:{}'.format(dev,\n premount_path))\n module_print('mount device: ' + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\n\ndef prepare_block_device():\n if BlockDeviceHandler.is_any_device_avaible():\n module_print('Block device exists')\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef module_print(text):\n print_text = '[ autoformat disk ] ' + str(text)\n print(print_text)\n\n\ndef parse_config_file_from_disk(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n if not os.path.exists(json_path):\n module_print('\\tPath not exists: ' + str(json_path))\n return None\n try:\n with open(json_path, 'r') as f:\n data = json.load(f)\n module_print('config: ' + str(confname) + ' => ' + str(data))\n except Exception as e:\n module_print('Json parse error: ' + str(e))\n return None\n return data\n\n\ndef write_state_config_file_from_disk(path, data, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n try:\n if os.path.exists(json_path):\n module_print('\\tWrite back format state to ' + str(json_path))\n with open(json_path, 'w') as f:\n if str(data['is_formatted']).lower() == 'false':\n data['is_formatted'] = 'True'\n json.dump(data, f, indent=2)\n module_print('\\t\\tSUCCESS')\n else:\n module_print('State already set')\n else:\n module_print('diskconf not exists: ' + str(json_path))\n except Exception as e:\n module_print('\\t\\tFAILED')\n module_print('Write back format state to disk failed:' + str(e))\n\n\ndef save_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp'\n cmd = 'sudo cp {} {}'.format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef restore_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp/' + str(confname)\n cmd = 'sudo cp {} {}'.format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = 'sudo rm -f {}'.format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef 
safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n if str(json_data['is_formatted']).lower() == 'false':\n if str(json_data['force']).lower(\n ) == 'true' and dev_data_modified is False:\n module_print(\n '[i] [format] Block device paramaters not changed but force mode is ON'\n )\n return True\n elif dev_data_modified is True:\n module_print(\n '[i] [format] Requested block device parameter(s) changed - format'\n )\n return True\n else:\n module_print(\n '[i] [Skip format] Blockdevice format not needed - label and system not changed'\n )\n return False\n else:\n module_print('[i] [is_formatted:True] Blockdevice already formatted.')\n return False\n\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print('Format device')\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print('\\tSave disk config file before formatting')\n save_diskconf_file(diskconf_path)\n module_print('\\tUnmount device before formatting')\n BlockDeviceHandler.unmount_device(dev)\n module_print('\\tFormat device')\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print('\\tMount formatted device')\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print('\\tRestore config file to disk after formating')\n restore_diskconf_file(mount_point)\n module_print('\\tSave back the the config file with the new state')\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print('\\tDisk already formatted: {}:{}'.format(dev,\n premount_path))\n module_print('mount device: ' + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\n\ndef prepare_block_device():\n if BlockDeviceHandler.is_any_device_avaible():\n module_print('Block device exists')\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\n\nif __name__ == '__main__':\n prepare_block_device()\n", "step-4": "import BlockDeviceHandler\nimport json\nimport LocalMachine\nimport os\n<mask token>\n\n\ndef module_print(text):\n print_text = '[ autoformat disk ] ' + str(text)\n print(print_text)\n\n\ndef parse_config_file_from_disk(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n if not os.path.exists(json_path):\n module_print('\\tPath not exists: ' + str(json_path))\n return None\n try:\n with open(json_path, 'r') as f:\n data = json.load(f)\n module_print('config: ' + str(confname) + ' => ' + str(data))\n except Exception as e:\n module_print('Json parse error: ' + str(e))\n return None\n return data\n\n\ndef write_state_config_file_from_disk(path, data, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n try:\n if os.path.exists(json_path):\n module_print('\\tWrite back format state to ' + str(json_path))\n with open(json_path, 'w') as f:\n if str(data['is_formatted']).lower() == 'false':\n data['is_formatted'] = 'True'\n json.dump(data, f, indent=2)\n module_print('\\t\\tSUCCESS')\n else:\n module_print('State already set')\n else:\n module_print('diskconf not exists: ' + str(json_path))\n except Exception as 
e:\n module_print('\\t\\tFAILED')\n module_print('Write back format state to disk failed:' + str(e))\n\n\ndef save_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp'\n cmd = 'sudo cp {} {}'.format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef restore_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp/' + str(confname)\n cmd = 'sudo cp {} {}'.format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = 'sudo rm -f {}'.format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n if str(json_data['is_formatted']).lower() == 'false':\n if str(json_data['force']).lower(\n ) == 'true' and dev_data_modified is False:\n module_print(\n '[i] [format] Block device paramaters not changed but force mode is ON'\n )\n return True\n elif dev_data_modified is True:\n module_print(\n '[i] [format] Requested block device parameter(s) changed - format'\n )\n return True\n else:\n module_print(\n '[i] [Skip format] Blockdevice format not needed - label and system not changed'\n )\n return False\n else:\n module_print('[i] [is_formatted:True] Blockdevice already formatted.')\n return False\n\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print('Format device')\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print('\\tSave disk config file before formatting')\n save_diskconf_file(diskconf_path)\n module_print('\\tUnmount device before formatting')\n BlockDeviceHandler.unmount_device(dev)\n module_print('\\tFormat device')\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print('\\tMount formatted device')\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print('\\tRestore config file to disk after formating')\n restore_diskconf_file(mount_point)\n module_print('\\tSave back the the config file with the new state')\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print('\\tDisk already formatted: {}:{}'.format(dev,\n premount_path))\n module_print('mount device: ' + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\n\ndef prepare_block_device():\n if BlockDeviceHandler.is_any_device_avaible():\n module_print('Block device exists')\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\n\nif __name__ == '__main__':\n prepare_block_device()\n", "step-5": "import BlockDeviceHandler\nimport json\nimport LocalMachine\nimport os\n\n\"\"\" This module automaticly format the disk based on diskconf.json \"\"\"\n\ndef module_print(text):\n print_text = \"[ autoformat disk ] \" + str(text)\n print(print_text)\n\ndef 
parse_config_file_from_disk(path, confname=\"diskconf.json\"):\n json_path = str(path) + \"/\" + str(confname)\n if not os.path.exists(json_path):\n module_print(\"\\tPath not exists: \" + str(json_path))\n return None\n try:\n with open(json_path, \"r\") as f:\n data = json.load(f)\n module_print(\"config: \" + str(confname) + \" => \" + str(data))\n except Exception as e:\n module_print(\"Json parse error: \" + str(e))\n return None\n return data\n\ndef write_state_config_file_from_disk(path, data, confname=\"diskconf.json\"):\n json_path = str(path) + \"/\" + str(confname)\n try:\n if os.path.exists(json_path):\n module_print(\"\\tWrite back format state to \" + str(json_path))\n with open(json_path, \"w\") as f:\n if str(data['is_formatted']).lower() == \"false\":\n data['is_formatted'] = \"True\"\n json.dump(data, f, indent=2)\n module_print(\"\\t\\tSUCCESS\")\n else:\n module_print(\"State already set\")\n else:\n module_print(\"diskconf not exists: \" + str(json_path))\n except Exception as e:\n module_print(\"\\t\\tFAILED\")\n module_print(\"Write back format state to disk failed:\" + str(e))\n\ndef save_diskconf_file(path, confname=\"diskconf.json\"):\n json_path = str(path) + \"/\" + str(confname)\n save_path = \"/tmp\"\n cmd = \"sudo cp {} {}\".format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\ndef restore_diskconf_file(path, confname=\"diskconf.json\"):\n json_path = str(path) + \"/\" + str(confname)\n save_path = \"/tmp/\" + str(confname)\n cmd = \"sudo cp {} {}\".format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = \"sudo rm -f {}\".format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n # disk is not formatted\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n\n if str(json_data['is_formatted']).lower() == \"false\":\n if str(json_data['force']).lower() == \"true\" and dev_data_modified is False:\n module_print(\"[i] [format] Block device paramaters not changed but force mode is ON\")\n return True\n elif dev_data_modified is True:\n module_print(\"[i] [format] Requested block device parameter(s) changed - format\")\n return True\n else:\n module_print(\"[i] [Skip format] Blockdevice format not needed - label and system not changed\")\n return False\n else:\n module_print(\"[i] [is_formatted:True] Blockdevice already formatted.\")\n return False\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print(\"Format device\")\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print(\"\\tSave disk config file before formatting\")\n save_diskconf_file(diskconf_path)\n module_print(\"\\tUnmount device before formatting\")\n BlockDeviceHandler.unmount_device(dev)\n module_print(\"\\tFormat device\")\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print(\"\\tMount formatted device\")\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print(\"\\tRestore config file to disk after formating\")\n 
restore_diskconf_file(mount_point)\n module_print(\"\\tSave back the the config file with the new state\")\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print(\"\\tDisk already formatted: {}:{}\".format(dev, premount_path))\n module_print(\"mount device: \" + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\ndef prepare_block_device():\n if BlockDeviceHandler.is_any_device_avaible():\n module_print(\"Block device exists\")\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\nif __name__ == \"__main__\":\n prepare_block_device()\n #BlockDeviceHandler.unmount_all_devices(del_mount_point=True)\n", "step-ids": [ 7, 8, 9, 10, 11 ] }
[ 7, 8, 9, 10, 11 ]
# Generated by Django 3.2.3 on 2021-07-02 08:18 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('khovan', '0003_nhapkho'), ] operations = [ migrations.AddField( model_name='phieunhaphang', name='xulykho', field=models.BooleanField(default=False, verbose_name='Xu Ly Kho'), preserve_default=False, ), ]
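# preserve_default=False means the default=False above is only a one-off value
# used to backfill existing rows; a sketch of the model field this migration
# implies (the PhieuNhapHang class name is an assumed casing of 'phieunhaphang'):
#
#   class PhieuNhapHang(models.Model):
#       ...
#       xulykho = models.BooleanField(verbose_name='Xu Ly Kho')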
normal
{ "blob_id": "016255d74ccf4ac547e4b212d33bb9a39295c830", "index": 2715, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('khovan', '0003_nhapkho')]\n operations = [migrations.AddField(model_name='phieunhaphang', name=\n 'xulykho', field=models.BooleanField(default=False, verbose_name=\n 'Xu Ly Kho'), preserve_default=False)]\n", "step-4": "from django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [('khovan', '0003_nhapkho')]\n operations = [migrations.AddField(model_name='phieunhaphang', name=\n 'xulykho', field=models.BooleanField(default=False, verbose_name=\n 'Xu Ly Kho'), preserve_default=False)]\n", "step-5": "# Generated by Django 3.2.3 on 2021-07-02 08:18\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('khovan', '0003_nhapkho'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='phieunhaphang',\n name='xulykho',\n field=models.BooleanField(default=False, verbose_name='Xu Ly Kho'),\n preserve_default=False,\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
'''
    EXERCISE: Write a program that reads the number of people who will be invited to a party.
    The program will ask for the name of every person and put it in a guest list.
    After that, it must print every name in the list.
'''

'''
qtd = int(input("How many people will be invited? "))

lista_pessoas = []

while qtd > 0:
    lista_pessoas.append(input('Name: '))
    qtd -= 1

for pessoa in lista_pessoas:
    print(pessoa)
'''

# Exercise solution
print('Little party-control program 1.0')
print('#' * 20)

numero_de_convidados = int(input('Enter the number of guests: '))
lista_de_convidados = []

i = 1
while i <= numero_de_convidados:
    nome_do_convidado = input('Enter the name of guest #' + str(i) + ': ')
    lista_de_convidados.append(nome_do_convidado)
    i += 1

print('There will be', numero_de_convidados, 'guests')
print('\nGUEST LIST')

for convidado in lista_de_convidados:
    print(convidado)
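# The counting while-loop above is more idiomatically written as a loop over
# range(); an equivalent sketch of the input loop:
#
#   lista_de_convidados = [
#       input('Enter the name of guest #' + str(i) + ': ')
#       for i in range(1, numero_de_convidados + 1)
#   ]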
normal
{ "blob_id": "426a8fb6d1adf5d4577d299083ce047c919dda67", "index": 3525, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('Programinha de controle de festinhas 1.0')\nprint('#' * 20)\n<mask token>\nwhile i <= numero_de_convidados:\n nome_do_convidado = input('Coloque o nome do convidado #' + str(i) + ': ')\n lista_de_convidados.append(nome_do_convidado)\n i += 1\nprint('Serão ', numero_de_convidados, 'convidados')\nprint(\"\"\"\nLISTA DE CONVIDADOS\"\"\")\nfor convidado in lista_de_convidados:\n print(convidado)\n", "step-3": "<mask token>\nprint('Programinha de controle de festinhas 1.0')\nprint('#' * 20)\nnumero_de_convidados = int(input('Coloque o número de convidados: '))\nlista_de_convidados = []\ni = 1\nwhile i <= numero_de_convidados:\n nome_do_convidado = input('Coloque o nome do convidado #' + str(i) + ': ')\n lista_de_convidados.append(nome_do_convidado)\n i += 1\nprint('Serão ', numero_de_convidados, 'convidados')\nprint(\"\"\"\nLISTA DE CONVIDADOS\"\"\")\nfor convidado in lista_de_convidados:\n print(convidado)\n", "step-4": "'''\n EXERCICIO: Faça um programa que leia quantidade de pessoas que serão convidadas para uma festa.\n O programa irá perguntar o nome de todas as pessoas e colcar num lista de convidados.\n Após isso deve imprimir todos os nomes da lista\n'''\n\n'''\nqtd = int(input(\"Quantas pessoas vão ser convidadas?\"))\n\nlista_pessoas = []\n\nwhile qtd > 0:\n lista_pessoas.append(input('Nome: '))\n qtd -= 1\n\nfor pessoa in lista_pessoas:\n print(pessoa)\n'''\n\n# Resolução do exercício\nprint('Programinha de controle de festinhas 1.0')\nprint('#' * 20)\n\nnumero_de_convidados = int(input('Coloque o número de convidados: '))\nlista_de_convidados =[]\n\ni = 1\nwhile i <= numero_de_convidados:\n nome_do_convidado = input('Coloque o nome do convidado #' + str(i) + ': ')\n lista_de_convidados.append(nome_do_convidado)\n i += 1\n\nprint('Serão ', numero_de_convidados, 'convidados')\nprint('\\nLISTA DE CONVIDADOS')\n\nfor convidado in lista_de_convidados:\n print(convidado)\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import pytest

from freezegun import freeze_time
from datetime import datetime
from khayyam import JalaliDatetime, TehranTimezone

from dilami_calendar import DilamiDatetime, dilami_to_jalali


def test_dilami_date():
    gdate = datetime(2018, 2, 1)
    ddate = DilamiDatetime(gdate, tzinfo=TehranTimezone)

    assert ddate.year == 1591
    assert ddate.month == 6
    assert ddate.day == 28

    ddate = DilamiDatetime(1591, 6, 28, tzinfo=TehranTimezone)
    assert ddate

    ddate = DilamiDatetime(1592, 5, 1, tzinfo=TehranTimezone)
    dilami_date = DilamiDatetime(ddate)
    assert dilami_date

    # Check that DilamiDatetime.now() matches today's Jalali date
    ddate = DilamiDatetime().now()
    jy, jm, jd = dilami_to_jalali(ddate.year, ddate.month, ddate.day)

    today = JalaliDatetime.now(TehranTimezone())
    assert today.year == jy
    assert today.month == jm
    assert today.day == jd

    with freeze_time(datetime.now()):
        dilami_now = DilamiDatetime(datetime.now()).to_datetime()
        assert dilami_now.time() == datetime.now().time()

    now = datetime.now()
    dilami_date = DilamiDatetime(now)
    assert dilami_date.to_date() == now.date()


def test_limits():
    # Test MinYear and MaxYear
    with pytest.raises(ValueError):
        DilamiDatetime(194, 1, 1)
    with pytest.raises(ValueError):
        DilamiDatetime(3373, 1, 1)

    # Test months
    with pytest.raises(ValueError):
        DilamiDatetime(1592, -1, 3)

    with pytest.raises(ValueError):
        DilamiDatetime(1592, 13, 1)

    # Test days
    with pytest.raises(ValueError):
        DilamiDatetime(1592, 1, 32)
    with pytest.raises(ValueError):
        DilamiDatetime(1592, 1, -1)

    # Test days of leap year
    with pytest.raises(ValueError):
        DilamiDatetime(1595, 0, 0)

    with pytest.raises(ValueError):
        DilamiDatetime(1593, 0, 6)
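# A usage sketch based only on the calls exercised in the tests above
# (the values are taken from test_dilami_date):
#
#   from datetime import datetime
#   from khayyam import TehranTimezone
#   from dilami_calendar import DilamiDatetime, dilami_to_jalali
#
#   d = DilamiDatetime(datetime(2018, 2, 1), tzinfo=TehranTimezone)  # Gregorian -> Dilami
#   assert (d.year, d.month, d.day) == (1591, 6, 28)
#   jy, jm, jd = dilami_to_jalali(d.year, d.month, d.day)            # Dilami -> Jalali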
normal
{ "blob_id": "7997efb00f24ecc5c4fbf3ca049eca6b5b178d53", "index": 4088, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_dilami_date():\n gdate = datetime(2018, 2, 1)\n ddate = DilamiDatetime(gdate, tzinfo=TehranTimezone)\n assert ddate.year == 1591\n assert ddate.month == 6\n assert ddate.day == 28\n ddate = DilamiDatetime(1591, 6, 28, tzinfo=TehranTimezone)\n assert ddate\n ddate = DilamiDatetime(1592, 5, 1, tzinfo=TehranTimezone)\n dilami_date = DilamiDatetime(ddate)\n assert dilami_date\n ddate = DilamiDatetime().now()\n jy, jm, jd = dilami_to_jalali(ddate.year, ddate.month, ddate.day)\n today = JalaliDatetime.now(TehranTimezone())\n assert today.year == jy\n assert today.month == jm\n assert today.day == jd\n with freeze_time(datetime.now()):\n dilami_now = DilamiDatetime(datetime.now()).to_datetime()\n assert dilami_now.time() == datetime.now().time()\n now = datetime.now()\n dilami_date = DilamiDatetime(now)\n assert dilami_date.to_date() == now.date()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef test_dilami_date():\n gdate = datetime(2018, 2, 1)\n ddate = DilamiDatetime(gdate, tzinfo=TehranTimezone)\n assert ddate.year == 1591\n assert ddate.month == 6\n assert ddate.day == 28\n ddate = DilamiDatetime(1591, 6, 28, tzinfo=TehranTimezone)\n assert ddate\n ddate = DilamiDatetime(1592, 5, 1, tzinfo=TehranTimezone)\n dilami_date = DilamiDatetime(ddate)\n assert dilami_date\n ddate = DilamiDatetime().now()\n jy, jm, jd = dilami_to_jalali(ddate.year, ddate.month, ddate.day)\n today = JalaliDatetime.now(TehranTimezone())\n assert today.year == jy\n assert today.month == jm\n assert today.day == jd\n with freeze_time(datetime.now()):\n dilami_now = DilamiDatetime(datetime.now()).to_datetime()\n assert dilami_now.time() == datetime.now().time()\n now = datetime.now()\n dilami_date = DilamiDatetime(now)\n assert dilami_date.to_date() == now.date()\n\n\ndef test_limits():\n with pytest.raises(ValueError):\n DilamiDatetime(194, 1, 1)\n with pytest.raises(ValueError):\n DilamiDatetime(3373, 1, 1)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, -1, 3)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 13, 1)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 1, 32)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 1, -1)\n with pytest.raises(ValueError):\n DilamiDatetime(1595, 0, 0)\n with pytest.raises(ValueError):\n DilamiDatetime(1593, 0, 6)\n", "step-4": "import pytest\nfrom freezegun import freeze_time\nfrom datetime import datetime\nfrom khayyam import JalaliDatetime, TehranTimezone\nfrom dilami_calendar import DilamiDatetime, dilami_to_jalali\n\n\ndef test_dilami_date():\n gdate = datetime(2018, 2, 1)\n ddate = DilamiDatetime(gdate, tzinfo=TehranTimezone)\n assert ddate.year == 1591\n assert ddate.month == 6\n assert ddate.day == 28\n ddate = DilamiDatetime(1591, 6, 28, tzinfo=TehranTimezone)\n assert ddate\n ddate = DilamiDatetime(1592, 5, 1, tzinfo=TehranTimezone)\n dilami_date = DilamiDatetime(ddate)\n assert dilami_date\n ddate = DilamiDatetime().now()\n jy, jm, jd = dilami_to_jalali(ddate.year, ddate.month, ddate.day)\n today = JalaliDatetime.now(TehranTimezone())\n assert today.year == jy\n assert today.month == jm\n assert today.day == jd\n with freeze_time(datetime.now()):\n dilami_now = DilamiDatetime(datetime.now()).to_datetime()\n assert dilami_now.time() == datetime.now().time()\n now = datetime.now()\n dilami_date = DilamiDatetime(now)\n assert dilami_date.to_date() == now.date()\n\n\ndef test_limits():\n with 
pytest.raises(ValueError):\n DilamiDatetime(194, 1, 1)\n with pytest.raises(ValueError):\n DilamiDatetime(3373, 1, 1)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, -1, 3)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 13, 1)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 1, 32)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 1, -1)\n with pytest.raises(ValueError):\n DilamiDatetime(1595, 0, 0)\n with pytest.raises(ValueError):\n DilamiDatetime(1593, 0, 6)\n", "step-5": "import pytest\n\nfrom freezegun import freeze_time\nfrom datetime import datetime\nfrom khayyam import JalaliDatetime, TehranTimezone\n\nfrom dilami_calendar import DilamiDatetime, dilami_to_jalali\n\n\ndef test_dilami_date():\n gdate = datetime(2018, 2, 1)\n ddate = DilamiDatetime(gdate, tzinfo=TehranTimezone)\n\n assert ddate.year == 1591\n assert ddate.month == 6\n assert ddate.day == 28\n\n ddate = DilamiDatetime(1591, 6, 28, tzinfo=TehranTimezone)\n assert ddate\n\n ddate = DilamiDatetime(1592, 5, 1, tzinfo=TehranTimezone)\n dilami_date = DilamiDatetime(ddate)\n assert dilami_date\n\n # Check Dilami date return today\n ddate = DilamiDatetime().now()\n jy, jm, jd = dilami_to_jalali(ddate.year, ddate.month, ddate.day)\n\n today = JalaliDatetime.now(TehranTimezone())\n assert today.year == jy\n assert today.month == jm\n assert today.day == jd\n\n with freeze_time(datetime.now()):\n dilami_now = DilamiDatetime(datetime.now()).to_datetime()\n assert dilami_now.time() == datetime.now().time()\n\n now = datetime.now()\n dilami_date = DilamiDatetime(now)\n assert dilami_date.to_date() == now.date()\n\n\ndef test_limits():\n # Test MinYear and MaxYear\n with pytest.raises(ValueError):\n DilamiDatetime(194, 1, 1)\n with pytest.raises(ValueError):\n DilamiDatetime(3373, 1, 1)\n\n # Test months\n with pytest.raises(ValueError):\n DilamiDatetime(1592, -1, 3)\n\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 13, 1)\n\n # Test days\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 1, 32)\n with pytest.raises(ValueError):\n DilamiDatetime(1592, 1, -1)\n\n # Test days of leap year\n with pytest.raises(ValueError):\n DilamiDatetime(1595, 0, 0)\n\n with pytest.raises(ValueError):\n DilamiDatetime(1593, 0, 6)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import requests
import time

while True:
    r = requests.put("http://localhost:3000/api/4", data={"temperature": 24, "led": 1})
    print(r.text)
    time.sleep(1)
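# For a long-running poller it is safer to bound each request and survive
# transient network errors; a hedged variant of the same loop (the endpoint and
# payload are unchanged, the timeout value is an arbitrary choice):
#
#   while True:
#       try:
#           r = requests.put("http://localhost:3000/api/4",
#                            data={"temperature": 24, "led": 1},
#                            timeout=5)
#           r.raise_for_status()
#           print(r.text)
#       except requests.RequestException as e:
#           print("request failed:", e)
#       time.sleep(1)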
normal
{ "blob_id": "23a560c5f5553fc32329121ea47f8a7ae1196889", "index": 440, "step-1": "import requests\nimport time\n\nwhile 1:\n r = requests.put(\"http://localhost:3000/api/4\", data={\"temperature\": 24, \"led\": 1})\n print r.text\n time.sleep(1)", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from qcg.appscheduler.errors import *


class Node:
    def __init__(self, name=None, totalCores=0, used=0):
        self.__name = name
        self.__totalCores = totalCores
        self.__usedCores = used
        self.resources = None

    def __getName(self):
        return self.__name

    def __getTotalCores(self):
        return self.__totalCores

    def __setTotalCores(self, total):
        assert total >= 0 and total >= self.__usedCores
        self.__totalCores = total

    def __getUsedCores(self):
        return self.__usedCores

    def __setUsedCores(self, used):
        # '>= 0' (not '> 0') so a node may be marked fully free again
        assert used >= 0 and used <= self.__totalCores
        self.__usedCores = used

    def __getFreeCores(self):
        return self.__totalCores - self.__usedCores

    def __str__(self):
        return "%s %d (%d used)" % (self.__name, self.__totalCores, self.__usedCores)

    def allocate(self, cores):
        """
        Allocate up to the given number of cores on a node.

        Args:
            cores (int): maximum number of cores to allocate

        Returns:
            int: number of actually allocated cores
        """
        allocated = min(cores, self.free)
        self.__usedCores += allocated

        if self.resources is not None:
            self.resources.nodeCoresAllocated(allocated)

        return allocated

    def release(self, cores):
        """
        Release the specified number of cores on a node.

        Args:
            cores (int): number of cores to release

        Raises:
            InvalidResourceSpec: when the number of cores to release exceeds
                the number of used cores.
        """
        if cores > self.__usedCores:
            raise InvalidResourceSpec()

        self.__usedCores -= cores

        if self.resources is not None:
            self.resources.nodeCoresReleased(cores)

    name = property(__getName, None, None, "name of the node")
    total = property(__getTotalCores, __setTotalCores, None, "total number of cores")
    used = property(__getUsedCores, __setUsedCores, None, "number of allocated cores")
    free = property(__getFreeCores, None, None, "number of available cores")


class Resources:

    def __init__(self, nodes=None):
        self.__nodes = nodes
        if self.__nodes is None:
            self.__nodes = []

        for node in self.__nodes:
            node.resources = self

        self.__totalCores = 0
        self.__usedCores = 0

        self.__computeCores()

    def __computeCores(self):
        total, used = 0, 0
        for node in self.__nodes:
            total += node.total
            used += node.used

        self.__totalCores = total
        self.__usedCores = used

    def __getNodes(self):
        return self.__nodes

    def __getTotalCores(self):
        return self.__totalCores

    def __getUsedCores(self):
        return self.__usedCores

    def __getFreeCores(self):
        return self.__totalCores - self.__usedCores

    def nodeCoresAllocated(self, cores):
        """
        Called by a node when some cores have been allocated.
        Tracks the number of used cores in the Resources statistics.

        Args:
            cores (int): number of allocated cores
        """
        self.__usedCores += cores

    def nodeCoresReleased(self, cores):
        """
        Called by a node when some cores have been released.
        Tracks the number of used cores in the Resources statistics.

        Args:
            cores (int): number of released cores
        """
        self.__usedCores -= cores

    def releaseAllocation(self, alloc):
        """
        Release allocated resources.

        Args:
            alloc (Allocation): allocation to release

        Raises:
            InvalidResourceSpec: when the number of cores to release on a node
                is greater than the number of used cores.
        """
        for node in alloc.nodeAllocations:
            node.node.release(node.cores)

    def __str__(self):
        header = '%d (%d used) cores on %d nodes\n' % (self.__totalCores, self.__usedCores,
                                                       len(self.__nodes))
        return header + '\n'.join([str(node) for node in self.__nodes])

    def nNodes(self):
        return len(self.__nodes)

    nodes = property(__getNodes, None, None, "list of nodes")
    totalNodes = property(nNodes, None, None, "total number of nodes")
    totalCores = property(__getTotalCores, None, None, "total number of cores")
    usedCores = property(__getUsedCores, None, None, "used number of cores")
    freeCores = property(__getFreeCores, None, None, "free number of cores")
normal
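# A minimal usage sketch of the Node/Resources bookkeeping contract defined in
# the module above (node names and core counts are illustrative):
#
#   nodes = [Node('n0', totalCores=8), Node('n1', totalCores=8)]
#   res = Resources(nodes)
#   got = nodes[0].allocate(10)          # capped at the node's free cores
#   assert got == 8
#   assert res.usedCores == 8 and res.freeCores == 8
#   nodes[0].release(8)
#   assert res.usedCores == 0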
{ "blob_id": "23a7aa6b9a98bfd4fd43fea1ecfa26cb44969804", "index": 8061, "step-1": "<mask token>\n\n\nclass Node:\n <mask token>\n\n def __getName(self):\n return self.__name\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __setTotalCores(self, total):\n assert total >= 0 and total >= self.__usedCores\n self.__totalCores = total\n\n def __getUsedCores(self):\n return self.__usedCores\n <mask token>\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n <mask token>\n <mask token>\n\n def allocate(self, cores):\n allocated = min(cores, self.free)\n self.__usedCores += allocated\n if self.resources is not None:\n self.resources.nodeCoresAllocated(allocated)\n return allocated\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Resources:\n\n def __init__(self, nodes=None):\n self.__nodes = nodes\n if self.__nodes is None:\n self.__nodes = []\n for node in self.__nodes:\n node.resources = self\n self.__totalCores = 0\n self.__usedCores = 0\n self.__computeCores()\n\n def __computeCores(self):\n total, used = 0, 0\n for node in self.__nodes:\n total += node.total\n used += node.used\n self.__totalCores = total\n self.__usedCores = used\n\n def __getNodes(self):\n return self.__nodes\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n \"\"\"\n Function called by the node when some cores has been allocated.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of allocated cores\n \"\"\"\n\n def nodeCoresAllocated(self, cores):\n self.__usedCores += cores\n \"\"\"\n Function called by the node when some cores has been released.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of released cores\n \"\"\"\n\n def nodeCoresReleased(self, cores):\n self.__usedCores -= cores\n \"\"\"\n Relase allocated resources.\n\n Args:\n alloc (Allocation): allocation to release\n\n Raises:\n InvalidResourceSpec: when number of cores to release on a node is greater \n than number of used cores.\n \"\"\"\n\n def releaseAllocation(self, alloc):\n for node in alloc.nodeAllocations:\n node.node.release(node.cores)\n\n def __str__(self):\n header = '%d (%d used) cores on %d nodes\\n' % (self.__totalCores,\n self.__usedCores, len(self.__nodes))\n return header + '\\n'.join([str(node) for node in self.__nodes])\n\n def nNodes(self):\n return len(self.__nodes)\n nodes = property(__getNodes, None, None, 'list of a nodes')\n totalNodes = property(nNodes, None, None, 'total number of nodes')\n totalCores = property(__getTotalCores, None, None, 'total number of cores')\n usedCores = property(__getUsedCores, None, None, 'used number of cores')\n freeCores = property(__getFreeCores, None, None, 'free number of cores')\n", "step-2": "<mask token>\n\n\nclass Node:\n <mask token>\n\n def __getName(self):\n return self.__name\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __setTotalCores(self, total):\n assert total >= 0 and total >= self.__usedCores\n self.__totalCores = total\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __setUsedCores(self, used):\n assert used > 0 and used <= self.__totalCores\n self.__usedCores = used\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n\n def __str__(self):\n return '%s %d (%d used)' % (self.__name, 
self.__totalCores, self.\n __usedCores)\n <mask token>\n\n def allocate(self, cores):\n allocated = min(cores, self.free)\n self.__usedCores += allocated\n if self.resources is not None:\n self.resources.nodeCoresAllocated(allocated)\n return allocated\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Resources:\n\n def __init__(self, nodes=None):\n self.__nodes = nodes\n if self.__nodes is None:\n self.__nodes = []\n for node in self.__nodes:\n node.resources = self\n self.__totalCores = 0\n self.__usedCores = 0\n self.__computeCores()\n\n def __computeCores(self):\n total, used = 0, 0\n for node in self.__nodes:\n total += node.total\n used += node.used\n self.__totalCores = total\n self.__usedCores = used\n\n def __getNodes(self):\n return self.__nodes\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n \"\"\"\n Function called by the node when some cores has been allocated.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of allocated cores\n \"\"\"\n\n def nodeCoresAllocated(self, cores):\n self.__usedCores += cores\n \"\"\"\n Function called by the node when some cores has been released.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of released cores\n \"\"\"\n\n def nodeCoresReleased(self, cores):\n self.__usedCores -= cores\n \"\"\"\n Relase allocated resources.\n\n Args:\n alloc (Allocation): allocation to release\n\n Raises:\n InvalidResourceSpec: when number of cores to release on a node is greater \n than number of used cores.\n \"\"\"\n\n def releaseAllocation(self, alloc):\n for node in alloc.nodeAllocations:\n node.node.release(node.cores)\n\n def __str__(self):\n header = '%d (%d used) cores on %d nodes\\n' % (self.__totalCores,\n self.__usedCores, len(self.__nodes))\n return header + '\\n'.join([str(node) for node in self.__nodes])\n\n def nNodes(self):\n return len(self.__nodes)\n nodes = property(__getNodes, None, None, 'list of a nodes')\n totalNodes = property(nNodes, None, None, 'total number of nodes')\n totalCores = property(__getTotalCores, None, None, 'total number of cores')\n usedCores = property(__getUsedCores, None, None, 'used number of cores')\n freeCores = property(__getFreeCores, None, None, 'free number of cores')\n", "step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, name=None, totalCores=0, used=0):\n self.__name = name\n self.__totalCores = totalCores\n self.__usedCores = used\n self.resources = None\n\n def __getName(self):\n return self.__name\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __setTotalCores(self, total):\n assert total >= 0 and total >= self.__usedCores\n self.__totalCores = total\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __setUsedCores(self, used):\n assert used > 0 and used <= self.__totalCores\n self.__usedCores = used\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n\n def __str__(self):\n return '%s %d (%d used)' % (self.__name, self.__totalCores, self.\n __usedCores)\n <mask token>\n\n def allocate(self, cores):\n allocated = min(cores, self.free)\n self.__usedCores += allocated\n if self.resources is not None:\n self.resources.nodeCoresAllocated(allocated)\n return allocated\n <mask token>\n\n def release(self, cores):\n if cores > 
self.__usedCores:\n raise InvalidResourceSpec()\n self.__usedCores -= cores\n if self.resources is not None:\n self.resources.nodeCoresReleased(cores)\n name = property(__getName, None, None, 'name of the node')\n total = property(__getTotalCores, __setTotalCores, None,\n 'total number of cores')\n used = property(__getUsedCores, __setUsedCores, None,\n 'number of allocated cores')\n free = property(__getFreeCores, None, None, 'number of available cores')\n\n\nclass Resources:\n\n def __init__(self, nodes=None):\n self.__nodes = nodes\n if self.__nodes is None:\n self.__nodes = []\n for node in self.__nodes:\n node.resources = self\n self.__totalCores = 0\n self.__usedCores = 0\n self.__computeCores()\n\n def __computeCores(self):\n total, used = 0, 0\n for node in self.__nodes:\n total += node.total\n used += node.used\n self.__totalCores = total\n self.__usedCores = used\n\n def __getNodes(self):\n return self.__nodes\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n \"\"\"\n Function called by the node when some cores has been allocated.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of allocated cores\n \"\"\"\n\n def nodeCoresAllocated(self, cores):\n self.__usedCores += cores\n \"\"\"\n Function called by the node when some cores has been released.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of released cores\n \"\"\"\n\n def nodeCoresReleased(self, cores):\n self.__usedCores -= cores\n \"\"\"\n Relase allocated resources.\n\n Args:\n alloc (Allocation): allocation to release\n\n Raises:\n InvalidResourceSpec: when number of cores to release on a node is greater \n than number of used cores.\n \"\"\"\n\n def releaseAllocation(self, alloc):\n for node in alloc.nodeAllocations:\n node.node.release(node.cores)\n\n def __str__(self):\n header = '%d (%d used) cores on %d nodes\\n' % (self.__totalCores,\n self.__usedCores, len(self.__nodes))\n return header + '\\n'.join([str(node) for node in self.__nodes])\n\n def nNodes(self):\n return len(self.__nodes)\n nodes = property(__getNodes, None, None, 'list of a nodes')\n totalNodes = property(nNodes, None, None, 'total number of nodes')\n totalCores = property(__getTotalCores, None, None, 'total number of cores')\n usedCores = property(__getUsedCores, None, None, 'used number of cores')\n freeCores = property(__getFreeCores, None, None, 'free number of cores')\n", "step-4": "from qcg.appscheduler.errors import *\n\n\nclass Node:\n\n def __init__(self, name=None, totalCores=0, used=0):\n self.__name = name\n self.__totalCores = totalCores\n self.__usedCores = used\n self.resources = None\n\n def __getName(self):\n return self.__name\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __setTotalCores(self, total):\n assert total >= 0 and total >= self.__usedCores\n self.__totalCores = total\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __setUsedCores(self, used):\n assert used > 0 and used <= self.__totalCores\n self.__usedCores = used\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n\n def __str__(self):\n return '%s %d (%d used)' % (self.__name, self.__totalCores, self.\n __usedCores)\n \"\"\"\n Allocate maximum number of cores on a node.\n\n Args:\n cores (int): maximum number of cores to allocate\n\n Returns:\n int: 
number of allocated cores\n \"\"\"\n\n def allocate(self, cores):\n allocated = min(cores, self.free)\n self.__usedCores += allocated\n if self.resources is not None:\n self.resources.nodeCoresAllocated(allocated)\n return allocated\n \"\"\"\n Release specified number of cores on a node.\n\n Args:\n cores (int): number of cores to release\n\n Raises:\n InvalidResourceSpec: when number of cores to release exceeds number of of\n used cores.\n \"\"\"\n\n def release(self, cores):\n if cores > self.__usedCores:\n raise InvalidResourceSpec()\n self.__usedCores -= cores\n if self.resources is not None:\n self.resources.nodeCoresReleased(cores)\n name = property(__getName, None, None, 'name of the node')\n total = property(__getTotalCores, __setTotalCores, None,\n 'total number of cores')\n used = property(__getUsedCores, __setUsedCores, None,\n 'number of allocated cores')\n free = property(__getFreeCores, None, None, 'number of available cores')\n\n\nclass Resources:\n\n def __init__(self, nodes=None):\n self.__nodes = nodes\n if self.__nodes is None:\n self.__nodes = []\n for node in self.__nodes:\n node.resources = self\n self.__totalCores = 0\n self.__usedCores = 0\n self.__computeCores()\n\n def __computeCores(self):\n total, used = 0, 0\n for node in self.__nodes:\n total += node.total\n used += node.used\n self.__totalCores = total\n self.__usedCores = used\n\n def __getNodes(self):\n return self.__nodes\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n \"\"\"\n Function called by the node when some cores has been allocated.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of allocated cores\n \"\"\"\n\n def nodeCoresAllocated(self, cores):\n self.__usedCores += cores\n \"\"\"\n Function called by the node when some cores has been released.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of released cores\n \"\"\"\n\n def nodeCoresReleased(self, cores):\n self.__usedCores -= cores\n \"\"\"\n Relase allocated resources.\n\n Args:\n alloc (Allocation): allocation to release\n\n Raises:\n InvalidResourceSpec: when number of cores to release on a node is greater \n than number of used cores.\n \"\"\"\n\n def releaseAllocation(self, alloc):\n for node in alloc.nodeAllocations:\n node.node.release(node.cores)\n\n def __str__(self):\n header = '%d (%d used) cores on %d nodes\\n' % (self.__totalCores,\n self.__usedCores, len(self.__nodes))\n return header + '\\n'.join([str(node) for node in self.__nodes])\n\n def nNodes(self):\n return len(self.__nodes)\n nodes = property(__getNodes, None, None, 'list of a nodes')\n totalNodes = property(nNodes, None, None, 'total number of nodes')\n totalCores = property(__getTotalCores, None, None, 'total number of cores')\n usedCores = property(__getUsedCores, None, None, 'used number of cores')\n freeCores = property(__getFreeCores, None, None, 'free number of cores')\n", "step-5": "from qcg.appscheduler.errors import *\n\n\nclass Node:\n def __init__(self, name=None, totalCores=0, used=0):\n self.__name = name\n self.__totalCores = totalCores\n self.__usedCores = used\n self.resources = None\n\n def __getName(self):\n return self.__name\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __setTotalCores(self, total):\n assert total >= 0 and total >= self.__usedCores\n self.__totalCores = 
total\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __setUsedCores(self, used):\n assert used > 0 and used <= self.__totalCores\n self.__usedCores = used\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n\n def __str__(self):\n return \"%s %d (%d used)\" % (self.__name, self.__totalCores, self.__usedCores)\n\n \"\"\"\n Allocate maximum number of cores on a node.\n\n Args:\n cores (int): maximum number of cores to allocate\n\n Returns:\n int: number of allocated cores\n \"\"\"\n\n def allocate(self, cores):\n allocated = min(cores, self.free)\n self.__usedCores += allocated\n\n if self.resources is not None:\n self.resources.nodeCoresAllocated(allocated)\n\n return allocated\n\n \"\"\"\n Release specified number of cores on a node.\n\n Args:\n cores (int): number of cores to release\n\n Raises:\n InvalidResourceSpec: when number of cores to release exceeds number of of\n used cores.\n \"\"\"\n\n def release(self, cores):\n if cores > self.__usedCores:\n raise InvalidResourceSpec()\n\n self.__usedCores -= cores\n\n if self.resources is not None:\n self.resources.nodeCoresReleased(cores)\n\n name = property(__getName, None, None, \"name of the node\")\n total = property(__getTotalCores, __setTotalCores, None, \"total number of cores\")\n used = property(__getUsedCores, __setUsedCores, None, \"number of allocated cores\")\n free = property(__getFreeCores, None, None, \"number of available cores\")\n\n\nclass Resources:\n\n def __init__(self, nodes=None):\n self.__nodes = nodes\n if self.__nodes is None:\n self.__nodes = []\n\n for node in self.__nodes:\n node.resources = self\n\n self.__totalCores = 0\n self.__usedCores = 0\n\n #\t\tprint \"initializing %d nodes\" % len(nodes)\n self.__computeCores()\n\n def __computeCores(self):\n total, used = 0, 0\n for node in self.__nodes:\n total += node.total\n used += node.used\n\n self.__totalCores = total\n self.__usedCores = used\n\n def __getNodes(self):\n return self.__nodes\n\n def __getTotalCores(self):\n return self.__totalCores\n\n def __getUsedCores(self):\n return self.__usedCores\n\n def __getFreeCores(self):\n return self.__totalCores - self.__usedCores\n\n \"\"\"\n Function called by the node when some cores has been allocated.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of allocated cores\n \"\"\"\n\n def nodeCoresAllocated(self, cores):\n self.__usedCores += cores\n\n \"\"\"\n Function called by the node when some cores has been released.\n This function should track number of used cores in Resources statistics.\n\n Args:\n cores (int): number of released cores\n \"\"\"\n\n def nodeCoresReleased(self, cores):\n self.__usedCores -= cores\n\n \"\"\"\n Relase allocated resources.\n\n Args:\n alloc (Allocation): allocation to release\n\n Raises:\n InvalidResourceSpec: when number of cores to release on a node is greater \n than number of used cores.\n \"\"\"\n\n def releaseAllocation(self, alloc):\n for node in alloc.nodeAllocations:\n node.node.release(node.cores)\n\n def __str__(self):\n header = '%d (%d used) cores on %d nodes\\n' % (self.__totalCores, self.__usedCores, \\\n len(self.__nodes))\n return header + '\\n'.join([str(node) for node in self.__nodes])\n\n #\t\tif self.__nodes:\n #\t\t\tfor node in self.__nodes:\n #\t\t\t\tresult.join(\"\\n%s\" % node)\n #\t\treturn result\n\n def nNodes(self):\n return len(self.__nodes)\n\n nodes = property(__getNodes, None, None, \"list of a nodes\")\n totalNodes = property(nNodes, None, 
None, \"total number of nodes\")\n totalCores = property(__getTotalCores, None, None, \"total number of cores\")\n usedCores = property(__getUsedCores, None, None, \"used number of cores\")\n freeCores = property(__getFreeCores, None, None, \"free number of cores\")\n", "step-ids": [ 21, 23, 26, 28, 29 ] }
[ 21, 23, 26, 28, 29 ]
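The Node and Resources classes in the record above cooperate through the nodeCoresAllocated/nodeCoresReleased callbacks, so per-node and cluster-wide counters stay in sync. A minimal usage sketch, assuming the classes from the final step are in scope; the node names and core counts below are made up for illustration:

# Hypothetical usage; node names and core counts are illustrative only.
nodes = [Node(name='n1', totalCores=8), Node(name='n2', totalCores=4)]
res = Resources(nodes)

got = nodes[0].allocate(6)   # allocate() caps the request at the node's free cores
print(got, res.usedCores)    # -> 6 6
nodes[0].release(6)          # release() raises InvalidResourceSpec on over-release
print(res.freeCores)         # -> 12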
#!/usr/bin/python3 max_integer = __import__('9-max_integer').max_integer my_list = [1, 90, 2, 13, 34, 5, -13, 3] my_list1 = [] my_list2 = [1, 90, 2, 13, 34, 100, -13, 3] max_value = max_integer(my_list) max_value1 = max_integer(my_list1) max_value2 = max_integer(my_list2) max_value3 = max_integer() print("Max: {}".format(max_value)) print("Max: {}".format(max_value1)) print("Max: {}".format(max_value2)) print("Max: {}".format(max_value3))
normal
{ "blob_id": "f5b74ca95cb368d70139b5d36e3c8d553b8c5393", "index": 1393, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('Max: {}'.format(max_value))\nprint('Max: {}'.format(max_value1))\nprint('Max: {}'.format(max_value2))\nprint('Max: {}'.format(max_value3))\n", "step-3": "max_integer = __import__('9-max_integer').max_integer\nmy_list = [1, 90, 2, 13, 34, 5, -13, 3]\nmy_list1 = []\nmy_list2 = [1, 90, 2, 13, 34, 100, -13, 3]\nmax_value = max_integer(my_list)\nmax_value1 = max_integer(my_list1)\nmax_value2 = max_integer(my_list2)\nmax_value3 = max_integer()\nprint('Max: {}'.format(max_value))\nprint('Max: {}'.format(max_value1))\nprint('Max: {}'.format(max_value2))\nprint('Max: {}'.format(max_value3))\n", "step-4": "#!/usr/bin/python3\nmax_integer = __import__('9-max_integer').max_integer\n\nmy_list = [1, 90, 2, 13, 34, 5, -13, 3]\nmy_list1 = []\nmy_list2 = [1, 90, 2, 13, 34, 100, -13, 3]\nmax_value = max_integer(my_list)\nmax_value1 = max_integer(my_list1)\nmax_value2 = max_integer(my_list2)\nmax_value3 = max_integer()\nprint(\"Max: {}\".format(max_value))\nprint(\"Max: {}\".format(max_value1))\nprint(\"Max: {}\".format(max_value2))\nprint(\"Max: {}\".format(max_value3))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
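The driver above imports max_integer from a 9-max_integer module that is not part of this dump. A minimal implementation consistent with how the driver calls it (a no-argument call must work, and an empty list yields None) could look like this; treat it as an assumption, not the original module:

def max_integer(my_list=[]):
    # Return the largest value in my_list, or None for an empty list.
    if not my_list:
        return None
    result = my_list[0]
    for n in my_list[1:]:
        if n > result:
            result = n
    return result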
import wx
import os
# os.environ["HTTPS_PROXY"] = "http://user:[email protected]:3128"
import wikipedia
import wolframalpha
import pyttsx3
import webbrowser
import winshell
import json
import requests
import ctypes
import random
from urllib.request import urlopen
import speech_recognition as sr
import ssl
import urllib.request
import urllib.parse
import re
from regression import Regression
# Remove SSL error
requests.packages.urllib3.disable_warnings()

try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context


headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/53.0.2785.143 Safari/537.36'}

#speak = wincl.Dispatch("SAPI.SpVoice")
speak = pyttsx3.init()
voices = speak.getProperty('voices')
voice = voices[1]
speak.setProperty('voice', voice.id)

# Requirements
videos = ['C:\\Users\\nEW u\\Videos\\Um4WR.mkv', 'C:\\Users\\nEW u\\Videos\\Jaatishwar.mkv']
app_id = 'GY6T92-YG5RXA85AV'


# GUI creation
class MyFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None,
                          pos=wx.DefaultPosition, size=wx.Size(450, 100),
                          style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |
                          wx.CLOSE_BOX | wx.CLIP_CHILDREN,
                          title="Assistant")
        panel = wx.Panel(self)

        #ico = wx.Icon('programming.jpg', type=wx.ICON_ASTERISK, desiredWidth=-1, desiredHeight=-1)
        #self.SetIcon(ico)

        my_sizer = wx.BoxSizer(wx.VERTICAL)
        lbl = wx.StaticText(panel,
                            label="Hello Sir. How can I help you?")
        my_sizer.Add(lbl, 0, wx.ALL, 5)
        self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,
                               size=(400, 30))
        self.txt.SetFocus()
        self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
        my_sizer.Add(self.txt, 0, wx.ALL, 5)
        panel.SetSizer(my_sizer)
        self.Show()
        speak.say('''Welcome back Sir, Your assistant at your service.''')
        speak.runAndWait()

    def OnEnter(self, event):
        put = self.txt.GetValue()
        put = put.lower()
        link = put.split()
        r = sr.Recognizer()
        if put == '':
            with sr.Microphone() as src:
                r.adjust_for_ambient_noise(src)
                speak.say("Yes? How can I help You?")
                speak.runAndWait()
                audio = r.listen(src)
            try:
                put = r.recognize_google(audio)
                put = put.lower()
                link = put.split()
                self.txt.SetValue(put)

            except sr.UnknownValueError:
                print("Google Speech Recognition could not understand audio")
            except sr.RequestError as e:
                print("Could not request results from Google STT; {0}".format(e))
            except:
                print("Unknown exception occurred!")

        # Open a webpage
        if put.startswith('open '):
            try:
                speak.say("opening "+link[1])
                speak.runAndWait()
                webbrowser.open('http://www.'+link[1]+'.com')
            except:
                print('Sorry, No Internet Connection!')
        # Play Song on Youtube
        elif put.startswith('play '):
            try:
                link = '+'.join(link[1:])
                s = link.replace('+', ' ')
                query_string = urllib.parse.urlencode({"search_query": link})
                html_content = urllib.request.urlopen("http://www.youtube.com/results?"
+ query_string) search_results = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode()) print("http://www.youtube.com/watch?v=" + search_results[0]) speak.say("playing "+s) speak.runAndWait() webbrowser.open("http://www.youtube.com/watch?v=" + search_results[0]) except: print('Sorry, No internet connection!') # Google Search elif put.startswith('search '): try: link = '+'.join(link[1:]) say = link.replace('+', ' ') speak.say("searching on google for "+say) speak.runAndWait() webbrowser.open('https://www.google.co.in/search?q='+link) except: print('Sorry, No internet connection!') # Empty Recycle bin elif put.startswith('empty '): try: winshell.recycle_bin().empty(confirm=False, show_progress=False, sound=True) speak.say("Recycle Bin Empty") speak.runAndWait() except: speak.say("Unknown Error") speak.runAndWait() # News elif put.startswith('science '): try: jsonObj = urlopen('''https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here''') data = json.load(jsonObj) i = 1 speak.say('''Here are some top science news from new scientist''') speak.runAndWait() print(''' ================NEW SCIENTIST============= '''+'\n') for item in data['articles']: print(str(i)+'. '+item['title']+'\n') print(item['description']+'\n') i += 1 except: print('Sorry, No internet connection') elif put.startswith('headlines '): try: jsonObj = urlopen('''https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here''') data = json.load(jsonObj) i = 1 speak.say('Here are some top news from the times of india') speak.runAndWait() print(''' ===============TIMES OF INDIA============''' +'\n') for item in data['articles']: print(str(i)+'. '+item['title']+'\n') print(item['description']+'\n') i += 1 except Exception as e: print(str(e)) # Lock the device elif put.startswith('lock '): try: speak.say("locking the device") speak.runAndWait() ctypes.windll.user32.LockWorkStation() except Exception as e: print(str(e)) # Play videos in boredom elif put.endswith('bored'): try: speak.say('''Sir, I\'m playing a video. Hope you like it''') speak.runAndWait() video = random.choice(videos) os.startfile(video) except Exception as e: print(str(e)) # Say Whats up elif put.startswith('whats up'): try: speak.say('''Nothing much, just trying to become the perfect assistant!''') speak.runAndWait() except Exception as e: print(str(e)) #Show stocks elif put.startswith('show stocks'): try: Regression.execute() except Exception as e: print(str(e)) # Other Cases else: try: # wolframalpha client = wolframalpha.Client(app_id) res = client.query(put) ans = next(res.results).text print(ans) speak.say(ans) speak.runAndWait() except: # wikipedia/google put = put.split() put = ' '.join(put[:]) #print(put) print(wikipedia.summary(put)) speak.say('Searched google for '+put) speak.runAndWait() webbrowser.open('https://www.google.co.in/search?q='+put) # Trigger GUI if __name__ == "__main__": app = wx.App(True) frame = MyFrame() app.MainLoop()
normal
{ "blob_id": "8f1e6ea93b2dd7add256cb31d2c621aa69721609", "index": 8834, "step-1": "<mask token>\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, pos=wx.DefaultPosition, size=wx.Size(\n 450, 100), style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN, title='Assistant')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel, label='Hello Sir. How can I help you?')\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400, 30)\n )\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('Welcome back Sir, Your assistant at your service.')\n speak.runAndWait()\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src)\n speak.say('Yes? How can I help You?')\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print('Could not request results from Google STT; {0}'.\n format(e))\n except:\n print('Unknown exception occurred!')\n if put.startswith('open '):\n try:\n speak.say('opening ' + link[1])\n speak.runAndWait()\n webbrowser.open('http://www.' + link[1] + '.com')\n except:\n print('Sorry, No Internet Connection!')\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({'search_query': link})\n html_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})',\n html_content.read().decode())\n print('http://www.youtube.com/watch?v=' + search_results[0])\n speak.say('playing ' + s)\n speak.runAndWait()\n webbrowser.open('http://www.youtube.com/watch?v=' +\n search_results[0])\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say('searching on google for ' + say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + link)\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False, show_progress=\n False, sound=True)\n speak.say('Recycle Bin Empty')\n speak.runAndWait()\n except:\n speak.say('Unknown Error')\n speak.runAndWait()\n elif put.startswith('science '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top science news from new scientist')\n speak.runAndWait()\n print(\n \"\"\" ================NEW SCIENTIST=============\n \"\"\"\n + '\\n')\n for item in data['articles']:\n print(str(i) + '. 
' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(\n ' ===============TIMES OF INDIA============' +\n '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except Exception as e:\n print(str(e))\n elif put.startswith('lock '):\n try:\n speak.say('locking the device')\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e))\n elif put.endswith('bored'):\n try:\n speak.say(\n \"\"\"Sir, I'm playing a video.\n Hope you like it\"\"\"\n )\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e))\n elif put.startswith('whats up'):\n try:\n speak.say(\n 'Nothing much, just trying to become the perfect assistant!'\n )\n speak.runAndWait()\n except Exception as e:\n print(str(e))\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n else:\n try:\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n except:\n put = put.split()\n put = ' '.join(put[:])\n print(wikipedia.summary(put))\n speak.say('Searched google for ' + put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + put)\n\n\n<mask token>\n", "step-2": "<mask token>\nrequests.packages.urllib3.disable_warnings()\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n pass\nelse:\n ssl._create_default_https_context = _create_unverified_https_context\n<mask token>\nspeak.setProperty('voice', voice.id)\n<mask token>\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, pos=wx.DefaultPosition, size=wx.Size(\n 450, 100), style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN, title='Assistant')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel, label='Hello Sir. How can I help you?')\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400, 30)\n )\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('Welcome back Sir, Your assistant at your service.')\n speak.runAndWait()\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src)\n speak.say('Yes? How can I help You?')\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print('Could not request results from Google STT; {0}'.\n format(e))\n except:\n print('Unknown exception occurred!')\n if put.startswith('open '):\n try:\n speak.say('opening ' + link[1])\n speak.runAndWait()\n webbrowser.open('http://www.' 
+ link[1] + '.com')\n except:\n print('Sorry, No Internet Connection!')\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({'search_query': link})\n html_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})',\n html_content.read().decode())\n print('http://www.youtube.com/watch?v=' + search_results[0])\n speak.say('playing ' + s)\n speak.runAndWait()\n webbrowser.open('http://www.youtube.com/watch?v=' +\n search_results[0])\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say('searching on google for ' + say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + link)\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False, show_progress=\n False, sound=True)\n speak.say('Recycle Bin Empty')\n speak.runAndWait()\n except:\n speak.say('Unknown Error')\n speak.runAndWait()\n elif put.startswith('science '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top science news from new scientist')\n speak.runAndWait()\n print(\n \"\"\" ================NEW SCIENTIST=============\n \"\"\"\n + '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(\n ' ===============TIMES OF INDIA============' +\n '\\n')\n for item in data['articles']:\n print(str(i) + '. 
' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except Exception as e:\n print(str(e))\n elif put.startswith('lock '):\n try:\n speak.say('locking the device')\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e))\n elif put.endswith('bored'):\n try:\n speak.say(\n \"\"\"Sir, I'm playing a video.\n Hope you like it\"\"\"\n )\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e))\n elif put.startswith('whats up'):\n try:\n speak.say(\n 'Nothing much, just trying to become the perfect assistant!'\n )\n speak.runAndWait()\n except Exception as e:\n print(str(e))\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n else:\n try:\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n except:\n put = put.split()\n put = ' '.join(put[:])\n print(wikipedia.summary(put))\n speak.say('Searched google for ' + put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + put)\n\n\nif __name__ == '__main__':\n app = wx.App(True)\n frame = MyFrame()\n app.MainLoop()\n", "step-3": "<mask token>\nrequests.packages.urllib3.disable_warnings()\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n pass\nelse:\n ssl._create_default_https_context = _create_unverified_https_context\nheaders = {\n \"\"\"user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)\n AppleWebKit/537.36 (KHTML, like Gecko)\n Chrome/53.0.2785.143 Safari/537.36\"\"\"\n }\nspeak = pyttsx3.init()\nvoices = speak.getProperty('voices')\nvoice = voices[1]\nspeak.setProperty('voice', voice.id)\nvideos = ['C:\\\\Users\\\\nEW u\\\\Videos\\\\Um4WR.mkv',\n 'C:\\\\Users\\\\nEW u\\\\Videos\\\\Jaatishwar.mkv']\napp_id = 'GY6T92-YG5RXA85AV'\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, pos=wx.DefaultPosition, size=wx.Size(\n 450, 100), style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN, title='Assistant')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel, label='Hello Sir. How can I help you?')\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400, 30)\n )\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('Welcome back Sir, Your assistant at your service.')\n speak.runAndWait()\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src)\n speak.say('Yes? How can I help You?')\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print('Could not request results from Google STT; {0}'.\n format(e))\n except:\n print('Unknown exception occurred!')\n if put.startswith('open '):\n try:\n speak.say('opening ' + link[1])\n speak.runAndWait()\n webbrowser.open('http://www.' 
+ link[1] + '.com')\n except:\n print('Sorry, No Internet Connection!')\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({'search_query': link})\n html_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})',\n html_content.read().decode())\n print('http://www.youtube.com/watch?v=' + search_results[0])\n speak.say('playing ' + s)\n speak.runAndWait()\n webbrowser.open('http://www.youtube.com/watch?v=' +\n search_results[0])\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say('searching on google for ' + say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + link)\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False, show_progress=\n False, sound=True)\n speak.say('Recycle Bin Empty')\n speak.runAndWait()\n except:\n speak.say('Unknown Error')\n speak.runAndWait()\n elif put.startswith('science '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top science news from new scientist')\n speak.runAndWait()\n print(\n \"\"\" ================NEW SCIENTIST=============\n \"\"\"\n + '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(\n ' ===============TIMES OF INDIA============' +\n '\\n')\n for item in data['articles']:\n print(str(i) + '. 
' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except Exception as e:\n print(str(e))\n elif put.startswith('lock '):\n try:\n speak.say('locking the device')\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e))\n elif put.endswith('bored'):\n try:\n speak.say(\n \"\"\"Sir, I'm playing a video.\n Hope you like it\"\"\"\n )\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e))\n elif put.startswith('whats up'):\n try:\n speak.say(\n 'Nothing much, just trying to become the perfect assistant!'\n )\n speak.runAndWait()\n except Exception as e:\n print(str(e))\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n else:\n try:\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n except:\n put = put.split()\n put = ' '.join(put[:])\n print(wikipedia.summary(put))\n speak.say('Searched google for ' + put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + put)\n\n\nif __name__ == '__main__':\n app = wx.App(True)\n frame = MyFrame()\n app.MainLoop()\n", "step-4": "import wx\nimport os\nimport wikipedia\nimport wolframalpha\nimport pyttsx3\nimport webbrowser\nimport winshell\nimport json\nimport requests\nimport ctypes\nimport random\nfrom urllib.request import urlopen\nimport speech_recognition as sr\nimport ssl\nimport urllib.request\nimport urllib.parse\nimport re\nfrom regression import Regression\nrequests.packages.urllib3.disable_warnings()\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n pass\nelse:\n ssl._create_default_https_context = _create_unverified_https_context\nheaders = {\n \"\"\"user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)\n AppleWebKit/537.36 (KHTML, like Gecko)\n Chrome/53.0.2785.143 Safari/537.36\"\"\"\n }\nspeak = pyttsx3.init()\nvoices = speak.getProperty('voices')\nvoice = voices[1]\nspeak.setProperty('voice', voice.id)\nvideos = ['C:\\\\Users\\\\nEW u\\\\Videos\\\\Um4WR.mkv',\n 'C:\\\\Users\\\\nEW u\\\\Videos\\\\Jaatishwar.mkv']\napp_id = 'GY6T92-YG5RXA85AV'\n\n\nclass MyFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, pos=wx.DefaultPosition, size=wx.Size(\n 450, 100), style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN, title='Assistant')\n panel = wx.Panel(self)\n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel, label='Hello Sir. How can I help you?')\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400, 30)\n )\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('Welcome back Sir, Your assistant at your service.')\n speak.runAndWait()\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src)\n speak.say('Yes? 
How can I help You?')\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n except sr.UnknownValueError:\n print('Google Speech Recognition could not understand audio')\n except sr.RequestError as e:\n print('Could not request results from Google STT; {0}'.\n format(e))\n except:\n print('Unknown exception occurred!')\n if put.startswith('open '):\n try:\n speak.say('opening ' + link[1])\n speak.runAndWait()\n webbrowser.open('http://www.' + link[1] + '.com')\n except:\n print('Sorry, No Internet Connection!')\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({'search_query': link})\n html_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string)\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})',\n html_content.read().decode())\n print('http://www.youtube.com/watch?v=' + search_results[0])\n speak.say('playing ' + s)\n speak.runAndWait()\n webbrowser.open('http://www.youtube.com/watch?v=' +\n search_results[0])\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say('searching on google for ' + say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + link)\n except:\n print('Sorry, No internet connection!')\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False, show_progress=\n False, sound=True)\n speak.say('Recycle Bin Empty')\n speak.runAndWait()\n except:\n speak.say('Unknown Error')\n speak.runAndWait()\n elif put.startswith('science '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top science news from new scientist')\n speak.runAndWait()\n print(\n \"\"\" ================NEW SCIENTIST=============\n \"\"\"\n + '\\n')\n for item in data['articles']:\n print(str(i) + '. ' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen(\n 'https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here'\n )\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(\n ' ===============TIMES OF INDIA============' +\n '\\n')\n for item in data['articles']:\n print(str(i) + '. 
' + item['title'] + '\\n')\n print(item['description'] + '\\n')\n i += 1\n except Exception as e:\n print(str(e))\n elif put.startswith('lock '):\n try:\n speak.say('locking the device')\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e))\n elif put.endswith('bored'):\n try:\n speak.say(\n \"\"\"Sir, I'm playing a video.\n Hope you like it\"\"\"\n )\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e))\n elif put.startswith('whats up'):\n try:\n speak.say(\n 'Nothing much, just trying to become the perfect assistant!'\n )\n speak.runAndWait()\n except Exception as e:\n print(str(e))\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n else:\n try:\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n except:\n put = put.split()\n put = ' '.join(put[:])\n print(wikipedia.summary(put))\n speak.say('Searched google for ' + put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q=' + put)\n\n\nif __name__ == '__main__':\n app = wx.App(True)\n frame = MyFrame()\n app.MainLoop()\n", "step-5": "import wx\nimport os\n# os.environ[\"HTTPS_PROXY\"] = \"http://user:[email protected]:3128\"\nimport wikipedia\nimport wolframalpha\nimport pyttsx3\nimport webbrowser\nimport winshell\nimport json\nimport requests\nimport ctypes\nimport random\nfrom urllib.request import urlopen\nimport speech_recognition as sr\nimport ssl\nimport urllib.request\nimport urllib.parse\nimport re\nfrom regression import Regression\n# Remove SSL error\nrequests.packages.urllib3.disable_warnings()\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\nelse:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\n\nheaders = {'''user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)\n AppleWebKit/537.36 (KHTML, like Gecko)\n Chrome/53.0.2785.143 Safari/537.36'''}\n\n#speak = wincl.Dispatch(\"SAPI.SpVoice\")\nspeak = pyttsx3.init()\nvoices = speak.getProperty('voices')\nvoice = voices[1]\nspeak.setProperty('voice', voice.id)\n\n# Requirements\nvideos = ['C:\\\\Users\\\\nEW u\\\\Videos\\\\Um4WR.mkv', 'C:\\\\Users\\\\nEW u\\\\Videos\\\\Jaatishwar.mkv']\napp_id = 'GY6T92-YG5RXA85AV'\n\n\n# GUI creation\nclass MyFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None,\n pos=wx.DefaultPosition, size=wx.Size(450, 100),\n style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |\n wx.CLOSE_BOX | wx.CLIP_CHILDREN,\n title=\"Assistant\")\n panel = wx.Panel(self)\n\n #ico = wx.Icon('programming.jpg', type=wx.ICON_ASTERISK, desiredWidth=-1, desiredHeight=-1)\n #self.SetIcon(ico)\n \n my_sizer = wx.BoxSizer(wx.VERTICAL)\n lbl = wx.StaticText(panel,\n label=\"Hello Sir. 
How can I help you?\")\n my_sizer.Add(lbl, 0, wx.ALL, 5)\n self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,\n size=(400, 30))\n self.txt.SetFocus()\n self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)\n my_sizer.Add(self.txt, 0, wx.ALL, 5)\n panel.SetSizer(my_sizer)\n self.Show()\n speak.say('''Welcome back Sir, Your assistant at your service.''')\n speak.runAndWait()\n\n\n def OnEnter(self, event):\n put = self.txt.GetValue()\n put = put.lower()\n link = put.split()\n r = sr.Recognizer()\n if put == '':\n with sr.Microphone() as src:\n r.adjust_for_ambient_noise(src) \n speak.say(\"Yes? How can I help You?\")\n speak.runAndWait()\n audio = r.listen(src)\n try:\n put = r.recognize_google(audio)\n put = put.lower()\n link = put.split()\n self.txt.SetValue(put)\n\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google STT; {0}\".format(e))\n except:\n print(\"Unknown exception occurred!\")\n\n # Open a webpage\n if put.startswith('open '):\n try:\n speak.say(\"opening \"+link[1])\n speak.runAndWait()\n webbrowser.open('http://www.'+link[1]+'.com')\n except:\n print('Sorry, No Internet Connection!')\n # Play Song on Youtube\n elif put.startswith('play '):\n try:\n link = '+'.join(link[1:])\n s = link.replace('+', ' ')\n query_string = urllib.parse.urlencode({\"search_query\" : link})\n html_content = urllib.request.urlopen(\"http://www.youtube.com/results?\" + query_string)\n search_results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})', html_content.read().decode())\n print(\"http://www.youtube.com/watch?v=\" + search_results[0])\n speak.say(\"playing \"+s)\n speak.runAndWait()\n webbrowser.open(\"http://www.youtube.com/watch?v=\" + search_results[0])\n except:\n print('Sorry, No internet connection!')\n # Google Search\n elif put.startswith('search '):\n try:\n link = '+'.join(link[1:])\n say = link.replace('+', ' ')\n speak.say(\"searching on google for \"+say)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q='+link)\n except:\n print('Sorry, No internet connection!')\n # Empty Recycle bin\n elif put.startswith('empty '):\n try:\n winshell.recycle_bin().empty(confirm=False,\n show_progress=False, sound=True)\n speak.say(\"Recycle Bin Empty\")\n speak.runAndWait()\n except:\n speak.say(\"Unknown Error\")\n speak.runAndWait()\n # News\n elif put.startswith('science '):\n try:\n jsonObj = urlopen('''https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here''')\n data = json.load(jsonObj)\n i = 1\n speak.say('''Here are some top science news from new scientist''')\n speak.runAndWait()\n print(''' ================NEW SCIENTIST=============\n '''+'\\n')\n for item in data['articles']:\n print(str(i)+'. '+item['title']+'\\n')\n print(item['description']+'\\n')\n i += 1\n except:\n print('Sorry, No internet connection')\n elif put.startswith('headlines '):\n try:\n jsonObj = urlopen('''https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here''')\n data = json.load(jsonObj)\n i = 1\n speak.say('Here are some top news from the times of india')\n speak.runAndWait()\n print(''' ===============TIMES OF INDIA============'''\n +'\\n')\n for item in data['articles']:\n print(str(i)+'. 
'+item['title']+'\\n')\n print(item['description']+'\\n')\n i += 1\n except Exception as e:\n print(str(e))\n # Lock the device\n elif put.startswith('lock '):\n try:\n speak.say(\"locking the device\")\n speak.runAndWait()\n ctypes.windll.user32.LockWorkStation()\n except Exception as e:\n print(str(e)) \n # Play videos in boredom\n elif put.endswith('bored'):\n try:\n speak.say('''Sir, I\\'m playing a video.\n Hope you like it''')\n speak.runAndWait()\n video = random.choice(videos)\n os.startfile(video)\n except Exception as e:\n print(str(e)) \n # Say Whats up \n elif put.startswith('whats up'):\n try:\n speak.say('''Nothing much, just trying to become the perfect assistant!''')\n speak.runAndWait()\n except Exception as e:\n print(str(e)) \n #Show stocks\n elif put.startswith('show stocks'):\n try:\n Regression.execute()\n except Exception as e:\n print(str(e))\n \n # Other Cases\n else:\n try:\n # wolframalpha\n client = wolframalpha.Client(app_id)\n res = client.query(put)\n ans = next(res.results).text\n print(ans)\n speak.say(ans)\n speak.runAndWait()\n\n except:\n # wikipedia/google\n put = put.split()\n put = ' '.join(put[:])\n #print(put)\n print(wikipedia.summary(put))\n speak.say('Searched google for '+put)\n speak.runAndWait()\n webbrowser.open('https://www.google.co.in/search?q='+put)\n\n\n# Trigger GUI\nif __name__ == \"__main__\":\n app = wx.App(True)\n frame = MyFrame()\n app.MainLoop()", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
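The assistant above bundles speech capture, command parsing and dispatch into one wx event handler. The speech-to-text step can be exercised in isolation, without the GUI, using the same SpeechRecognition calls as OnEnter; this is a sketch, not part of the original file:

import speech_recognition as sr

def listen_once():
    # Capture one utterance from the default microphone and return it
    # lowercased; return '' if recognition or the API request fails.
    r = sr.Recognizer()
    with sr.Microphone() as src:
        r.adjust_for_ambient_noise(src)
        audio = r.listen(src)
    try:
        return r.recognize_google(audio).lower()
    except (sr.UnknownValueError, sr.RequestError):
        return ''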
#!/usr/bin/env python3

import sys
import hashlib

# Usage
if len(sys.argv) != 2:
    print("usage: part2.py puzzle_input")
    exit(1)

# Get secret
puzzle_input = sys.argv[1]
input_num = 0

# Calculate
for i in range(sys.maxsize):
    digest = hashlib.md5(puzzle_input.encode('utf-8') + str(i).encode('utf-8')).hexdigest()
    if digest.startswith('000000'):  # must start with 6 zeros
        input_num = i
        break

# Print results
print(f'puzzle_input: {puzzle_input} solved with {input_num}')

print("\ndone.")
normal
{ "blob_id": "1219f7b7ac335f3a69e289d1ab2b6318a2aef23f", "index": 1900, "step-1": "<mask token>\n", "step-2": "<mask token>\nif len(sys.argv) != 2:\n print('usage: part2.py puzzle_input')\n exit(1)\n<mask token>\nfor i in range(sys.maxsize):\n digest = hashlib.md5(puzzle_input.encode('utf-8') + str(i).encode('utf-8')\n ).hexdigest()\n if digest.startswith('000000'):\n input_num = i\n break\nprint(f'puzzle_input: {puzzle_input} solved with {input_num}')\nprint('\\ndone.')\n", "step-3": "<mask token>\nif len(sys.argv) != 2:\n print('usage: part2.py puzzle_input')\n exit(1)\npuzzle_input = sys.argv[1]\ninput_num = 0\nfor i in range(sys.maxsize):\n digest = hashlib.md5(puzzle_input.encode('utf-8') + str(i).encode('utf-8')\n ).hexdigest()\n if digest.startswith('000000'):\n input_num = i\n break\nprint(f'puzzle_input: {puzzle_input} solved with {input_num}')\nprint('\\ndone.')\n", "step-4": "import sys\nimport hashlib\nif len(sys.argv) != 2:\n print('usage: part2.py puzzle_input')\n exit(1)\npuzzle_input = sys.argv[1]\ninput_num = 0\nfor i in range(sys.maxsize):\n digest = hashlib.md5(puzzle_input.encode('utf-8') + str(i).encode('utf-8')\n ).hexdigest()\n if digest.startswith('000000'):\n input_num = i\n break\nprint(f'puzzle_input: {puzzle_input} solved with {input_num}')\nprint('\\ndone.')\n", "step-5": "#!/usr/bin/env python3\n\nimport sys\nimport hashlib\n\n# Usage\nif len(sys.argv) != 2:\n\tprint(\"usage: part2.py puzzle_input\")\n\texit(1)\n\n# Get Secret\npuzzle_input = sys.argv[1]\ninput_num = 0\n\n# Calcuate \nfor i in range(sys.maxsize):\n\tdigest = hashlib.md5(puzzle_input.encode('utf-8')+str(i).encode('utf-8')).hexdigest()\n\tif (digest.startswith('000000')): # must start with 6 zeros\n\t\tinput_num = i\n\t\tbreak;\n\n# Print Results\t\t\nprint(f'puzzle_input: {puzzle_input} solved with {input_num}')\n\nprint(\"\\ndone.\");\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
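The script above is a brute-force proof-of-work search: each extra required leading zero multiplies the expected work by 16, so six zeros costs about 16**6 ≈ 16.8 million MD5 hashes on average. The same search as a reusable function (a sketch, not part of the original script):

import hashlib
from itertools import count

def mine(secret, zeros=6):
    # Smallest i such that MD5(secret + str(i)) starts with `zeros` zeros.
    prefix = '0' * zeros
    for i in count():
        digest = hashlib.md5((secret + str(i)).encode('utf-8')).hexdigest()
        if digest.startswith(prefix):
            return i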
class Node(object):
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.parent = None


class tree(object):
    def __init__(self):
        self.root = None

    def insert(self, root, value):
        """Insert value into the subtree rooted at root; duplicates are ignored."""
        if self.root is None:
            self.root = Node(value)
        else:
            if value < root.data:
                if root.left is None:
                    root.left = Node(value)
                else:
                    self.insert(root.left, value)
            elif value > root.data:
                if root.right is None:
                    root.right = Node(value)
                else:
                    self.insert(root.right, value)
        return root

    def delete(self, root, data, parent):
        """Delete data from the subtree rooted at root and return its new root."""
        if root is None:
            return root
        if root.data < data:
            parent = root
            root.right = self.delete(root.right, data, parent)
        elif root.data > data:
            parent = root
            root.left = self.delete(root.left, data, parent)
        else:
            if root.left is None and root.right is None:
                # Leaf node: simply unlink it from its parent.
                if data > parent.data:
                    parent.right = None
                else:
                    parent.left = None
                root = None
            elif root.left is None:
                # Only a right child: splice it into the parent's slot.
                if data > parent.data:
                    parent.right = root.right
                    root = parent.right
                else:
                    parent.left = root.right
                    root = parent.left
            elif root.right is None:
                # Only a left child: splice it into the parent's slot.
                if data > parent.data:
                    parent.right = root.left
                    root = parent.right
                else:
                    parent.left = root.left
                    root = parent.left
            else:
                # Two children: copy the in-order successor's value up,
                # then delete that successor from the right subtree.
                temp = self.successor(root.right)
                root.data = temp.data
                root.right = self.delete(root.right, temp.data, parent)
        return root

    def successor(self, root):
        """Return the leftmost (smallest) node of the subtree rooted at root."""
        temp = root
        while temp.left:
            temp = temp.left
        return temp

    def inorder(self, root):
        if root is not None:
            self.inorder(root.left)
            print(root.data)
            self.inorder(root.right)


def main():
    Tree = tree()
    l = [50, 30, 20, 40, 70, 60, 80]
    for item in l:
        Tree.insert(Tree.root, item)
    print(Tree.delete(Tree.root, 20, None))
    print("inorder after deleting 20:")
    print(Tree.inorder(Tree.root))
    print(Tree.delete(Tree.root, 30, None))
    print(Tree.delete(Tree.root, 50, None))
    print(Tree.inorder(Tree.root))


main()
normal
{ "blob_id": "64c32b3ada7fff51a7c4b07872b7688e100897d8", "index": 81, "step-1": "class Node(object):\n <mask token>\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<mask token>\n", "step-2": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\n<mask token>\n", "step-3": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n 
self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\ndef main():\n Tree = tree()\n l = [50, 30, 20, 40, 70, 60, 80]\n for item in l:\n Tree.insert(Tree.root, item)\n print(Tree.delete(Tree.root, 20, None))\n print('inorder after deleting 20:')\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root, 30, None))\n print(Tree.delete(Tree.root, 50, None))\n print(Tree.inorder(Tree.root))\n\n\n<mask token>\n", "step-4": "class Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n\nclass tree(object):\n\n def __init__(self):\n self.root = None\n\n def insert(self, root, value):\n if self.root == None:\n self.root = Node(value)\n elif value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left, value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right, value)\n return root\n\n def delete(self, root, data, parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right, data, parent)\n elif root.data > data:\n parent = root\n root.left = self.delete(root.left, data, parent)\n elif root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right, temp.data, parent)\n return root\n\n def successor(self, root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n\n def inorder(self, 
root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n\n\ndef main():\n Tree = tree()\n l = [50, 30, 20, 40, 70, 60, 80]\n for item in l:\n Tree.insert(Tree.root, item)\n print(Tree.delete(Tree.root, 20, None))\n print('inorder after deleting 20:')\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root, 30, None))\n print(Tree.delete(Tree.root, 50, None))\n print(Tree.inorder(Tree.root))\n\n\nmain()\n", "step-5": "class Node(object):\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\nclass tree(object):\n def __init__(self):\n self.root = None\n \n def insert(self,root,value):\n if self.root == None:\n self.root = Node(value)\n else:\n if value < root.data:\n if root.left is None:\n root.left = Node(value)\n else:\n self.insert(root.left,value)\n elif value > root.data:\n if root.right is None:\n root.right = Node(value)\n else:\n self.insert(root.right,value)\n return root \n def delete(self,root,data,parent):\n if root is None:\n return root\n if root.data < data:\n parent = root\n root.right = self.delete(root.right,data,parent)\n elif root.data > data :\n parent = root\n root.left = self.delete(root.left,data,parent)\n else:\n if root is None or root.data != data:\n return False\n elif root.left is None and root.right is None:\n if data > parent.data:\n parent.right = None\n root = None\n else:\n parent.left = None\n root = None\n elif root.left is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.left\n \n elif root.right is None:\n if data > parent.data:\n parent.right = root.right\n root = parent.right\n else:\n parent.left = root.right\n root = parent.right\n else:\n temp = self.successor(root.right)\n root.data = temp.data\n root.right = self.delete(root.right,temp.data,parent)\n \n return root\n \n def successor(self,root):\n temp = root\n if root.right:\n while temp.left:\n temp = temp.left\n return temp\n def inorder(self,root):\n if root is not None:\n self.inorder(root.left)\n print(root.data)\n self.inorder(root.right)\n \ndef main():\n Tree = tree()\n l =[50,30,20,40,70,60,80]\n for item in l:\n Tree.insert(Tree.root,item)\n print(Tree.delete(Tree.root,20,None))\n print(\"inorder after deleting 20:\")\n print(Tree.inorder(Tree.root))\n print(Tree.delete(Tree.root,30,None))\n print(Tree.delete(Tree.root,50,None))\n print(Tree.inorder(Tree.root))\n \nmain()\n \n \n \n \n \n \n", "step-ids": [ 7, 8, 9, 10, 11 ] }
[ 7, 8, 9, 10, 11 ]
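A quick way to sanity-check a binary search tree like the one captured in the record above is to verify that an inorder traversal returns the inserted keys in sorted order. The sketch below is a minimal, self-contained rewrite for illustration (it deliberately does not reuse the record's Node/tree classes) and uses the record's own sample values:

class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

def insert(root, value):
    # Standard BST insertion: smaller keys go left, larger keys go right;
    # duplicates are ignored.
    if root is None:
        return Node(value)
    if value < root.data:
        root.left = insert(root.left, value)
    elif value > root.data:
        root.right = insert(root.right, value)
    return root

def inorder(root, out):
    # Left subtree, node, right subtree: ascending order for a valid BST.
    if root is not None:
        inorder(root.left, out)
        out.append(root.data)
        inorder(root.right, out)

values = [50, 30, 20, 40, 70, 60, 80]
root = None
for item in values:
    root = insert(root, item)
result = []
inorder(root, result)
assert result == sorted(values)
print(result)  # [20, 30, 40, 50, 60, 70, 80]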
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: André Pacheco
E-mail: [email protected]

This file implements the methods and functions to load the images as a PyTorch dataset

If you find any bug or have some suggestion, please, email me.
"""

from PIL import Image
from torch.utils import data
import torchvision.transforms as transforms


class BuildDataset (data.Dataset):
    """
    This is the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset
    class and implement the following methods: __len__, __getitem__ and the constructor __init__
    """

    def __init__(self, imgs_path, labels, extra_info=None, transform=None):
        """
        The constructor gets the image paths and their respective labels and extra information (if it exists).
        In addition, you can specify some transform operations to be carried out on the images.

        It's important to note that the images must match the labels (and the extra information, if it exists).
        For example, imgs_path[x]'s label must be at labels[x].

        Parameters:
        :param imgs_path (list): a list of strings containing the image paths
        :param labels (list): a list of labels for each image
        :param extra_info (list): a list of extra information regarding each image. If None, there is no information.
        Default is None.
        :param transform (torchvision.transforms.transforms.Compose): transform operations to be carried out on the images
        """

        self.imgs_path = imgs_path
        self.labels = labels
        self.extra_info = extra_info

        # if transform is None, we need to ensure that the PIL image will be transformed to a tensor, otherwise we'll
        # get an exception
        if transform is not None:
            self.transform = transform
        else:
            self.transform = transforms.Compose([
                transforms.Resize((224, 224)),
                transforms.ToTensor()
            ])

    def __len__(self):
        """ This method just returns the dataset size """
        return len(self.imgs_path)

    def __getitem__(self, item):
        """
        It gets the image, labels and extra information (if it exists) according to the index informed in `item`.
        It also performs the transform on the image.

        :param item (int): an index in the interval [0, ..., len(imgs_path)-1]
        :return (tuple): a tuple containing the image, its label and extra information (if it exists)
        """

        image = Image.open(self.imgs_path[item]).convert("RGB")

        # Applying the transformations
        image = self.transform(image)

        # The image name (without folder or extension) is returned so batches can be traced back to files
        img_name = self.imgs_path[item].split('/')[-1].split('.')[0]

        if self.extra_info is None:
            extra_info = []
        else:
            extra_info = self.extra_info[item]

        if self.labels is None:
            labels = []
        else:
            labels = self.labels[item]

        return image, labels, extra_info, img_name


def get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):
    """
    This function gets a list of image paths, their labels and extra information (if it exists) and returns a DataLoader
    for these files. You can also set some transformations using torchvision.transforms in order to perform data
    augmentation. Lastly, params is a dictionary in which you can set the following parameters:
    batch_size (int): the batch size for the dataset. If it's not informed, the default is 30
    shuf (bool): set it to True if you want to shuffle the dataset. If it's not informed, the default is True
    num_workers (int): the number of CPU threads used to load the dataset. If it's not informed, the default is 4

    :param imgs_path (list): a list of strings containing the image paths
    :param labels (list): a list of labels for each image
    :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's
    no extra information. Default is None
    :param transform (torchvision.transforms, optional): use torchvision.transforms.Compose to perform the data
    augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the
    augmentation. If it's None, no augmentation will be performed. Default is None
    :param params (dictionary, optional): this dictionary contains the following parameters:
    batch_size: the batch size. If the key is not informed or params = None, the default value will be 30
    shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,
    the default value will be True
    num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default
    value will be 4
    pin_memory: set it to True so the DataLoader copies tensors into pinned (page-locked) memory, which speeds up
    host-to-GPU transfer. If the key is not informed or params = None, the default value will be True
    :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chosen params
    """

    dt = BuildDataset(imgs_path, labels, extra_info, transform)

    # Checking the params values. If a key is not defined in params or if params is None, the default values described
    # below are used:
    batch_size = 30
    shuf = True
    num_workers = 4
    pin_memory = True

    # However, if params is defined, we use the values described in it:
    if params is not None:
        if 'batch_size' in params:
            batch_size = params['batch_size']
        if 'shuf' in params:
            shuf = params['shuf']
        if 'num_workers' in params:
            num_workers = params['num_workers']
        if 'pin_memory' in params:
            pin_memory = params['pin_memory']

    # Calling the dataloader
    dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers,
                         pin_memory=pin_memory)

    return dl
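A usage sketch for the BuildDataset/get_data_loader pair above may help. The image paths, labels and augmentation pipeline below are invented for illustration (only the two functions come from the code itself), and the snippet assumes the listed files actually exist on disk:

import torchvision.transforms as transforms

# Hypothetical inputs: two images on disk and their integer class labels
imgs_path = ['/data/imgs/cat_001.jpg', '/data/imgs/dog_002.jpg']
labels = [0, 1]

# Optional augmentation pipeline; without it, get_data_loader falls back to Resize + ToTensor
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.Resize((224, 224)),
    transforms.ToTensor()
])

dl = get_data_loader(imgs_path, labels, transform=transform,
                     params={'batch_size': 2, 'num_workers': 0, 'pin_memory': False})

for image, batch_labels, extra_info, img_name in dl:
    print(image.shape)   # expected: torch.Size([2, 3, 224, 224])
    print(batch_labels)  # expected: tensor with the two labels (order may vary, shuf defaults to True)
    print(img_name)      # expected: the file names without folder or extension

num_workers=0 keeps loading in the main process, which avoids multiprocessing surprises in a quick test.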
normal
{ "blob_id": "4e31c2a80bec77a1f5aafc8a91617fb4b2941788", "index": 432, "step-1": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n <mask token>\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. 
If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. 
If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader(imgs_path, labels, extra_info=None, transform=None,\n params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. 
If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n if params is not None:\n if 'batch_size' in params.keys():\n batch_size = params['batch_size']\n if 'shuf' in params.keys():\n shuf = params['shuf']\n if 'num_workers' in params.keys():\n num_workers = params['num_workers']\n if 'pin_memory' in params.keys():\n pin_memory = params['pin_memory']\n dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf,\n num_workers=num_workers, pin_memory=pin_memory)\n return dl\n", "step-4": "<mask token>\nfrom PIL import Image\nfrom torch.utils import data\nimport torchvision.transforms as transforms\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader(imgs_path, labels, extra_info=None, transform=None,\n params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. 
If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n if params is not None:\n if 'batch_size' in params.keys():\n batch_size = params['batch_size']\n if 'shuf' in params.keys():\n shuf = params['shuf']\n if 'num_workers' in params.keys():\n num_workers = params['num_workers']\n if 'pin_memory' in params.keys():\n pin_memory = params['pin_memory']\n dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf,\n num_workers=num_workers, pin_memory=pin_memory)\n return dl\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: André Pacheco\nE-mail: [email protected]\n\nThis file implements the methods and functions to load the image as a PyTorch dataset\n\nIf you find any bug or have some suggestion, please, email me.\n\"\"\"\n\nfrom PIL import Image\nfrom torch.utils import data\nimport torchvision.transforms as transforms\n\n\nclass BuildDataset (data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. 
If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n\n # if transform is None, we need to ensure that the PIL image will be transformed to tensor, otherwise we'll got\n # an exception\n if (transform is not None):\n self.transform = transform\n else:\n self.transform = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor()\n ])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n\n image = Image.open(self.imgs_path[item]).convert(\"RGB\")\n\n # Applying the transformations\n image = self.transform(image)\n\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n # print(self.labels[item])\n # print(self.extra_info[item])\n\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. 
If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n\n\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n\n # Checking the params values. If it's not defined in params of if params is None, the default values are described\n # below:\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n\n # However, if the params is defined, we used the values described on it:\n if (params is not None):\n if ('batch_size' in params.keys()):\n batch_size = params['batch_size']\n if ('shuf' in params.keys()):\n shuf = params['shuf']\n if ('num_workers' in params.keys()):\n num_workers = params['num_workers']\n if ('pin_memory' in params.keys()):\n pin_memory = params['pin_memory']\n\n # Calling the dataloader\n dl = data.DataLoader (dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers,\n pin_memory=pin_memory)\n\n return dl\n\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
import pandas as pd
import numpy

dato = pd.read_csv('medallero_Panamericanos_Lima2019.csv')
print(dato)

def calculo_suma():
    print("--- Pandas method ---")
    print("The sum of the values is: ", dato['Bronce'].sum())
    print("--- NumPy function ---")
    print("The sum of the values is: ", numpy.sum(dato['Bronce']))
    print("--- Other forms ---")
    print(dato.Bronce.sum())
    print(numpy.sum(dato.Bronce))

def calculo_conteo():
    print("--- Python built-in ---")
    print("The number of elements is: ", len(dato['Bronce']))
    print(len(dato.Bronce))
    print("--- Pandas method ---")
    print("The number of elements is: ", dato['Bronce'].count())
    print(dato.Bronce.count())
    print("--- NumPy function ---")
    print("The number of elements is: ", numpy.size(dato['Bronce']))
    print(numpy.size(dato.Bronce))

def calculo_media():
    print("--- Manual calculation ---")
    print("The mean is: ", dato.Bronce.sum() / dato.Bronce.count())
    print("--- Pandas method ---")
    print("The mean is: ", dato.Bronce.mean())
    print("--- NumPy function ---")
    print("The mean is: ", numpy.mean(dato.Bronce))

def calculo_media2(redondeo=2):
    print("--- Mean rounded to", redondeo, "decimals ---")
    media = dato.Bronce.mean()
    media = round(media, redondeo)
    return media

def calculo_moda():
    moda = dato.Bronce.mode()
    return moda

def calculo_mediana():
    # The median only makes sense on sorted values, so sort the column first
    valores = dato.Bronce.sort_values().reset_index(drop=True)
    nro_item = numpy.size(valores)
    pos_mediana = round(nro_item / 2)
    print('Median position: ', pos_mediana)
    mediana = valores[pos_mediana - 1]
    return mediana

def calculo_percentiles():
    tramos = [20, 50, 75]
    percentiles = numpy.percentile(dato['Bronce'], tramos)
    print('Percentiles', percentiles)

def grafico_percentil():
    import matplotlib.pylab as plt
    import seaborn as sb
    sb.boxplot(y="Bronce", data=dato)
    plt.show()

def calculo_varianza():
    # Restrict the calculation to the numeric medal column; numpy.var on the
    # whole DataFrame would trip over its non-numeric columns
    vari = numpy.var(dato['Bronce'])
    print("The variance is: ", vari)

calculo_varianza()
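The same descriptive statistics can be tried without the CSV by feeding a tiny in-memory frame. The medal counts below are made up for illustration, and the expected results in the comments follow NumPy's defaults (population variance with ddof=0, linear percentile interpolation):

import pandas as pd
import numpy

demo = pd.DataFrame({'Bronce': [4, 2, 7, 2, 5]})

print('sum:        ', numpy.sum(demo['Bronce']))                    # 20
print('count:      ', demo['Bronce'].count())                       # 5
print('mean:       ', round(demo['Bronce'].mean(), 2))              # 4.0
print('mode:       ', list(demo['Bronce'].mode()))                  # [2]
print('median:     ', demo['Bronce'].median())                      # 4.0 (sorted: 2, 2, 4, 5, 7)
print('percentiles:', numpy.percentile(demo['Bronce'], [20, 50, 75]))  # [2. 4. 5.]
print('variance:   ', numpy.var(demo['Bronce']))                    # 3.6 = (0+4+9+4+1)/5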
normal
{ "blob_id": "f5542cfe6827c352cc6e6da1147e727f2b2d8247", "index": 9586, "step-1": "<mask token>\n\n\ndef calculo_suma():\n print('---Funcion con Python---')\n print('la sumatoria de los valores: ', dato['Bronce'].sum())\n print('---Funcion con Numpy---')\n print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))\n print('---Otras Formas---')\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\n\ndef calculo_conteo():\n print('---Funcion de Python---')\n print('Los número de elementos son :', len(dato['Bronce']))\n print(len(dato.Bronce))\n print('---Funcion de Pandas---')\n print('Los número de elementos son :', dato['Bronce'].count())\n print(dato.Bronce.count())\n print('---Funcion de Numpy---')\n print('Los número de elementos son :', numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\n\ndef calculo_media():\n print('---Funcion de Python---')\n print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())\n print('---Funcion de Pandas---')\n print('La media es: ', dato.Bronce.mean())\n print('---Funcion de Numpy---')\n print('La media es: ', numpy.mean(dato.Bronce))\n\n\ndef calculo_media2(redondeo=2):\n print('---Mediana con 2 decimales---')\n media = dato.Bronce.mean()\n media = round(media, redondeo)\n return media\n\n\ndef calculo_moda():\n moda = dato.Bronce.mode()\n return moda\n\n\ndef calculo_mediana():\n nro_item = numpy.size(dato.Bronce)\n pos_mediana = round(nro_item / 2)\n print('Posicion mediana: ', pos_mediana)\n mediana = dato.Bronce[pos_mediana - 1]\n return mediana\n\n\ndef calculo_percentiles():\n tramos = [20, 50, 75]\n percentiles = numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y='Bronce', data=dato)\n plt.show()\n\n\ndef calculo_varianza():\n vari = numpy.var(dato)\n print('La varianza es:', vari)\n\n\n<mask token>\n", "step-2": "<mask token>\nprint(dato)\n\n\ndef calculo_suma():\n print('---Funcion con Python---')\n print('la sumatoria de los valores: ', dato['Bronce'].sum())\n print('---Funcion con Numpy---')\n print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))\n print('---Otras Formas---')\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\n\ndef calculo_conteo():\n print('---Funcion de Python---')\n print('Los número de elementos son :', len(dato['Bronce']))\n print(len(dato.Bronce))\n print('---Funcion de Pandas---')\n print('Los número de elementos son :', dato['Bronce'].count())\n print(dato.Bronce.count())\n print('---Funcion de Numpy---')\n print('Los número de elementos son :', numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\n\ndef calculo_media():\n print('---Funcion de Python---')\n print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())\n print('---Funcion de Pandas---')\n print('La media es: ', dato.Bronce.mean())\n print('---Funcion de Numpy---')\n print('La media es: ', numpy.mean(dato.Bronce))\n\n\ndef calculo_media2(redondeo=2):\n print('---Mediana con 2 decimales---')\n media = dato.Bronce.mean()\n media = round(media, redondeo)\n return media\n\n\ndef calculo_moda():\n moda = dato.Bronce.mode()\n return moda\n\n\ndef calculo_mediana():\n nro_item = numpy.size(dato.Bronce)\n pos_mediana = round(nro_item / 2)\n print('Posicion mediana: ', pos_mediana)\n mediana = dato.Bronce[pos_mediana - 1]\n return mediana\n\n\ndef calculo_percentiles():\n tramos = [20, 50, 75]\n percentiles = numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', 
percentiles)\n\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y='Bronce', data=dato)\n plt.show()\n\n\ndef calculo_varianza():\n vari = numpy.var(dato)\n print('La varianza es:', vari)\n\n\ncalculo_varianza()\n", "step-3": "<mask token>\ndato = pd.read_csv('medallero_Panamericanos_Lima2019.csv')\nprint(dato)\n\n\ndef calculo_suma():\n print('---Funcion con Python---')\n print('la sumatoria de los valores: ', dato['Bronce'].sum())\n print('---Funcion con Numpy---')\n print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))\n print('---Otras Formas---')\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\n\ndef calculo_conteo():\n print('---Funcion de Python---')\n print('Los número de elementos son :', len(dato['Bronce']))\n print(len(dato.Bronce))\n print('---Funcion de Pandas---')\n print('Los número de elementos son :', dato['Bronce'].count())\n print(dato.Bronce.count())\n print('---Funcion de Numpy---')\n print('Los número de elementos son :', numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\n\ndef calculo_media():\n print('---Funcion de Python---')\n print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())\n print('---Funcion de Pandas---')\n print('La media es: ', dato.Bronce.mean())\n print('---Funcion de Numpy---')\n print('La media es: ', numpy.mean(dato.Bronce))\n\n\ndef calculo_media2(redondeo=2):\n print('---Mediana con 2 decimales---')\n media = dato.Bronce.mean()\n media = round(media, redondeo)\n return media\n\n\ndef calculo_moda():\n moda = dato.Bronce.mode()\n return moda\n\n\ndef calculo_mediana():\n nro_item = numpy.size(dato.Bronce)\n pos_mediana = round(nro_item / 2)\n print('Posicion mediana: ', pos_mediana)\n mediana = dato.Bronce[pos_mediana - 1]\n return mediana\n\n\ndef calculo_percentiles():\n tramos = [20, 50, 75]\n percentiles = numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y='Bronce', data=dato)\n plt.show()\n\n\ndef calculo_varianza():\n vari = numpy.var(dato)\n print('La varianza es:', vari)\n\n\ncalculo_varianza()\n", "step-4": "import pandas as pd\nimport numpy\ndato = pd.read_csv('medallero_Panamericanos_Lima2019.csv')\nprint(dato)\n\n\ndef calculo_suma():\n print('---Funcion con Python---')\n print('la sumatoria de los valores: ', dato['Bronce'].sum())\n print('---Funcion con Numpy---')\n print('la sumatoria de los valores: ', numpy.sum(dato['Bronce']))\n print('---Otras Formas---')\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\n\ndef calculo_conteo():\n print('---Funcion de Python---')\n print('Los número de elementos son :', len(dato['Bronce']))\n print(len(dato.Bronce))\n print('---Funcion de Pandas---')\n print('Los número de elementos son :', dato['Bronce'].count())\n print(dato.Bronce.count())\n print('---Funcion de Numpy---')\n print('Los número de elementos son :', numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\n\ndef calculo_media():\n print('---Funcion de Python---')\n print('La media es: ', dato.Bronce.sum() / dato.Bronce.count())\n print('---Funcion de Pandas---')\n print('La media es: ', dato.Bronce.mean())\n print('---Funcion de Numpy---')\n print('La media es: ', numpy.mean(dato.Bronce))\n\n\ndef calculo_media2(redondeo=2):\n print('---Mediana con 2 decimales---')\n media = dato.Bronce.mean()\n media = round(media, redondeo)\n return media\n\n\ndef calculo_moda():\n moda = dato.Bronce.mode()\n 
return moda\n\n\ndef calculo_mediana():\n nro_item = numpy.size(dato.Bronce)\n pos_mediana = round(nro_item / 2)\n print('Posicion mediana: ', pos_mediana)\n mediana = dato.Bronce[pos_mediana - 1]\n return mediana\n\n\ndef calculo_percentiles():\n tramos = [20, 50, 75]\n percentiles = numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y='Bronce', data=dato)\n plt.show()\n\n\ndef calculo_varianza():\n vari = numpy.var(dato)\n print('La varianza es:', vari)\n\n\ncalculo_varianza()\n", "step-5": "import pandas as pd\nimport numpy\n\ndato=pd.read_csv('medallero_Panamericanos_Lima2019.csv')\nprint(dato)\n\ndef calculo_suma():\n print(\"---Funcion con Python---\")\n print(\"la sumatoria de los valores: \", dato['Bronce'].sum())\n print(\"---Funcion con Numpy---\")\n print(\"la sumatoria de los valores: \", numpy.sum(dato['Bronce']))\n print(\"---Otras Formas---\")\n print(dato.Bronce.sum())\n print(numpy.sum(dato.Bronce))\n\ndef calculo_conteo():\n print(\"---Funcion de Python---\")\n print(\"Los número de elementos son :\",len(dato['Bronce']))\n print(len(dato.Bronce))\n print(\"---Funcion de Pandas---\")\n print(\"Los número de elementos son :\",dato['Bronce'].count())\n print(dato.Bronce.count())\n print(\"---Funcion de Numpy---\")\n print(\"Los número de elementos son :\",numpy.size(dato['Bronce']))\n print(numpy.size(dato.Bronce))\n\ndef calculo_media():\n print(\"---Funcion de Python---\")\n print(\"La media es: \",dato.Bronce.sum()/dato.Bronce.count())\n print(\"---Funcion de Pandas---\")\n print(\"La media es: \",dato.Bronce.mean())\n print(\"---Funcion de Numpy---\")\n print(\"La media es: \",numpy.mean(dato.Bronce))\n\ndef calculo_media2(redondeo=2):\n print(\"---Mediana con 2 decimales---\")\n media=dato.Bronce.mean()\n media=round(media, redondeo)\n return media\n\ndef calculo_moda():\n moda=dato.Bronce.mode()\n return moda\ndef calculo_mediana():\n nro_item=numpy.size(dato.Bronce)\n pos_mediana=round(nro_item/2)\n print('Posicion mediana: ', pos_mediana)\n mediana=dato.Bronce[pos_mediana-1]\n return mediana\n\ndef calculo_percentiles():\n tramos =[20, 50, 75]\n percentiles=numpy.percentile(dato['Bronce'], tramos)\n print('Percentiles', percentiles)\n\ndef grafico_percentil():\n import matplotlib.pylab as plt\n import seaborn as sb\n sb.boxplot(y=\"Bronce\", data=dato)\n plt.show()\n\ndef calculo_varianza():\n vari=numpy.var(dato)\n print(\"La varianza es:\" ,vari)\n\ncalculo_varianza()\n", "step-ids": [ 9, 10, 11, 12, 13 ] }
[ 9, 10, 11, 12, 13 ]
# MolecularMatch API (MM-DATA) Python Example Sheet
# Based on documentation at https://api.molecularmatch.com
# Author: Shane Neeley, MolecularMatch Inc., Jan. 30, 2018

import requests
import json
import numpy as np
import sys

resourceURLs = {
	"trialSearch": "/v2/search/trials",
	"drugSearch": "/v2/search/drugs",
	"publicationSearch": "/v2/search/publications",
	"mutationGet": "/v2/mutation/get",
	"geneGet": "/v2/gene/get",
	"mutationClassify": "/v2/mutation/classify",
	"validateTerms": "/v2/validate/terms",
	"assertionSearch": "/v2/search/assertions",
	"assertionExport": "/v2/export/assertions"
}
mmService = "https://api.molecularmatch.com"

# CHANGE THIS TO YOUR KEY or pass it as a parameter (e.g. $ python3 publicationsAPI.py key)
apiKey = '<your api key>'
if apiKey == '<your api key>' and len(sys.argv) > 1:
	apiKey = sys.argv[1]

# TODO: geolocation searches

#####################search trials##################################

url = mmService + resourceURLs["trialSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
	'apiKey': apiKey,
	'filters': filters
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))

##################################################################
#####################SCENARIOS####################################
##################################################################

#### Clinical trial reporting

# When looking up trials for an actual patient, it is important to include the filters of Enrolling and Interventional
url = mmService + resourceURLs["trialSearch"]
filters = [
	{"facet":"CONDITION","term":"Colorectal cancer"},
	{"facet":"MUTATION","term":"BRAF V600E"},
	{"facet":"STATUS", "term":"Enrolling"},
	{"facet":"TRIALTYPE", "term":"Interventional"},
	{"facet":"COUNTRY", "term":"France"}
]
payload = {
	'apiKey': apiKey,
	'filters': filters
}
r = requests.post(url, json=payload)

# Question: how many trials for a patient with this mutation and disease are interventional and enrolling in France?
print(r.json()['total'])
# Answer: 4

# Question: what are these trials' ClinicalTrials.gov IDs, titles and email addresses for contact?
for i in np.arange(0, len(r.json()['rows'])):
	print(r.json()['rows'][i]['id'])
	print(r.json()['rows'][i]['briefTitle'])
	print(r.json()['rows'][i]['overallContact'])
# Answer:
# NCT02291289 - A Multi-Center Study of Biomarker-Driven Therapy in Metastatic Colorectal Cancer - [email protected]
# NCT01677741 - A Study to Determine Safety, Tolerability and Pharmacokinetics of Oral Dabrafenib In Children and Adolescent Subjects - [email protected]
# NCT02788279 - A Study to Investigate Efficacy and Safety of Cobimetinib Plus Atezolizumab and Atezolizumab Monotherapy Versus Regorafenib in Participants With Metastatic Colorectal Adenocarcinoma - [email protected]
# NCT02751177 - Detection of KRAS, NRAS et BRAF Mutations in Plasma Circulating DNA From Patients With Metastatic Colorectal Cancer - [email protected]

# Question: what are all the mutations that are associated with trial NCT02291289?
filters = [
	{"facet":"ID","term":"NCT02291289"}
]
payload = {
	'apiKey': apiKey,
	'filters': filters
}
r = requests.post(url, json=payload)
# Note: must have tags activated on api key for this to work. Not all api key users get tags.
for tag in r.json()['rows'][0]['tags']: if tag['facet'] == "MUTATION": print(tag) # Answer: # 3 mutations are for inclusion criteria # {'facet': 'MUTATION', 'term': 'EGFR P546S', 'alias': 'EGFR P546S', 'priority': '0', 'filterType': 'include'} # {'facet': 'MUTATION', 'term': 'BRAF V600E', 'alias': 'BRAF V600E', 'priority': '0', 'filterType': 'include'} # {'facet': 'MUTATION', 'term': 'Microsatellite instability', 'alias': 'Microsatellite instability', 'priority': '0', 'filterType': 'include'} # 2 mutations are for exclusion criteria (filterType = 'exclude') # {'facet': 'MUTATION', 'term': 'EGFR S492R', 'alias': 'EGFR S492R', 'priority': 1, 'filterType': 'exclude'} # {'facet': 'MUTATION', 'term': 'BRAF G469L', 'alias': 'BRAF G469L', 'priority': 1, 'filterType': 'exclude'} # See more about the trial data model at: https://api.molecularmatch.com/#trialDataModel #### Mutation details lookup # So you want to know everything there is to know about BRAF V600E? url = mmService + resourceURLs["mutationGet"] payload = { 'apiKey': apiKey, 'name': 'BRAF V600E' } r = requests.get(url, params=payload) # Question: what databases have reported this mutation? print(r.json()['sources']) # Answer: 'COSMIC', 'CIViC', 'DoCM', 'cBioPortal', 'ClinVar' # Question: is there a known protein domain this mutation is in? for i in r.json()['parents']: if (i['type'] == 'domain'): print(i) # Answer: BRAF Pkinase_Tyr domain (protein tyrosine kinase domain) # What is the clinical interpretation of BRAF V600E? Are there trials, drugs, publications about it? url = mmService + resourceURLs["mutationClassify"] payload = { 'apiKey': apiKey, 'variant': 'BRAF V600E', 'condition': 'Lung cancer' } r = requests.post(url, json=payload) # Question: How does MolecularMatch classify this mutation in this condition? print(r.json()['classifications'][0]['classification']) # Answer: actionable # Question: How many drugs approved and on label for the condition provided? print(r.json()['classifications'][0]['drugsApprovedOnLabelCount']) # Answer: 0 # Question: How many drugs approved but off-label for the condition provided? print(r.json()['classifications'][0]['drugsApprovedOffLabelCount']) # Answer: 6 # Question: What about experimental drugs? print(r.json()['classifications'][0]['drugsExperimentalCount']) # Answer: 4 # Question: How many clinical trials are open for this mutation and condition? print(r.json()['classifications'][0]['trialCount']) # Answer: 24 # Question: Is there a lot of research publications about this mutation in this condition? print(r.json()['classifications'][0]['publicationCount']) # Answer: 47 # Question: Ok, what are these 4 experimental drugs? 
url = mmService + resourceURLs["drugSearch"] # set geneExpand for Drug to False so drugs return only for V600E, not BRAF (see https://api.molecularmatch.com/#geneExpansion) filters = [ {'facet':'CONDITION','term':'Lung cancer'}, {'facet':'MUTATION','term':'BRAF V600E', "geneExpand": {"Drug": False}} ] payload = { 'apiKey': apiKey, 'filters': filters, 'mode': 'discovery' } r = requests.post(url, json=payload) for drug in r.json()['rows']: print(drug) if drug['approved'] == False: print(drug['name']) # Answer: # Lgx818 # Plx8394 # BGB-283 # Cep-32496 ################################################################## #####################BASIC QUERIES################################ ################################################################## ####################search drugs################################## url = mmService + resourceURLs["drugSearch"] filters = [{'facet':'CONDITION','term':'Lung cancer'}] payload = { 'apiKey': apiKey, 'filters': filters, 'mode': 'discovery' # 'criteriaunmet' # multiple modes avaiable for drugsearch. see api docs. } r = requests.post(url, json=payload) print(json.dumps(r.json())) #####################search trials################################## url = mmService + resourceURLs["trialSearch"] filters = [{'facet':'CONDITION','term':'Lung cancer'}] payload = { 'apiKey': apiKey, 'filters': filters } r = requests.post(url, json=payload) print(json.dumps(r.json())) # Search trials by various ID types filters = [ {"facet":"ID","term":"EUDRACT2017-003305-18"} ] payload = { 'apiKey': apiKey, 'filters': filters } r = requests.post(url, json=payload) print('r here') print(r.json()) #####################search publications############################# url = mmService + resourceURLs["publicationSearch"] filters = [{'facet':'CONDITION','term':'Lung cancer'}] payload = { 'apiKey': apiKey, 'filters': filters } r = requests.post(url, json=payload) print(json.dumps(r.json())) ####################get mutation################################### url = mmService + resourceURLs["mutationGet"] payload = { 'apiKey': apiKey, 'name': 'BRAF V600E' } r = requests.get(url, params=payload) print(json.dumps(r.json())) ######################get gene################################# url = mmService + resourceURLs["geneGet"] payload = { 'apiKey': apiKey, 'symbol': 'BRAF' } r = requests.get(url, params=payload) print(json.dumps(r.json())) ######################classify mutation############################## url = mmService + resourceURLs["mutationClassify"] payload = { 'apiKey': apiKey, 'variant': 'EGFR T790M', 'condition': 'Lung cancer' } r = requests.post(url, json=payload) print(json.dumps(r.json()))
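One refactor worth noting for the example sheet above: every search repeats the same payload/POST/print boilerplate. The helper below is a hedged sketch that only reuses the endpoint paths from the sheet; the 30-second timeout and the raise-on-HTTP-error policy are assumptions for illustration, not documented MolecularMatch behavior.

import requests

def mm_post(resource, api_key, filters, extra=None, base='https://api.molecularmatch.com'):
    # Build the payload the same way the sheet does, with optional extra keys (e.g. 'mode')
    payload = {'apiKey': api_key, 'filters': filters}
    if extra:
        payload.update(extra)
    r = requests.post(base + resource, json=payload, timeout=30)
    r.raise_for_status()  # surface HTTP errors instead of failing later on r.json()
    return r.json()

# Example call mirroring the trial search above (hypothetical key):
# data = mm_post('/v2/search/trials', '<your api key>',
#                [{'facet': 'CONDITION', 'term': 'Lung cancer'}])
# print(data.get('total'))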
normal
{ "blob_id": "b4593b3229b88db26c5e200431d00838c357c8e0", "index": 2359, "step-1": "<mask token>\n", "step-2": "<mask token>\nif apiKey == '<your api key>' and sys.argv[1]:\n apiKey = sys.argv[1]\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(r.json()['total'])\nfor i in np.arange(0, len(r.json()['rows'])):\n print(r.json()['rows'][i]['id'])\n print(r.json()['rows'][i]['briefTitle'])\n print(r.json()['rows'][i]['overallContact'])\n<mask token>\nfor tag in r.json()['rows'][0]['tags']:\n if tag['facet'] == 'MUTATION':\n print(tag)\n<mask token>\nprint(r.json()['sources'])\nfor i in r.json()['parents']:\n if i['type'] == 'domain':\n print(i)\n<mask token>\nprint(r.json()['classifications'][0]['classification'])\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\nprint(r.json()['classifications'][0]['trialCount'])\nprint(r.json()['classifications'][0]['publicationCount'])\n<mask token>\nfor drug in r.json()['rows']:\n print(drug)\n if drug['approved'] == False:\n print(drug['name'])\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint('r here')\nprint(r.json())\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(json.dumps(r.json()))\n<mask token>\nprint(json.dumps(r.json()))\n", "step-3": "<mask token>\nresourceURLs = {'trialSearch': '/v2/search/trials', 'drugSearch':\n '/v2/search/drugs', 'publicationSearch': '/v2/search/publications',\n 'mutationGet': '/v2/mutation/get', 'geneGet': '/v2/gene/get',\n 'mutationClassify': '/v2/mutation/classify', 'validateTerms':\n '/v2/validate/terms', 'assertionSearch': '/v2/search/assertions',\n 'assertionExport': '/v2/export/assertions'}\nmmService = 'https://api.molecularmatch.com'\napiKey = '<your api key>'\nif apiKey == '<your api key>' and sys.argv[1]:\n apiKey = sys.argv[1]\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Colorectal cancer'}, {'facet':\n 'MUTATION', 'term': 'BRAF V600E'}, {'facet': 'STATUS', 'term':\n 'Enrolling'}, {'facet': 'TRIALTYPE', 'term': 'Interventional'}, {\n 'facet': 'COUNTRY', 'term': 'France'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(r.json()['total'])\nfor i in np.arange(0, len(r.json()['rows'])):\n print(r.json()['rows'][i]['id'])\n print(r.json()['rows'][i]['briefTitle'])\n print(r.json()['rows'][i]['overallContact'])\nfilters = [{'facet': 'ID', 'term': 'NCT02291289'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nfor tag in r.json()['rows'][0]['tags']:\n if tag['facet'] == 'MUTATION':\n print(tag)\nurl = mmService + resourceURLs['mutationGet']\npayload = {'apiKey': apiKey, 'name': 'BRAF V600E'}\nr = requests.get(url, params=payload)\nprint(r.json()['sources'])\nfor i in r.json()['parents']:\n if i['type'] == 'domain':\n print(i)\nurl = mmService + resourceURLs['mutationClassify']\npayload = {'apiKey': apiKey, 'variant': 'BRAF V600E', 'condition':\n 'Lung cancer'}\nr = requests.post(url, 
json=payload)\nprint(r.json()['classifications'][0]['classification'])\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\nprint(r.json()['classifications'][0]['trialCount'])\nprint(r.json()['classifications'][0]['publicationCount'])\nurl = mmService + resourceURLs['drugSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}, {'facet':\n 'MUTATION', 'term': 'BRAF V600E', 'geneExpand': {'Drug': False}}]\npayload = {'apiKey': apiKey, 'filters': filters, 'mode': 'discovery'}\nr = requests.post(url, json=payload)\nfor drug in r.json()['rows']:\n print(drug)\n if drug['approved'] == False:\n print(drug['name'])\nurl = mmService + resourceURLs['drugSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters, 'mode': 'discovery'}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nfilters = [{'facet': 'ID', 'term': 'EUDRACT2017-003305-18'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint('r here')\nprint(r.json())\nurl = mmService + resourceURLs['publicationSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['mutationGet']\npayload = {'apiKey': apiKey, 'name': 'BRAF V600E'}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['geneGet']\npayload = {'apiKey': apiKey, 'symbol': 'BRAF'}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['mutationClassify']\npayload = {'apiKey': apiKey, 'variant': 'EGFR T790M', 'condition':\n 'Lung cancer'}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n", "step-4": "import requests\nimport json\nimport numpy as np\nimport sys\nresourceURLs = {'trialSearch': '/v2/search/trials', 'drugSearch':\n '/v2/search/drugs', 'publicationSearch': '/v2/search/publications',\n 'mutationGet': '/v2/mutation/get', 'geneGet': '/v2/gene/get',\n 'mutationClassify': '/v2/mutation/classify', 'validateTerms':\n '/v2/validate/terms', 'assertionSearch': '/v2/search/assertions',\n 'assertionExport': '/v2/export/assertions'}\nmmService = 'https://api.molecularmatch.com'\napiKey = '<your api key>'\nif apiKey == '<your api key>' and sys.argv[1]:\n apiKey = sys.argv[1]\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Colorectal cancer'}, {'facet':\n 'MUTATION', 'term': 'BRAF V600E'}, {'facet': 'STATUS', 'term':\n 'Enrolling'}, {'facet': 'TRIALTYPE', 'term': 'Interventional'}, {\n 'facet': 'COUNTRY', 'term': 'France'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(r.json()['total'])\nfor i in np.arange(0, len(r.json()['rows'])):\n print(r.json()['rows'][i]['id'])\n print(r.json()['rows'][i]['briefTitle'])\n 
print(r.json()['rows'][i]['overallContact'])\nfilters = [{'facet': 'ID', 'term': 'NCT02291289'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nfor tag in r.json()['rows'][0]['tags']:\n if tag['facet'] == 'MUTATION':\n print(tag)\nurl = mmService + resourceURLs['mutationGet']\npayload = {'apiKey': apiKey, 'name': 'BRAF V600E'}\nr = requests.get(url, params=payload)\nprint(r.json()['sources'])\nfor i in r.json()['parents']:\n if i['type'] == 'domain':\n print(i)\nurl = mmService + resourceURLs['mutationClassify']\npayload = {'apiKey': apiKey, 'variant': 'BRAF V600E', 'condition':\n 'Lung cancer'}\nr = requests.post(url, json=payload)\nprint(r.json()['classifications'][0]['classification'])\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\nprint(r.json()['classifications'][0]['trialCount'])\nprint(r.json()['classifications'][0]['publicationCount'])\nurl = mmService + resourceURLs['drugSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}, {'facet':\n 'MUTATION', 'term': 'BRAF V600E', 'geneExpand': {'Drug': False}}]\npayload = {'apiKey': apiKey, 'filters': filters, 'mode': 'discovery'}\nr = requests.post(url, json=payload)\nfor drug in r.json()['rows']:\n print(drug)\n if drug['approved'] == False:\n print(drug['name'])\nurl = mmService + resourceURLs['drugSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters, 'mode': 'discovery'}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['trialSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nfilters = [{'facet': 'ID', 'term': 'EUDRACT2017-003305-18'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint('r here')\nprint(r.json())\nurl = mmService + resourceURLs['publicationSearch']\nfilters = [{'facet': 'CONDITION', 'term': 'Lung cancer'}]\npayload = {'apiKey': apiKey, 'filters': filters}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['mutationGet']\npayload = {'apiKey': apiKey, 'name': 'BRAF V600E'}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['geneGet']\npayload = {'apiKey': apiKey, 'symbol': 'BRAF'}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\nurl = mmService + resourceURLs['mutationClassify']\npayload = {'apiKey': apiKey, 'variant': 'EGFR T790M', 'condition':\n 'Lung cancer'}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n", "step-5": "# MolecularMatch API (MM-DATA) Python Example Sheet\n# Based on documentation at https://api.molecularmatch.com\n# Author: Shane Neeley, MolecularMatch Inc., Jan. 
30, 2018\n\nimport requests\nimport json\nimport numpy as np\nimport sys\n\nresourceURLs = {\n\t\"trialSearch\": \"/v2/search/trials\",\n\t\"drugSearch\": \"/v2/search/drugs\",\n\t\"publicationSearch\": \"/v2/search/publications\",\n\t\"mutationGet\": \"/v2/mutation/get\",\n\t\"geneGet\": \"/v2/gene/get\",\n\t\"mutationClassify\": \"/v2/mutation/classify\",\n\t\"validateTerms\": \"/v2/validate/terms\",\n\t\"assertionSearch\": \"/v2/search/assertions\",\n\t\"assertionExport\": \"/v2/export/assertions\"\n}\nmmService = \"https://api.molecularmatch.com\"\n\n# CHANGE THIS TO YOUR KEY or use as parameter (e.g. $ python3 publicationsAPI.py key)\napiKey = '<your api key>'\nif apiKey == '<your api key>' and sys.argv[1]:\n\tapiKey = sys.argv[1]\n\n#// TODO: geolocation searches\n\n#####################search trials##################################\n\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n##################################################################\n#####################SCENARIOS####################################\n##################################################################\n\n#### Clinical trial reporting\n\n# When looking up trials for an actual patient, it is important to include the filters of Enrolling and Interventional\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [\n\t{\"facet\":\"CONDITION\",\"term\":\"Colorectal cancer\"},\n\t{\"facet\":\"MUTATION\",\"term\":\"BRAF V600E\"},\n\t{\"facet\":\"STATUS\", \"term\":\"Enrolling\"},\n\t{\"facet\":\"TRIALTYPE\", \"term\":\"Interventional\"},\n\t{\"facet\":\"COUNTRY\", \"term\":\"France\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\n\n# Question: how many trials for a patient with this mutation and disease are interventional and enrolling in France?\nprint(r.json()['total'])\n# Answer: 4\n\n# Question: what are these trials ClinicalTrials.gov IDs and titles and email addresses for contact?\nfor i in np.arange(0, len(r.json()['rows']) ):\n\tprint(r.json()['rows'][i]['id'])\n\tprint(r.json()['rows'][i]['briefTitle'])\n\tprint(r.json()['rows'][i]['overallContact'])\n# Answer:\n# NCT02291289 - A Multi-Center Study of Biomarker-Driven Therapy in Metastatic Colorectal Cancer - [email protected]\n# NCT01677741 - A Study to Determine Safety, Tolerability and Pharmacokinetics of Oral Dabrafenib In Children and Adolescent Subjects - [email protected]\n# NCT02788279 - A Study to Investigate Efficacy and Safety of Cobimetinib Plus Atezolizumab and Atezolizumab Monotherapy Versus Regorafenib in Participants With Metastatic Colorectal Adenocarcinoma - [email protected]\n# NCT02751177 - Detection of KRAS, NRAS et BRAF Mutations in Plasma Circulating DNA From Patients With Metastatic Colorectal Cancer - [email protected]\n\n# Question: what are all the mutations that are associated with trial NCT02291289?\nfilters = [\n\t{\"facet\":\"ID\",\"term\":\"NCT02291289\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\n# Note: must have tags activated on api key for this to work. 
Not all api key users get tags.\nfor tag in r.json()['rows'][0]['tags']:\n\tif tag['facet'] == \"MUTATION\":\n\t\tprint(tag)\n\n# Answer:\n# 3 mutations are for inclusion criteria\n# {'facet': 'MUTATION', 'term': 'EGFR P546S', 'alias': 'EGFR P546S', 'priority': '0', 'filterType': 'include'}\n# {'facet': 'MUTATION', 'term': 'BRAF V600E', 'alias': 'BRAF V600E', 'priority': '0', 'filterType': 'include'}\n# {'facet': 'MUTATION', 'term': 'Microsatellite instability', 'alias': 'Microsatellite instability', 'priority': '0', 'filterType': 'include'}\n# 2 mutations are for exclusion criteria (filterType = 'exclude')\n# {'facet': 'MUTATION', 'term': 'EGFR S492R', 'alias': 'EGFR S492R', 'priority': 1, 'filterType': 'exclude'}\n# {'facet': 'MUTATION', 'term': 'BRAF G469L', 'alias': 'BRAF G469L', 'priority': 1, 'filterType': 'exclude'}\n\n# See more about the trial data model at: https://api.molecularmatch.com/#trialDataModel\n\n#### Mutation details lookup\n\n# So you want to know everything there is to know about BRAF V600E?\n\nurl = mmService + resourceURLs[\"mutationGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'name': 'BRAF V600E'\n}\nr = requests.get(url, params=payload)\n\n# Question: what databases have reported this mutation?\nprint(r.json()['sources'])\n# Answer: 'COSMIC', 'CIViC', 'DoCM', 'cBioPortal', 'ClinVar'\n\n# Question: is there a known protein domain this mutation is in?\nfor i in r.json()['parents']:\n\tif (i['type'] == 'domain'):\n\t\tprint(i)\n# Answer: BRAF Pkinase_Tyr domain (protein tyrosine kinase domain)\n\n# What is the clinical interpretation of BRAF V600E? Are there trials, drugs, publications about it?\n\nurl = mmService + resourceURLs[\"mutationClassify\"]\npayload = {\n\t'apiKey': apiKey,\n\t'variant': 'BRAF V600E',\n\t'condition': 'Lung cancer'\n}\nr = requests.post(url, json=payload)\n\n# Question: How does MolecularMatch classify this mutation in this condition?\nprint(r.json()['classifications'][0]['classification'])\n# Answer: actionable\n\n# Question: How many drugs approved and on label for the condition provided?\nprint(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])\n# Answer: 0\n\n# Question: How many drugs approved but off-label for the condition provided?\nprint(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])\n# Answer: 6\n\n# Question: What about experimental drugs?\nprint(r.json()['classifications'][0]['drugsExperimentalCount'])\n# Answer: 4\n\n# Question: How many clinical trials are open for this mutation and condition?\nprint(r.json()['classifications'][0]['trialCount'])\n# Answer: 24\n\n# Question: Is there a lot of research publications about this mutation in this condition?\nprint(r.json()['classifications'][0]['publicationCount'])\n# Answer: 47\n\n# Question: Ok, what are these 4 experimental drugs?\nurl = mmService + resourceURLs[\"drugSearch\"]\n# set geneExpand for Drug to False so drugs return only for V600E, not BRAF (see https://api.molecularmatch.com/#geneExpansion)\nfilters = [\n\t{'facet':'CONDITION','term':'Lung cancer'},\n\t{'facet':'MUTATION','term':'BRAF V600E', \"geneExpand\": {\"Drug\": False}}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters,\n\t'mode': 'discovery'\n}\nr = requests.post(url, json=payload)\nfor drug in r.json()['rows']:\n\tprint(drug)\n\tif drug['approved'] == False:\n\t\tprint(drug['name'])\n\n# Answer:\n# Lgx818\n# Plx8394\n# BGB-283\n# Cep-32496\n\n##################################################################\n#####################BASIC 
QUERIES################################\n##################################################################\n\n####################search drugs##################################\n\nurl = mmService + resourceURLs[\"drugSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters,\n\t'mode': 'discovery' # 'criteriaunmet' # multiple modes avaiable for drugsearch. see api docs.\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n#####################search trials##################################\n\nurl = mmService + resourceURLs[\"trialSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n# Search trials by various ID types\nfilters = [\n\t{\"facet\":\"ID\",\"term\":\"EUDRACT2017-003305-18\"}\n]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint('r here')\nprint(r.json())\n\n#####################search publications#############################\n\nurl = mmService + resourceURLs[\"publicationSearch\"]\nfilters = [{'facet':'CONDITION','term':'Lung cancer'}]\npayload = {\n\t'apiKey': apiKey,\n\t'filters': filters\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n\n####################get mutation###################################\n\nurl = mmService + resourceURLs[\"mutationGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'name': 'BRAF V600E'\n}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\n\n######################get gene#################################\n\nurl = mmService + resourceURLs[\"geneGet\"]\npayload = {\n\t'apiKey': apiKey,\n\t'symbol': 'BRAF'\n}\nr = requests.get(url, params=payload)\nprint(json.dumps(r.json()))\n\n######################classify mutation##############################\n\nurl = mmService + resourceURLs[\"mutationClassify\"]\npayload = {\n\t'apiKey': apiKey,\n\t'variant': 'EGFR T790M',\n\t'condition': 'Lung cancer'\n}\nr = requests.post(url, json=payload)\nprint(json.dumps(r.json()))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
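The MolecularMatch record above builds every call from the same three ingredients: the base service URL, a resource path from resourceURLs, and a payload carrying apiKey plus either filters or named fields. Below is a minimal sketch of that shared pattern, kept separate from the record itself; the endpoint paths and payload keys come from the record, while the helper name, the timeout, and the error handling are illustrative assumptions.

import requests

MM_SERVICE = 'https://api.molecularmatch.com'
RESOURCE_URLS = {
    'trialSearch': '/v2/search/trials',
    'mutationGet': '/v2/mutation/get',
    'mutationClassify': '/v2/mutation/classify',
}


def mm_request(resource, api_key, method='post', **fields):
    # Build the URL the same way the record does, then send apiKey plus
    # any extra payload fields. Raising on HTTP errors is an assumption;
    # the record reads r.json() directly without error handling.
    url = MM_SERVICE + RESOURCE_URLS[resource]
    payload = dict(apiKey=api_key, **fields)
    if method == 'get':
        r = requests.get(url, params=payload, timeout=30)
    else:
        r = requests.post(url, json=payload, timeout=30)
    r.raise_for_status()
    return r.json()


# Usage mirroring two calls from the record:
# trials = mm_request('trialSearch', '<your api key>',
#                     filters=[{'facet': 'CONDITION', 'term': 'Lung cancer'}])
# braf = mm_request('mutationGet', '<your api key>', method='get',
#                   name='BRAF V600E')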
from .kahfm_batch import KaHFMBatch
normal
{ "blob_id": "8e317d4d8ae8dc3d692d237e7e0abfaf37aecbb6", "index": 7017, "step-1": "<mask token>\n", "step-2": "from .kahfm_batch import KaHFMBatch\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
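This record preserves only a package __init__ re-export; the rest of the module is masked. A short sketch of what that single line implies, under the hypothetical assumption (the record masks it) that kahfm_batch.py defines the KaHFMBatch class:

# kahfm_batch.py -- hypothetical module body; the record masks the real one
class KaHFMBatch(object):
    """Stand-in for the batched KaHFM model the package presumably exposes."""
    pass


# __init__.py -- the single line preserved by the record
from .kahfm_batch import KaHFMBatch

# The re-export lets callers import from the package root:
#     from <package> import KaHFMBatch
# instead of reaching into the submodule.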
#!/usr/bin/env python2
# coding=utf8

from __future__ import absolute_import, division, print_function

from sqlalchemy import func

from walis.model.walis import walis_session
from walis.model.zeus import zeus_session, zeus_db_handler
from walis.model.zeus.activity import (
    SubsidyProcessRecord,
    SubsidyPayRecord,
    ActivityStats,
)
from walis.model.walis.activity import PaymentNoticeRecord as NoticeRecord
from walis.utils.time import get_today_begin_time, get_today_end_time


MAX_LIST_SIZE = 1000
DEFAULT_LIST_SIZE = 200


def get_new_pay_records(process_at, limit=200):
    with zeus_session() as session:
        result = session.query(
            SubsidyPayRecord.id,
            SubsidyPayRecord.restaurant_id,
            SubsidyProcessRecord.card_id,
            SubsidyProcessRecord.processed_at,
            SubsidyPayRecord.status). \
            outerjoin(SubsidyProcessRecord,
                      SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \
            filter(SubsidyPayRecord.id > process_at). \
            filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \
            order_by(SubsidyPayRecord.id.asc()).limit(limit).all()

    return result


def get_success_pay_records(record_ids):
    with zeus_session() as session:
        result = session.query(
            SubsidyPayRecord.id,
            SubsidyPayRecord.restaurant_id,
            SubsidyProcessRecord.card_id,
            SubsidyProcessRecord.processed_at). \
            outerjoin(SubsidyProcessRecord,
                      SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \
            filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS). \
            filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \
            filter(SubsidyPayRecord.id.in_(record_ids)).all()

    return result


def get_activity_stats(pay_record_id):
    with zeus_session() as session:
        results = session.query(
            ActivityStats.activity_id,
            ActivityStats.activity_category_id,
            func.sum(ActivityStats.total_subsidy),
            func.min(ActivityStats.date),
            func.max(ActivityStats.date),
            func.sum(ActivityStats.quantity)). \
            group_by(ActivityStats.restaurant_id,
                     ActivityStats.activity_id,
                     ActivityStats.activity_category_id). \
            filter(ActivityStats.pay_record_id == pay_record_id). \
            filter(ActivityStats.status == ActivityStats.STATUS_PAY_SUCCESS).all()

    return results


def get_success_record_ids_by_restaurant(
        restaurant_id, activity_id=None, activity_category_id=None):
    with zeus_session() as session:
        query = session.query(SubsidyPayRecord.id). \
            filter(SubsidyPayRecord.restaurant_id == restaurant_id). \
            filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS)

        # Query objects are immutable: filter() returns a new query, so its
        # result must be reassigned (the original discarded these conditions).
        if activity_id is not None:
            query = query.filter(SubsidyPayRecord.activity_id == activity_id)

        if activity_category_id is not None:
            query = query.filter(
                SubsidyPayRecord.activity_category_id == activity_category_id)

        record_ids = query.all()

    return [r[0] for r in record_ids]


PAYLOG_STATUS_LIST = {
    ActivityStats.STATUS_PAY_RECORD_GENERATED,
    ActivityStats.STATUS_PAY_SUCCESS,
    ActivityStats.STATUS_PAY_FAIL,
}


@zeus_db_handler
def query_paylog_by_rst(restaurant_id, activity_id=None,
                        activity_category_id=None, offset=None, limit=None):
    """ Except ActivityStats.STATUS_PENDING (未审核状态, not yet reviewed)
    """
    # `session` is not bound locally here or in the other decorated
    # functions below; the zeus_db_handler decorator is expected to
    # provide it for the duration of the call.
    q = session.query(
        ActivityStats.pay_record_id,
        ActivityStats.activity_id,
        ActivityStats.activity_category_id,
        ActivityStats.status,
        func.min(ActivityStats.date),
        func.max(ActivityStats.date),
        func.sum(ActivityStats.quantity),
        func.sum(ActivityStats.total_subsidy),
        SubsidyPayRecord.created_at,
        func.max(SubsidyProcessRecord.id)). \
        group_by(ActivityStats.pay_record_id,
                 ActivityStats.activity_id,
                 ActivityStats.activity_category_id). \
        outerjoin(SubsidyPayRecord,
                  SubsidyPayRecord.id == ActivityStats.pay_record_id). \
        outerjoin(SubsidyProcessRecord,
                  SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \
        filter(ActivityStats.restaurant_id == restaurant_id). \
        filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)). \
        order_by(SubsidyPayRecord.created_at.desc())

    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)

    if activity_category_id is not None:
        q = q.filter(ActivityStats.activity_category_id == activity_category_id)

    if limit is not None:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    else:
        q = q.limit(DEFAULT_LIST_SIZE)

    if offset is not None:
        q = q.offset(offset)

    return q


@zeus_db_handler
def query_pay_records(restaurant_id, offset=None, limit=None):
    q = session.query(SubsidyPayRecord). \
        filter(SubsidyPayRecord.restaurant_id == restaurant_id). \
        order_by(SubsidyPayRecord.created_at.desc())

    if limit is not None:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    else:
        q = q.limit(DEFAULT_LIST_SIZE)

    if offset is not None:
        q = q.offset(offset)

    return q.all()


@zeus_db_handler
def query_paylog(pay_record_ids, activity_id=None, activity_category_id=None,
                 offset=None, limit=None):
    q = session.query(
        ActivityStats.pay_record_id,
        ActivityStats.activity_id,
        ActivityStats.activity_category_id,
        ActivityStats.status,
        func.min(ActivityStats.date),
        func.max(ActivityStats.date),
        func.sum(ActivityStats.quantity),
        func.sum(ActivityStats.total_subsidy)). \
        group_by(ActivityStats.pay_record_id,
                 ActivityStats.activity_id,
                 ActivityStats.activity_category_id). \
        filter(ActivityStats.pay_record_id.in_(pay_record_ids)). \
        filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)). \
        order_by(ActivityStats.created_at.desc())

    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)

    if activity_category_id is not None:
        q = q.filter(ActivityStats.activity_category_id == activity_category_id)

    if limit is not None:
        q = q.limit(min(limit, MAX_LIST_SIZE))
    else:
        q = q.limit(DEFAULT_LIST_SIZE)

    if offset is not None:
        q = q.offset(offset)

    return q


@zeus_db_handler
def get_max_subsidy_process_record_ids(pay_record_ids):
    q = session.query(func.max(SubsidyProcessRecord.id)). \
        group_by(SubsidyProcessRecord.pay_record_id). \
        filter(SubsidyProcessRecord.pay_record_id.in_(pay_record_ids))

    return q


@zeus_db_handler
def count_paylog_by_rst(restaurant_id, activity_id=None,
                        activity_category_id=None):
    """ Except ActivityStats.STATUS_PENDING (未审核状态, not yet reviewed)
    """
    q = session.query(ActivityStats.id). \
        group_by(ActivityStats.pay_record_id,
                 ActivityStats.activity_id,
                 ActivityStats.activity_category_id). \
        filter(ActivityStats.restaurant_id == restaurant_id). \
        filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))

    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)

    if activity_category_id is not None:
        q = q.filter(ActivityStats.activity_category_id == activity_category_id)

    return len(q.all())


@zeus_db_handler
def query_process_records_by_ids(process_ids):
    query = session.query(SubsidyProcessRecord). \
        filter(SubsidyProcessRecord.id.in_(process_ids))
    return query.all()


@zeus_db_handler
def get_subsidy_record_process_time(record_ids, status):
    return session.query(
        SubsidyProcessRecord.pay_record_id,
        SubsidyProcessRecord.processed_at). \
        filter(SubsidyProcessRecord.pay_record_id.in_(record_ids)). \
        filter(SubsidyProcessRecord.status == status).all()


def get_pay_activities_by_restaurant(rst_id):
    with zeus_session() as session:
        query = session.query(
            ActivityStats.activity_id,
            ActivityStats.activity_category_id). \
            group_by(ActivityStats.activity_id,
                     ActivityStats.activity_category_id). \
            filter(ActivityStats.restaurant_id == rst_id)

        # Execute while the session is still open.
        return query.all()


# javis model begins
def query_sms_send_info(start_time=None, end_time=None, phone=None,
                        restaurant_id=None, card_num_tail=None, status=None):
    with walis_session() as session:
        query = session.query(NoticeRecord)

        if phone:
            query = query.filter(NoticeRecord.phone == phone)

        if restaurant_id:
            query = query.filter(NoticeRecord.restaurant_id == restaurant_id)

        if card_num_tail:
            query = query.filter(NoticeRecord.card_num_tail == card_num_tail)

        if status:
            query = query.filter(NoticeRecord.status == status)

        if not start_time:
            start_time = get_today_begin_time()

        if not end_time:
            end_time = get_today_end_time()

        query = query.filter(NoticeRecord.created_at > start_time). \
            filter(NoticeRecord.created_at < end_time)

        return query.all()


def query_sms_send_count(start_time=None, end_time=None, status=None):
    with walis_session() as session:
        if not start_time:
            start_time = get_today_begin_time()

        if not end_time:
            end_time = get_today_end_time()

        query = session.query(func.count(NoticeRecord.record_id)). \
            filter(NoticeRecord.created_at > start_time). \
            filter(NoticeRecord.created_at < end_time)

        if status is not None:
            query = query.filter(NoticeRecord.status == status)

        return query.scalar()


@zeus_db_handler
def query_auto_pay_activity_stats_result(
        city_ids=None, restaurant_ids=None, activity_id=None,
        activity_category_id=None, from_date=None, to_date=None,
        statuses=None, offset=None, limit=None, with_subsidy=None):
    q = session.query(
        ActivityStats.restaurant_id,
        ActivityStats.activity_id,
        ActivityStats.activity_category_id,
        func.sum(ActivityStats.quantity),
        func.sum(ActivityStats.total_subsidy),
        func.min(ActivityStats.date),
        func.max(ActivityStats.date)). \
        group_by(ActivityStats.restaurant_id,
                 ActivityStats.activity_id,
                 ActivityStats.activity_category_id). \
        order_by(ActivityStats.restaurant_id.desc())

    return _query_activity_stats(
        q, city_ids, restaurant_ids, activity_id,
        activity_category_id, from_date, to_date, statuses,
        with_subsidy, offset, limit)


def _query_activity_stats(
        q, city_ids=None, restaurant_ids=None, activity_id=None,
        activity_category_id=None, from_date=None, to_date=None,
        statuses=None, with_subsidy=None, offset=None, limit=None):
    if activity_id is not None:
        q = q.filter(ActivityStats.activity_id == activity_id)

    if activity_category_id is not None:
        q = q.filter(ActivityStats.activity_category_id == activity_category_id)  # noqa

    if city_ids is not None:
        q = q.filter(ActivityStats.city_id.in_(city_ids))

    if restaurant_ids is not None:
        q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))

    if from_date is not None:
        q = q.filter(ActivityStats.date >= from_date)

    if to_date is not None:
        q = q.filter(ActivityStats.date <= to_date)

    if statuses is not None:
        q = q.filter(ActivityStats.status.in_(statuses))

    if with_subsidy is not None:
        if with_subsidy:
            q = q.filter(ActivityStats.total_subsidy > 0)
        else:
            q = q.filter(ActivityStats.total_subsidy == 0)

    if offset is not None:
        q = q.offset(offset)

    # NOTE: the result is always capped at 1000 rows; the `limit` argument
    # is accepted but never applied here.
    q = q.limit(1000)

    return q
normal
{ "blob_id": "68d537cb8488ae4f2c8300e885be78540952dec0", "index": 450, "step-1": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\n<mask token>\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = 
q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\n<mask token>\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n<mask token>\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not 
None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n", "step-2": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\n<mask token>\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n 
PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query 
= query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, func.sum(\n ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n", "step-3": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at).outerjoin(SubsidyProcessRecord,\n 
SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id).filter(\n SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS).filter(\n SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL\n ).filter(SubsidyPayRecord.id.in_(record_ids)).all()\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n 
func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, 
ActivityStats.activity_category_id, func.sum(\n ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n", "step-4": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at).outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id).filter(\n SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS).filter(\n SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL\n ).filter(SubsidyPayRecord.id.in_(record_ids)).all()\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not 
None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef get_max_subsidy_process_record_ids(pay_record_ids):\n q = session.query(func.max(SubsidyProcessRecord.id)).group_by(\n SubsidyProcessRecord.pay_record_id).filter(SubsidyProcessRecord.\n pay_record_id.in_(pay_record_ids))\n return q\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n 
pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n@zeus_db_handler\ndef query_process_records_by_ids(process_ids):\n query = session.query(SubsidyProcessRecord).filter(SubsidyProcessRecord\n .id.in_(process_ids))\n return query.all()\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, func.sum(\n ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if 
activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n", "step-5": "#!/usr/bin/env python2\n# coding=utf8\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom sqlalchemy import func\n\nfrom walis.model.walis import walis_session\nfrom walis.model.zeus import zeus_session, zeus_db_handler\nfrom walis.model.zeus.activity import (\n SubsidyProcessRecord,\n SubsidyPayRecord,\n ActivityStats,\n)\nfrom walis.model.walis.activity import PaymentNoticeRecord as NoticeRecord\nfrom walis.utils.time import get_today_begin_time, get_today_end_time\n\n\nMAX_LIST_SIZE = 1000\nDEFAULT_LIST_SIZE = 200\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id,\n SubsidyPayRecord.restaurant_id,\n SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at,\n SubsidyPayRecord.status). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.id > process_at). \\\n filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \\\n order_by(SubsidyPayRecord.id.asc()).limit(limit).all()\n\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id,\n SubsidyPayRecord.restaurant_id,\n SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at,). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS). \\\n filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \\\n filter(SubsidyPayRecord.id.in_(record_ids)).all()\n\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), ).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id). \\\n filter(ActivityStats.pay_record_id == pay_record_id). \\\n filter(ActivityStats.status == ActivityStats.STATUS_PAY_SUCCESS).all()\n\n return results\n\n\ndef get_success_record_ids_by_restaurant(\n restaurant_id, activity_id=None, activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.restaurant_id == restaurant_id). 
\\\n filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS)\n\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n\n if activity_category_id is not None:\n query.filter(\n SubsidyPayRecord.activity_category_id == activity_category_id)\n record_ids = query.all()\n\n return [r[0] for r in record_ids]\n\n\nPAYLOG_STATUS_LIST = {\n ActivityStats.STATUS_PAY_RECORD_GENERATED,\n ActivityStats.STATUS_PAY_SUCCESS,\n ActivityStats.STATUS_PAY_FAIL,\n}\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(\n ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n ActivityStats.status,\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy),\n SubsidyPayRecord.created_at,\n func.max(SubsidyProcessRecord.id)). \\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). \\\n outerjoin(SubsidyPayRecord,\n SubsidyPayRecord.id == ActivityStats.pay_record_id). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(ActivityStats.restaurant_id == restaurant_id).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\\\n order_by(SubsidyPayRecord.created_at.desc())\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).\\\n filter(SubsidyPayRecord.restaurant_id == restaurant_id).\\\n order_by(SubsidyPayRecord.created_at.desc())\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=None,\n offset=None, limit=None):\n q = session.query(\n ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n ActivityStats.status,\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy)).\\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). 
\\\n filter(ActivityStats.pay_record_id.in_(pay_record_ids)).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\\\n order_by(ActivityStats.created_at.desc())\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q\n\n\n@zeus_db_handler\ndef get_max_subsidy_process_record_ids(pay_record_ids):\n q = session.query(func.max(SubsidyProcessRecord.id)).\\\n group_by(SubsidyProcessRecord.pay_record_id).\\\n filter(SubsidyProcessRecord.pay_record_id.in_(pay_record_ids))\n\n return q\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id). \\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). \\\n filter(ActivityStats.restaurant_id == restaurant_id).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n return len(q.all())\n\n\n@zeus_db_handler\ndef query_process_records_by_ids(process_ids):\n query = session.query(SubsidyProcessRecord).\\\n filter(SubsidyProcessRecord.id.in_(process_ids))\n return query.all()\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(\n SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).\\\n filter(SubsidyProcessRecord.pay_record_id.in_(record_ids)).\\\n filter(SubsidyProcessRecord.status == status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,). \\\n group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id). 
\\\n filter(ActivityStats.restaurant_id == rst_id)\n\n return query.all()\n\n\n# javis model begins\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n\n with walis_session() as session:\n query = session.query(NoticeRecord)\n\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n\n if status:\n query = query.filter(NoticeRecord.status == status)\n\n if not start_time:\n start_time = get_today_begin_time()\n\n if not end_time:\n end_time = get_today_end_time()\n\n query = query.filter(NoticeRecord.created_at > start_time).\\\n filter(NoticeRecord.created_at < end_time)\n\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n\n if not start_time:\n start_time = get_today_begin_time()\n\n if not end_time:\n end_time = get_today_end_time()\n\n query = session.query(func.count(NoticeRecord.record_id)).\\\n filter(NoticeRecord.created_at > start_time).\\\n filter(NoticeRecord.created_at < end_time)\n\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(\n city_ids=None, restaurant_ids=None, activity_id=None,\n activity_category_id=None, from_date=None, to_date=None, statuses=None,\n offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date),\n func.max(ActivityStats.date)).\\\n group_by(ActivityStats.restaurant_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id).\\\n order_by(ActivityStats.restaurant_id.desc())\n\n return _query_activity_stats(\n q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses,\n with_subsidy, offset, limit)\n\n\ndef _query_activity_stats(\n q, city_ids=None, restaurant_ids=None, activity_id=None,\n activity_category_id=None, from_date=None, to_date=None, statuses=None,\n with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id) # noqa\n\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n\n if offset is not None:\n q = q.offset(offset)\n\n q = q.limit(1000)\n\n return q\n", "step-ids": [ 11, 13, 14, 16, 19 ] }
[ 11, 13, 14, 16, 19 ]
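The query helpers above lean on SQLAlchemy's generative API: Query.filter() returns a new Query rather than mutating the receiver, which is why most of them reassign with q = q.filter(...). Note that get_success_record_ids_by_restaurant calls query.filter(...) without reassigning, so its activity_id and activity_category_id conditions are silently dropped. A minimal sketch of the pattern against a hypothetical model (the names are illustrative, not taken from the code above):

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class PayRecord(Base):  # hypothetical stand-in for SubsidyPayRecord
    __tablename__ = 'pay_record'
    id = Column(Integer, primary_key=True)
    status = Column(Integer)


engine = create_engine('sqlite://')  # in-memory database for the demo
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

q = session.query(PayRecord)
q.filter(PayRecord.status == 1)      # no-op: the returned Query is discarded
q = q.filter(PayRecord.status == 1)  # correct: keep the returned Query
print(q.all())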
#!/usr/bin/python # -*- coding: UTF-8 -*- from connect import Connect class Resource: def __init__(self, row: tuple): self.video_path = row[0] self.pic_path = row[1]
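A quick usage sketch for the Resource row wrapper above; the tuple contents are made up for illustration, and in the wider project such rows presumably come from a cursor opened via the (here unused) Connect import:

# Hypothetical row shaped like (video_path, pic_path)
row = ('/media/videos/clip_001.mp4', '/media/pics/clip_001.jpg')
res = Resource(row)
print(res.video_path)  # -> /media/videos/clip_001.mp4
print(res.pic_path)    # -> /media/pics/clip_001.jpg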
normal
{ "blob_id": "65aa27addaec6014fe5fd66df2c0d3632231a314", "index": 3124, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Resource:\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Resource:\n\n def __init__(self, row: tuple):\n self.video_path = row[0]\n self.pic_path = row[1]\n", "step-4": "from connect import Connect\n\n\nclass Resource:\n\n def __init__(self, row: tuple):\n self.video_path = row[0]\n self.pic_path = row[1]\n", "step-5": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nfrom connect import Connect\n\n\nclass Resource:\n def __init__(self, row: tuple):\n self.video_path = row[0]\n self.pic_path = row[1]\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
''' Problem Description Given two numbers n1 and n2 1. Find prime numbers between n1 and n2, then 2. Make all possible unique combinations of numbers from the prime numbers list you found in step 1. 3. From this new list, again find all prime numbers. 4. Find smallest (a) and largest (b) number from the 2nd generated list, also count of this list. 5. Consider smallest and largest number as the 1st and 2nd number to generate Fibonacci series respectively till the count (number of primes in the 2nd list). 6. Print the last number of a Fibonacci series as an output Constraints 2 <= n1, n2 <= 100 n2 - n1 >= 35 Input Format One line containing two space separated integers n1 and n2. Output Last number of a generated Fibonacci series. Timeout 1 Test Case Example 1 Input : 2 40 Output : 13158006689 Explanation : 1st prime list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37] Combination of all the primes = [23, 25, 27, 211, 213, 217, 219, 223, 229, 231, 32, 35, 37, 311, 313, 319, 323, 329, 331, 337, 52, 53, 57, 511, 513, 517, 519, 523, 529, 531, 537, 72, 73, 75, 711, 713, 717, 719, 723, 729, 731, 737, 112, 113, 115, 117, 1113, 1117, 1119, 1123, 1129, 1131, 1137, 132, 133, 135, 137, 1311, 1317, 1319, 1323, 1329, 1331, 1337, 172, 173, 175, 177, 1711, 1713, 1719, 1723, 1729, 1731, 1737, 192, 193, 195, 197, 1911, 1913, 1917, 1923, 1929, 1931, 1937, 232, 233, 235, 237, 2311, 2313, 2317, 2319, 2329, 2331, 2337, 292, 293, 295, 297, 2911, 2913, 2917, 2919, 2923, 2931, 2937, 312, 315, 317, 3111, 3113, 3117, 3119, 3123, 3129, 3137, 372, 373, 375, 377, 3711, 3713, 3717, 3719, 3723, 3729, 3731] 2nd prime list=[193, 3137, 197, 2311, 3719, 73, 137, 331, 523, 1931, 719, 337, 211, 23, 1117, 223, 1123, 229, 37, 293, 2917, 1319, 1129, 233, 173, 3119, 113, 53, 373, 311, 313, 1913, 1723, 317] smallest (a) = 23 largest (b) = 3719 Therefore, the last number of a Fibonacci series i.e. 34th Fibonacci number in the series that has 23 and 3719 as the first 2 numbers is 13158006689 Example 2 Input : 30 70 Output : 2027041 Explanation 1st prime list=[31, 37, 41, 43, 47, 53, 59, 61, 67] 2nd prime list generated form combination of 1st prime list = [3137, 5953, 5347, 6761, 3761, 4337, 6737, 6131, 3767, 4759, 4153, 3167, 4159, 6143] smallest prime in 2nd list=3137 largest prime in 2nd list=6761 Therefore, the last number of a Fibonacci series i.e. 
14th Fibonacci number in the series that has 3137 and 6761 as the first
2 numbers is 2027041
'''

# Note: the original submission passed the sample cases but failed the
# private ones. The likely cause: l2 is built in generation order, so
# l2[0]/l2[-1] are not guaranteed to be its numeric min/max (e.g. with
# primes [7, 11, ...], 13||7 = 137 < 711 = 7||11). The search range for
# the 2nd prime list below therefore uses min()/max() over the integer values.

# https://www.rookieslab.com/posts/fastest-way-to-check-if-a-number-is-prime-or-not
# A sieve of Eratosthenes would also work here; see the sketch after this code.
# https://www.geeksforgeeks.org/python-program-to-check-whether-a-number-is-prime-or-not/
def isPrime(n):  # used to test primality of candidates for the 2nd list
    # Corner cases
    if n <= 1:
        return False
    if n <= 3:
        return True

    # Checked here so the loop below can skip multiples of 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False

    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i = i + 6

    return True


def primeList(n1, n2):
    l = []
    for n in range(n1, n2 + 1):
        if isPrime(n):
            l.append(n)
    return l


n1, n2 = map(int, input().split())
l1 = primeList(n1, n2)
# print(l1) - check that the first list of primes matches the example

# Concatenate every ordered pair of distinct primes from l1
l2 = list()
l = len(l1)
for i in range(l):
    for j in range(l):
        if i == j:
            continue
        l2.append(str(l1[i]) + str(l1[j]))

# Primes within the numeric range spanned by the concatenations,
# then keep only those that actually appear in l2
l3 = primeList(min(int(s) for s in l2), max(int(s) for s in l2))
l4 = []
for i in l3:
    if str(i) in l2:
        l4.append(i)
# print(l4) - check that the second list of primes matches the example

x = min(l4)
y = max(l4)
count = len(l4)
# print(x, y, count) - check that smallest, largest prime and count match
for i in range(2, count):
    f = x + y
    x = y
    y = f
print(y)
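As noted in the comments above, trial division in primeList can be swapped for a sieve of Eratosthenes, which builds the whole prime table for [0, n2] once. A minimal sketch with the same inclusive interface; the function name is mine, not part of the submission:

def prime_list_sieve(n1, n2):
    # Sieve of Eratosthenes over [0, n2]: is_prime[k] == 1 iff k is prime.
    is_prime = [1] * (n2 + 1)
    is_prime[0] = 0
    if n2 >= 1:
        is_prime[1] = 0
    p = 2
    while p * p <= n2:
        if is_prime[p]:
            # Multiples below p * p were already crossed off by smaller primes.
            for multiple in range(p * p, n2 + 1, p):
                is_prime[multiple] = 0
        p += 1
    return [k for k in range(max(n1, 2), n2 + 1) if is_prime[k]]


# Matches the first prime list from Example 1:
assert prime_list_sieve(2, 40) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]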
normal
{ "blob_id": "fe5050fdf010ce1c4d458b8a52ac92485a7d8cea", "index": 5706, "step-1": "<mask token>\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i = i + 6\n return True\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i = i + 6\n return True\n\n\ndef primeList(n1, n2):\n l = []\n for n in range(n1, n2 + 1):\n if isPrime(n):\n l.append(n)\n return l\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i = i + 6\n return True\n\n\ndef primeList(n1, n2):\n l = []\n for n in range(n1, n2 + 1):\n if isPrime(n):\n l.append(n)\n return l\n\n\n<mask token>\nfor i in range(l):\n for j in range(l):\n if i == j:\n continue\n l2.append(str(l1[i]) + str(l1[j]))\n<mask token>\nfor i in l3:\n if str(i) in l2:\n l4.append(i)\n<mask token>\nfor i in range(2, count):\n f = x + y\n x = y\n y = f\nprint(y)\n", "step-4": "<mask token>\n\n\ndef isPrime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i = i + 6\n return True\n\n\ndef primeList(n1, n2):\n l = []\n for n in range(n1, n2 + 1):\n if isPrime(n):\n l.append(n)\n return l\n\n\nn1, n2 = map(int, input().split())\nl1 = primeList(n1, n2)\nl2 = list()\nl = len(l1)\nfor i in range(l):\n for j in range(l):\n if i == j:\n continue\n l2.append(str(l1[i]) + str(l1[j]))\nl3 = primeList(int(l2[0]), int(l2[-1]))\nl4 = []\nfor i in l3:\n if str(i) in l2:\n l4.append(i)\nx = min(l4)\ny = max(l4)\ncount = len(l4)\nfor i in range(2, count):\n f = x + y\n x = y\n y = f\nprint(y)\n", "step-5": "'''\nProblem Description\nGiven two numbers n1 and n2\n\n1. Find prime numbers between n1 and n2, then\n\n2. Make all possible unique combinations of numbers from the prime \nnumbers list you found in step 1. \n\n3. From this new list, again find all prime numbers.\n\n4. Find smallest (a) and largest (b) number from the 2nd generated \nlist, also count of this list.\n\n5. Consider smallest and largest number as the 1st and 2nd number \nto generate Fibonacci series respectively till the count \n(number of primes in the 2nd list).\n\n6. 
Print the last number of a Fibonacci series as an output\n\nConstraints\n2 <= n1, n2 <= 100\n\nn2 - n1 >= 35\n\nInput Format\nOne line containing two space separated integers n1 and n2.\n\nOutput\nLast number of a generated Fibonacci series.\n\nTimeout\n1\n\n\nTest Case\nExample 1\nInput : 2 40\nOutput : 13158006689\n\nExplanation :\n\n1st prime list = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\n\nCombination of all the primes = [23, 25, 27, 211, 213, 217, 219, \n223, 229, 231, 32, 35, 37, 311, 313, 319, 323, 329, 331, 337, 52, \n53, 57, 511, 513, 517, 519, 523, 529, 531, 537, 72, 73, 75, 711, \n713, 717, 719, 723, 729, 731, 737, 112, 113, 115, 117, 1113, 1117, \n1119, 1123, 1129, 1131, 1137, 132, 133, 135, 137, 1311, 1317, 1319, \n1323, 1329, 1331, 1337, 172, 173, 175, 177, 1711, 1713, 1719, 1723, \n1729, 1731, 1737, 192, 193, 195, 197, 1911, 1913, 1917, 1923, 1929, \n1931, 1937, 232, 233, 235, 237, 2311, 2313, 2317, 2319, 2329, 2331, \n2337, 292, 293, 295, 297, 2911, 2913, 2917, 2919, 2923, 2931, 2937, \n312, 315, 317, 3111, 3113, 3117, 3119, 3123, 3129, 3137, 372, 373, \n375, 377, 3711, 3713, 3717, 3719, 3723, 3729, 3731]\n\n2nd prime list=[193, 3137, 197, 2311, 3719, 73, 137, 331, 523, \n1931, 719, 337, 211, 23, 1117, 223, 1123, 229, 37, 293, 2917, \n1319, 1129, 233, 173, 3119, 113, 53, 373, 311, 313, 1913, 1723, \n317]\n\nsmallest (a) = 23\n\nlargest (b) = 3719\n\nTherefore, the last number of a Fibonacci series i.e. 34th \nFibonacci number in the series that has 23 and 3719 as the first \n2 numbers is 13158006689\n\nExample 2\nInput : 30 70\nOutput : 2027041 \n\nExplanation\n\n1st prime list=[31, 37, 41, 43, 47, 53, 59, 61, 67]\n\n2nd prime list generated form combination of 1st prime list = [3137, \n5953, 5347, 6761, 3761, 4337, 6737, 6131, 3767, 4759, 4153, 3167, \n4159, 6143]\n\nsmallest prime in 2nd list=3137\nlargest prime in 2nd list=6761\n\nTherefore, the last number of a Fibonacci series i.e. 14th \nFibonacci number in the series that has 3137 and 6761 as the first \n2 numbers is 2027041\n'''\n\n# test cases passed , private cases failed\n\n# https://www.rookieslab.com/posts/fastest-way-to-check-if-a-number-is-prime-or-not\n# seive of Eratosthenes method\n\n# N = 100\n# is_prime = [1]*N\n# is_prime[0] = 0\n# is_prime[1] = 0\n\n# https://www.geeksforgeeks.org/python-program-to-check-whether-a-number-is-prime-or-not/\ndef isPrime(n): # use to find if number is prime in 2nd list\n \n # Corner cases \n if (n <= 1) : \n return False\n if (n <= 3) : \n return True\n \n # This is checked so that we can skip \n # middle five numbers in below loop \n if (n % 2 == 0 or n % 3 == 0) : \n return False\n \n i = 5\n while(i * i <= n) : \n if (n % i == 0 or n % (i + 2) == 0) : \n return False\n i = i + 6\n \n return True\n\ndef primeList(n1, n2):\n l = []\n for n in range(n1, n2+1):\n if isPrime(n):\n l.append(n)\n return l\n\nn1, n2 = map(int, input().split())\nl1 = primeList(n1,n2)\n# print(l1) - check if first list of prime numbers matches\n\n#combining\nl2 = list()\nl = len(l1)\nfor i in range(l):\n for j in range(l):\n if i == j:\n continue\n l2.append(str(l1[i])+str(l1[j]))\n\nl3 = primeList(int(l2[0]),int(l2[-1])) \n# list of primes from the second list\nl4 = []\nfor i in l3:\n if str(i) in l2:\n l4.append(i)\n# print(l4) - check if secin list of prime numbers matches\n\nx = min(l4)\ny = max(l4)\ncount = len(l4)\n# print(x,y,count) - check if smallest, largest prime and count match\nfor i in range(2,count):\n f = x + y\n x = y\n y = f\nprint(y)", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import pymysql

# PyMySQL 1.0+ removed positional connection arguments, so they are passed
# by keyword here.
db = pymysql.connect(host="localhost", user="root", password="",
                     database="order_db", use_unicode=True, charset="utf8")
cursor = db.cursor()
sql = "DROP TABLE custdetail"
cursor.execute(sql)
db.close()
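For comparison, the same statement with context-managed cursor handling and an IF EXISTS guard, so a missing table does not raise — a minimal sketch, not part of the original script:

import pymysql

db = pymysql.connect(host="localhost", user="root", password="",
                     database="order_db", charset="utf8")
try:
    # PyMySQL cursors support the context-manager protocol and close
    # themselves on exit; the connection is closed explicitly below.
    with db.cursor() as cursor:
        cursor.execute("DROP TABLE IF EXISTS custdetail")
finally:
    db.close()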
normal
{ "blob_id": "1aa2bff245322a34438cc836e23f430926dfac6c", "index": 3414, "step-1": "<mask token>\n", "step-2": "<mask token>\ncursor.execute(sql)\ndb.close()\n", "step-3": "<mask token>\ndb = pymysql.connect('localhost', 'root', '', 'order_db', use_unicode=True,\n charset='utf8')\ncursor = db.cursor()\nsql = 'DROP TABLE custdetail'\ncursor.execute(sql)\ndb.close()\n", "step-4": "import pymysql\ndb = pymysql.connect('localhost', 'root', '', 'order_db', use_unicode=True,\n charset='utf8')\ncursor = db.cursor()\nsql = 'DROP TABLE custdetail'\ncursor.execute(sql)\ndb.close()\n", "step-5": "import pymysql\ndb = pymysql.connect( \"localhost\", \"root\", \"\", \"order_db\",\n use_unicode=True, charset=\"utf8\") \ncursor = db.cursor()\nsql = \"DROP TABLE custdetail\"\ncursor.execute(sql)\ndb.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import defaultdict from typing import List, Dict, Optional, Tuple import torch.jit from torch import nn as nn from parlai.core.dict import DictionaryAgent from parlai.core.torch_agent import TorchAgent from parlai.utils.bpe import Gpt2BpeHelper class TorchScriptGreedySearch(nn.Module): """ A helper class for exporting simple greedy-search models via TorchScript. Models with extra inputs will need to override to include more variables. """ # We currently only support these specific dictionary settings CAIRAOKE_DICT_PARAMS = { "dict_class": "parlai.core.dict:DictionaryAgent", "dict_initpath": None, "dict_language": "english", "dict_max_ngram_size": -1, "dict_minfreq": 0, "dict_maxtokens": -1, "dict_tokenizer": "gpt2", "dict_lower": False, "dict_textfields": "text,labels", "dict_loaded": True, 'bpe_debug': False, } def __init__(self, agent: TorchAgent): super().__init__() self.is_bart = agent.opt['model'] == 'bart' # Dictionary/tokenization setup for key, val in self.CAIRAOKE_DICT_PARAMS.items(): assert ( agent.opt.get(key, val) == val ), f'The only currently supported value of "{key}" is {val}!' orig_dict: DictionaryAgent = agent.dict orig_bpe: Gpt2BpeHelper = orig_dict.bpe assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys()) assert not any( i for key in orig_bpe.bpe_ranks.keys() for i in key if '\n' in i ), "We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!" fused_key_bpe_ranks = { '\n'.join(key): float(val) for key, val in orig_bpe.bpe_ranks.items() } # Cast the values as floats to be able to compare to float('inf') when doing BPE # splitting self.dict = ScriptableDictionaryAgent( null_token=orig_dict.null_token, end_token=orig_dict.end_token, unk_token=orig_dict.unk_token, start_token=orig_dict.start_token, freq=orig_dict.freq, tok2ind=orig_dict.tok2ind, ind2tok=orig_dict.ind2tok, bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'], bpe_encoder=orig_bpe.encoder, bpe_byte_encoder=orig_bpe.byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks, special_tokens=agent._get_special_tokens(), ) # History tracking and start/end tokens self.delimiter_tok = agent.history.delimiter_tok self.history_size = agent.opt['history_size'] if agent.opt.get('history_add_global_end_token', None) is not None: self.global_end_token = agent.dict[agent.dict.end_token] else: self.global_end_token = None self.text_truncate = agent.opt.get('text_truncate') or agent.opt['truncate'] self.text_truncate = self.text_truncate if self.text_truncate >= 0 else None self.start_idx = agent.model.START_IDX self.end_idx = agent.model.END_IDX self.null_idx = agent.model.NULL_IDX if self.is_bart: self.initial_decoder_input = [self.end_idx, self.start_idx] else: self.initial_decoder_input = [self.start_idx] agent.model.eval() # Create versions of the model and decoder that will flatten the incremental # state dict, as required by TorchScript wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder) wrapped_model = ModelIncrStateFlattener(agent.model) # Create sample inputs for tracing sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long) encoder_states = agent.model.encoder(sample_tokens) initial_generations = 
self._get_initial_decoder_input(sample_tokens) latent, initial_incr_state = wrapped_decoder( initial_generations, encoder_states ) logits = agent.model.output(latent[:, -1:, :]) _, preds = logits.max(dim=2) incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()} # Copy the initial incremental state, used when tracing the # .reorder_decoder_incremental_state() method below, to avoid having it be # mutated by the following line incr_state = wrapped_model.reorder_decoder_incremental_state( incr_state, torch.tensor([0], dtype=torch.long, device=sample_tokens.device) ) generations = torch.cat([initial_generations, preds], dim=1) # Do tracing self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens) self.decoder_first_pass = torch.jit.trace( wrapped_decoder, (initial_generations, encoder_states), strict=False ) # We do strict=False to avoid an error when passing a Dict out of # decoder.forward() self.partially_traced_model = torch.jit.trace_module( wrapped_model, { 'output': (latent[:, -1:, :]), 'reorder_decoder_incremental_state': ( initial_incr_state, torch.tensor([0], dtype=torch.long, device=sample_tokens.device), ), }, strict=False, ) self.decoder_later_pass = torch.jit.trace( wrapped_decoder, (generations, encoder_states, incr_state), strict=False ) def _get_initial_decoder_input(self, x: torch.Tensor) -> torch.Tensor: """ Workaround because we can't use TGM._get_initial_decoder_input() directly. When we try to call that function, we get a "RuntimeError: Type 'Tuple[int, int]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and Tuples of Tensors can be traced" error. """ bsz = x.size(0) return ( torch.tensor(self.initial_decoder_input, dtype=torch.long) .expand(bsz, len(self.initial_decoder_input)) .to(x.device) ) def parse(self, text: str) -> List[int]: return self.dict.txt2vec(text) def _v2t(self, vec: List[int]) -> str: """ Convert token indices to string of tokens. 
""" new_vec: List[int] = [] for i in vec: if i == self.end_idx: break elif i != self.start_idx: new_vec.append(i) return self.dict.vec2txt(new_vec) def forward(self, context: str, max_len: int = 128) -> str: # Vectorize all lines of context history_vecs: List[List[int]] = [] context_lines = context.split('\n') if self.history_size > 0: context_lines = context_lines[-self.history_size :] for line in context_lines: history_vecs.append(self.parse(line)) # Get full history vec text_vecs: List[List[int]] = [] for vec in history_vecs[:-1]: text_vecs += [vec] text_vecs += [self.delimiter_tok] text_vecs += [history_vecs[-1]] if self.global_end_token is not None: text_vecs += [[self.global_end_token]] # Flatten text_vecs flattened_text_vec: List[int] = [] for vec in text_vecs: for token in vec: flattened_text_vec.append(token) # Format history vec given various logic if self.text_truncate is not None: if self.is_bart: truncate_length = self.text_truncate - 2 # Start and end tokens else: truncate_length = self.text_truncate if len(flattened_text_vec) > truncate_length: flattened_text_vec = flattened_text_vec[-truncate_length:] flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long) if self.is_bart: flattened_text_vec = torch.cat( [ torch.tensor([self.start_idx], dtype=torch.long), flattened_text_vec, torch.tensor([self.end_idx], dtype=torch.long), ], dim=0, ) # Pass through the encoder and decoder to generate tokens batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0) # Add batch dim encoder_states = self.encoder(batch_text_vec) generations = self._get_initial_decoder_input(batch_text_vec) # keep track of early stopping if all generations finish seen_end = torch.zeros( batch_text_vec.size(0), device=batch_text_vec.device, dtype=torch.bool ) incr_state: Dict[str, torch.Tensor] = {} for token_idx in range(max_len): if token_idx == 0: latent, incr_state = self.decoder_first_pass( generations, encoder_states ) else: latent, incr_state = self.decoder_later_pass( generations, encoder_states, incr_state ) logits = self.partially_traced_model.output(latent[:, -1:, :]) _, preds = logits.max(dim=2) incr_state = self.partially_traced_model.reorder_decoder_incremental_state( incr_state, torch.tensor([0], dtype=torch.long, device=batch_text_vec.device), ) seen_end = seen_end + (preds == self.end_idx).squeeze(1) generations = torch.cat([generations, preds], dim=1) if torch.all(seen_end): break # Get the label from the generated tokens and update the history if self.is_bart: assert generations[0, 0].item() == self.end_idx generations = generations[:, 1:] # Hack: remove initial end token. I haven't found in the code where this is # done, but it seems to happen early on during generation generation_tokens: List[int] = generations[0].tolist() label = self._v2t(generation_tokens) return label class BaseIncrStateFlattener(nn.Module): """ Flatten/unflatten the incremental state for use with TorchScripting. Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str, torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type, and previous key/value/mask, respectively. However, TorchScript expects dicts to be of type Dict[str, torch.Tensor], and thus all input incremental states when TorchScripting will have to be of that type. We thus unflatten the input incremental state, already of type Dict[str, torch.Tensor], to pass it into whatever method needs it, and we flatten it again after the updated incremental state is passed back out. 
This is a base class that provides methods for flattening/unflattening: subclasses will call these methods as the incremental state is passed into and out of their own methods. """ def __init__(self, module: nn.Module): super().__init__() self.module = module def _unflatten_incr_state( self, flat_incr_state: Dict[str, torch.Tensor] ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]: """ Unflatten the input incremental state. For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in structured_incr_state[0]['self_attn']['prev_key']. """ structured_incr_state = defaultdict(lambda: defaultdict(dict)) for key, state in flat_incr_state.items(): layer_idx_str, attn_type, state_type = key.split('__') structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state return dict({k: dict(v) for k, v in structured_incr_state.items()}) # Turn the nested defaultdicts back into regular dicts def _flatten_incr_state( self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]] ) -> Dict[str, torch.Tensor]: """ Flatten the input incremental state. For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored in flat_incr_state['layer_0__self_attn__prev_key']. """ flat_incr_state = {} for layer_idx, dict1 in structured_incr_state.items(): for attn_type, dict2 in dict1.items(): for state_type, state in dict2.items(): key = f'{layer_idx:d}__{attn_type}__{state_type}' flat_incr_state[key] = state return flat_incr_state class DecoderIncrStateFlattener(BaseIncrStateFlattener): """ Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state. Unflattening/flattening will occur before passing the incremental state into and out of .forward(). """ def forward( self, input_: torch.LongTensor, encoder_state: Tuple[torch.Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if flat_incr_state is not None: structured_incr_state = self._unflatten_incr_state(flat_incr_state) else: structured_incr_state = None tensor, new_structured_incr_state = self.module.forward( input=input_, encoder_state=encoder_state, incr_state=structured_incr_state ) new_flat_incr_state = self._flatten_incr_state(new_structured_incr_state) return tensor, new_flat_incr_state class ModelIncrStateFlattener(BaseIncrStateFlattener): """ Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state. Unflattening/flattening will occur before passing the incremental state into and out of .reorder_decoder_incremental_state(). We also support .output(), which is also traced. """ def reorder_decoder_incremental_state( self, flat_incr_state: Dict[str, torch.Tensor], inds: torch.Tensor ) -> Dict[str, torch.Tensor]: structured_incr_state = self._unflatten_incr_state(flat_incr_state) new_structured_incr_state = self.module.reorder_decoder_incremental_state( incremental_state=structured_incr_state, inds=inds ) return self._flatten_incr_state(new_structured_incr_state) def output(self, tensor: torch.Tensor) -> torch.Tensor: return self.module.output(tensor) @torch.jit.script class ScriptableGpt2BpeHelper(object): """ Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted. """ @classmethod def findall(cls, text: str) -> List[str]: """ Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper. 
""" contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd'] tokens: List[str] = [] idx = 0 num_passes = 0 while idx < len(text): num_passes += 1 if num_passes > 10000: return ['*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'] if text[idx] == "'": # Capture contradiction suffixes captured_suffix = False for ending in contraction_endings: if text[idx + 1 : idx + 1 + len(ending)] == ending: tokens.append("'" + ending) idx += 1 + len(ending) captured_suffix = True break if captured_suffix: continue if not text[idx].isspace() or ( text[idx] == ' ' and idx + 1 < len(text) and not text[idx + 1].isspace() ): # Capture runs of one type of character if text[idx] == ' ': last_matching_idx = idx + 1 else: last_matching_idx = idx if text[last_matching_idx].isalpha(): while ( last_matching_idx + 1 < len(text) and text[last_matching_idx + 1].isalpha() ): last_matching_idx += 1 elif text[last_matching_idx].isnumeric(): while ( last_matching_idx + 1 < len(text) and text[last_matching_idx + 1].isnumeric() ): last_matching_idx += 1 else: while ( last_matching_idx + 1 < len(text) and not text[last_matching_idx + 1].isspace() and not text[last_matching_idx + 1].isalpha() and not text[last_matching_idx + 1].isnumeric() ): last_matching_idx += 1 tokens.append(text[idx : last_matching_idx + 1]) idx = last_matching_idx + 1 continue if idx + 1 < len(text) and text[idx + 1].isspace(): # Capture runs of space characters up until just before the final one last_space_idx = idx + 1 while ( last_space_idx + 1 < len(text) and text[last_space_idx + 1].isspace() ): last_space_idx += 1 if last_space_idx + 1 == len(text): # Include the last char, which is a space char tokens.append(text[idx : last_space_idx + 1]) idx = last_space_idx + 1 else: tokens.append(text[idx:last_space_idx]) idx = last_space_idx continue if True: # Capture runs of space characters last_space_idx = idx while ( last_space_idx + 1 < len(text) and text[last_space_idx + 1].isspace() ): last_space_idx += 1 tokens.append(text[idx : last_space_idx + 1]) idx = last_space_idx + 1 return tokens def __init__( self, add_prefix_space: bool, encoder: Dict[str, str], byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str], ): self.add_prefix_space = add_prefix_space self.encoder = encoder self.decoder: Dict[str, str] = {} for k, v in self.encoder.items(): self.decoder[v] = k self.byte_encoder = byte_encoder self.byte_decoder: Dict[str, int] = {} for k, v in self.byte_encoder.items(): self.byte_decoder[v] = k self.bpe_ranks = fused_key_bpe_ranks # special tokens self._special_tokens: Dict[str, int] = {} for st in special_tokens: self._special_tokens[st] = 1 def encode(self, text: str) -> List[str]: """ Tokenize text. Checks for add_prefix_space; handles accordingly. 
:param text: text to tokenize :return tokens: A list of tokens """ if self.add_prefix_space: text = f' {text}' # constants for readability FINAL = 1 SPLITABLE = 0 pieces: List[Tuple[str, int]] = [(text, SPLITABLE)] for special_token in self._special_tokens.keys(): i = 0 while i < len(pieces): subtext, status = pieces[i] if status == FINAL: i += 1 continue split = subtext.split(special_token) if len(split) > 1: # special token detected, replace the chunk with small subchunks # split by the special token pieces.pop(i) for j, piece in enumerate(split): if j > 0: # add the special token as a delimiter pieces.insert(i + j, (special_token, FINAL)) pieces.insert(i + j + int(j > 0), (piece, SPLITABLE)) else: i += 1 output: List[str] = [] for piece, state in pieces: if state is FINAL: output.append(piece) else: output += self.helper_encode(piece) text = ''.join(output) return output def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]: """ Return set of symbol pairs in a word. Word is represented as list of symbols (symbols being variable-length strings). :param word: word to symbolize :return pairs: set of tuples of symbols """ pairs: List[Tuple[str, str]] = [] prev_char = word[0] for char in word[1:]: pairs.append((prev_char, char)) prev_char = char return pairs def bpe(self, word: List[str]) -> List[str]: """ Convert token to BPE. :param word: list of tokens token to convert :return bpe_encoding: string bpe encoding """ pairs = self.get_pairs(word) if len(pairs) == 0: return word while True: min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf')) bigram = pairs[0] for pair in pairs[1:]: current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf')) if current_rank < min_rank: min_rank = current_rank bigram = pair if '\n'.join(bigram) not in self.bpe_ranks: break first, second = bigram new_word: List[str] = [] i = 0 while i < len(word): found = False for j in range(i, len(word)): if word[j] == first: new_word.extend(word[i:j]) i = j found = True break if not found: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 word = new_word.copy() if len(word) == 1: break else: pairs = self.get_pairs(word) return word def helper_encode(self, text: str) -> List[str]: """ Tokenize text. :param text: text to tokenize :return tokens: A list of tokens """ bpe_tokens: List[str] = [] for token in self.findall(text): byte_encoded: List[str] = [] for b in token: byte_encoded.append(self.byte_encoder[ord(b)]) encoded: List[str] = [] for bpe_token in self.bpe(byte_encoded): encoded.append(self.encoder[bpe_token]) bpe_tokens.extend(encoded) return bpe_tokens def decode(self, tokens: List[str]) -> str: """ Decode list of tokens into a text string. :param tokens: list of tokens :return text: decoded text """ output: List[str] = [] accum: List[str] = [] for token in tokens: if token in self._special_tokens: if len(accum) > 0: output.append(self.helper_decode(accum)) accum.clear() output.append(token) else: accum.append(token) if len(accum) > 0: output.append(self.helper_decode(accum)) text = ''.join(output) if self.add_prefix_space: assert text.startswith(' ') text = text.lstrip(' ') return text def helper_decode(self, tokens: List[str]) -> str: """ Decode list of tokens into text string. 
:param tokens: list of tokens :return: decoded text """ chars: List[str] = [] for token in tokens: decoded_token = self.decoder[token] token_chars = self.utf8_chars(decoded_token) for char in token_chars: if not torch.jit.is_scripting(): # We iterate over "char", which is supposed to be a single # character, because the TorchScripted version of the code # correctly splits a string into single characters in # self.utf8_chars() but the non-TorchScripted version doesn't chars.extend(list(char)) else: chars.append(char) decoded_chars: List[str] = [] for char in chars: decoded_chars.append(chr(self.byte_decoder[char])) return ''.join(decoded_chars) def utf8_chars(self, s: str) -> List[str]: """ An implementation of UTF8 character iteration in TorchScript. There are no bitwise operations in torchscript, so we compare directly to integer values. There isn't a lot of validation, for instance if you pass in an improperly encoded string with an out-of-place continuation byte, or with a non-left-to- right byte order, you'll get unexpected results and likely throw. Torch itself takes in unicode strings and encodes them as UTF8, so that should be actively hard to do. The logic is simple: looking at the current start-of-character byte. If its high bit is 0, it's a 1-byte character. Otherwise, the number of bytes is the number of leading 1s in its binary representation, so find that number by comparing it directly to ints with the appropriate representation, then append that many bytes as a character and move past them to the next start byte. From pytext.torchscript.utils. """ chars: List[str] = [] i = 0 while i < len(s): byte = ord(s[i]) if byte < 0b10000000: chars.append(s[i]) i += 1 else: if byte < 0b11100000: num_bytes = 2 elif byte < 0b11110000: num_bytes = 3 elif byte < 0b11111000: num_bytes = 4 elif byte < 0b11111100: num_bytes = 5 elif byte < 0b11111110: num_bytes = 6 elif byte < 0b11111111: num_bytes = 7 else: num_bytes = 8 chars.append(s[i : i + num_bytes]) i += num_bytes return chars @torch.jit.script class ScriptableDictionaryAgent: """ Builds and/or loads a dictionary. All code is TorchScriptable. """ def __init__( self, null_token: str, end_token: str, unk_token: str, start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int], ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder: Dict[str, str], bpe_byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str], ): self.null_token = null_token self.end_token = end_token self.unk_token = unk_token self.start_token = start_token self.freq = freq self.tok2ind = tok2ind self.ind2tok = ind2tok # cache unk token for later self._unk_token_idx = self.tok2ind[self.unk_token] # Initialize tokenizer self.bpe = ScriptableGpt2BpeHelper( add_prefix_space=bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks, special_tokens=special_tokens, ) def _word_lookup(self, key: str) -> int: """ Return index from token, or unk_token's index, or None. """ if key in self.tok2ind: return self.tok2ind[key] else: return self._unk_token_idx def _index_lookup(self, key: int) -> str: """ Return token from index, or unk_token. """ if key in self.ind2tok: return self.ind2tok[key] else: return self.unk_token def gpt2_tokenize(self, text: str): """ Tokenize using Gpt2 BPE tokenizer. """ return self.bpe_tokenize(text) def tokenize(self, text: str) -> List[str]: """ Return a sequence of tokens from the iterable. 
Also handles special tokens for some tokenizers """ # calls the selected tokenizer function e.g. 're' => re_tokenize(text) word_tokens = self.gpt2_tokenize(text) return word_tokens def bpe_tokenize(self, text: str) -> List[str]: """ Return a sequence of BPE-tokens from the text. """ return self.bpe.encode(text) def txt2vec(self, text: str) -> List[int]: """ Convert a string to a vector (list of ints). First runs a sentence tokenizer, then a word tokenizer. """ itr: List[int] = [] for token in self.tokenize(str(text)): itr.append(self._word_lookup(token)) return itr def vec2txt(self, vector: List[int]) -> str: """ Convert a vector of IDs to a string. Converts a vector (iterable of ints) into a string, with each token separated by the delimiter (default ``' '``). """ tokens = [self._index_lookup(idx) for idx in vector] text = self.bpe.decode(tokens) return text
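For context, a sketch of how this wrapper is typically driven: build a ParlAI agent, wrap it, script the wrapped module, and save it. The model file and output path below are illustrative placeholders, and the agent must satisfy the gpt2 dictionary settings asserted in __init__; ParlAI's own torchscript export script follows essentially this flow:

from parlai.core.agents import create_agent
from parlai.core.params import ParlaiParser

# Build an opt the usual ParlAI way; the model file is a placeholder.
parser = ParlaiParser(add_parlai_args=True, add_model_args=True)
opt = parser.parse_args(['--model-file', 'path/to/gpt2_dict_generator/model'])

agent = create_agent(opt, requireModelExists=True)
instance = TorchScriptGreedySearch(agent)  # traces submodules in __init__
scripted_module = torch.jit.script(instance)
scripted_module.save('greedy_search_model.pt')  # illustrative output path

loaded = torch.jit.load('greedy_search_model.pt')
print(loaded('hello, how are you?', 32))  # greedy decode, up to 32 tokens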
normal
{ "blob_id": "27d5ff5b0253eea36d6b492e929c4220f4b4a5eb", "index": 1564, "step-1": "<mask token>\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n <mask token>\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\[email protected]\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 
1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = 
text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\[email protected]\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the 
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from collections import defaultdict
from typing import List, Dict, Optional, Tuple

import torch.jit
from torch import nn as nn

from parlai.core.dict import DictionaryAgent
from parlai.core.torch_agent import TorchAgent
from parlai.utils.bpe import Gpt2BpeHelper


class TorchScriptGreedySearch(nn.Module):
    """
    A helper class for exporting simple greedy-search models via TorchScript.

    Models with extra inputs will need to override to include more variables.
    """

    # We currently only support these specific dictionary settings
    CAIRAOKE_DICT_PARAMS = {
        "dict_class": "parlai.core.dict:DictionaryAgent",
        "dict_initpath": None,
        "dict_language": "english",
        "dict_max_ngram_size": -1,
        "dict_minfreq": 0,
        "dict_maxtokens": -1,
        "dict_tokenizer": "gpt2",
        "dict_lower": False,
        "dict_textfields": "text,labels",
        "dict_loaded": True,
        'bpe_debug': False,
    }

    def __init__(self, agent: TorchAgent):
        super().__init__()

        self.is_bart = agent.opt['model'] == 'bart'

        # Dictionary/tokenization setup
        for key, val in self.CAIRAOKE_DICT_PARAMS.items():
            assert (
                agent.opt.get(key, val) == val
            ), f'The only currently supported value of "{key}" is {val}!'
        orig_dict: DictionaryAgent = agent.dict
        orig_bpe: Gpt2BpeHelper = orig_dict.bpe
        assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())
        assert not any(
            i for key in orig_bpe.bpe_ranks.keys() for i in key if '\n' in i
        ), "We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!"
        fused_key_bpe_ranks = {
            '\n'.join(key): float(val) for key, val in orig_bpe.bpe_ranks.items()
        }
        # Cast the values as floats to be able to compare to float('inf') when doing
        # BPE splitting
        self.dict = ScriptableDictionaryAgent(
            null_token=orig_dict.null_token,
            end_token=orig_dict.end_token,
            unk_token=orig_dict.unk_token,
            start_token=orig_dict.start_token,
            freq=orig_dict.freq,
            tok2ind=orig_dict.tok2ind,
            ind2tok=orig_dict.ind2tok,
            bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],
            bpe_encoder=orig_bpe.encoder,
            bpe_byte_encoder=orig_bpe.byte_encoder,
            fused_key_bpe_ranks=fused_key_bpe_ranks,
            special_tokens=agent._get_special_tokens(),
        )

        # History tracking and start/end tokens
        self.delimiter_tok = agent.history.delimiter_tok
        self.history_size = agent.opt['history_size']
        if agent.opt.get('history_add_global_end_token', None) is not None:
            self.global_end_token = agent.dict[agent.dict.end_token]
        else:
            self.global_end_token = None
        self.text_truncate = agent.opt.get('text_truncate') or agent.opt['truncate']
        self.text_truncate = self.text_truncate if self.text_truncate >= 0 else None

        self.start_idx = agent.model.START_IDX
        self.end_idx = agent.model.END_IDX
        self.null_idx = agent.model.NULL_IDX
        if self.is_bart:
            self.initial_decoder_input = [self.end_idx, self.start_idx]
        else:
            self.initial_decoder_input = [self.start_idx]

        agent.model.eval()

        # Create versions of the model and decoder that will flatten the incremental
        # state dict, as required by TorchScript
        wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)
        wrapped_model = ModelIncrStateFlattener(agent.model)

        # Create sample inputs for tracing
        sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)
        encoder_states = agent.model.encoder(sample_tokens)
        initial_generations = self._get_initial_decoder_input(sample_tokens)
        latent, initial_incr_state = wrapped_decoder(
            initial_generations, encoder_states
        )
        logits = agent.model.output(latent[:, -1:, :])
        _, preds = logits.max(dim=2)
        # Copy the initial incremental state, used when tracing the
        # .reorder_decoder_incremental_state() method below, to avoid having it be
        # mutated by the following reorder call
        incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}
        incr_state = wrapped_model.reorder_decoder_incremental_state(
            incr_state, torch.tensor([0], dtype=torch.long, device=sample_tokens.device)
        )
        generations = torch.cat([initial_generations, preds], dim=1)

        # Do tracing
        self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)
        self.decoder_first_pass = torch.jit.trace(
            wrapped_decoder, (initial_generations, encoder_states), strict=False
        )
        # We do strict=False to avoid an error when passing a Dict out of
        # decoder.forward()
        self.partially_traced_model = torch.jit.trace_module(
            wrapped_model,
            {
                'output': (latent[:, -1:, :]),
                'reorder_decoder_incremental_state': (
                    initial_incr_state,
                    torch.tensor([0], dtype=torch.long, device=sample_tokens.device),
                ),
            },
            strict=False,
        )
        self.decoder_later_pass = torch.jit.trace(
            wrapped_decoder, (generations, encoder_states, incr_state), strict=False
        )

    def _get_initial_decoder_input(self, x: torch.Tensor) -> torch.Tensor:
        """
        Workaround because we can't use TGM._get_initial_decoder_input() directly.

        When we try to call that function, we get a "RuntimeError: Type 'Tuple[int,
        int]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and
        Tuples of Tensors can be traced" error.
        """
        bsz = x.size(0)
        return (
            torch.tensor(self.initial_decoder_input, dtype=torch.long)
            .expand(bsz, len(self.initial_decoder_input))
            .to(x.device)
        )

    def parse(self, text: str) -> List[int]:
        return self.dict.txt2vec(text)

    def _v2t(self, vec: List[int]) -> str:
        """
        Convert token indices to string of tokens.
        """
        new_vec: List[int] = []
        for i in vec:
            if i == self.end_idx:
                break
            elif i != self.start_idx:
                new_vec.append(i)
        return self.dict.vec2txt(new_vec)

    def forward(self, context: str, max_len: int = 128) -> str:

        # Vectorize all lines of context
        history_vecs: List[List[int]] = []
        context_lines = context.split('\n')
        if self.history_size > 0:
            context_lines = context_lines[-self.history_size :]
        for line in context_lines:
            history_vecs.append(self.parse(line))

        # Get full history vec
        text_vecs: List[List[int]] = []
        for vec in history_vecs[:-1]:
            text_vecs += [vec]
            text_vecs += [self.delimiter_tok]
        text_vecs += [history_vecs[-1]]
        if self.global_end_token is not None:
            text_vecs += [[self.global_end_token]]

        # Flatten text_vecs
        flattened_text_vec: List[int] = []
        for vec in text_vecs:
            for token in vec:
                flattened_text_vec.append(token)

        # Format history vec given various logic
        if self.text_truncate is not None:
            if self.is_bart:
                truncate_length = self.text_truncate - 2  # Start and end tokens
            else:
                truncate_length = self.text_truncate
            if len(flattened_text_vec) > truncate_length:
                flattened_text_vec = flattened_text_vec[-truncate_length:]
        flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)
        if self.is_bart:
            flattened_text_vec = torch.cat(
                [
                    torch.tensor([self.start_idx], dtype=torch.long),
                    flattened_text_vec,
                    torch.tensor([self.end_idx], dtype=torch.long),
                ],
                dim=0,
            )

        # Pass through the encoder and decoder to generate tokens
        batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0)  # Add batch dim
        encoder_states = self.encoder(batch_text_vec)
        generations = self._get_initial_decoder_input(batch_text_vec)
        # Keep track of early stopping if all generations finish
        seen_end = torch.zeros(
            batch_text_vec.size(0), device=batch_text_vec.device, dtype=torch.bool
        )
        incr_state: Dict[str, torch.Tensor] = {}
        for token_idx in range(max_len):
            if token_idx == 0:
                latent, incr_state = self.decoder_first_pass(
                    generations, encoder_states
                )
            else:
                latent, incr_state = self.decoder_later_pass(
                    generations, encoder_states, incr_state
                )
            logits = self.partially_traced_model.output(latent[:, -1:, :])
            _, preds = logits.max(dim=2)
            incr_state = self.partially_traced_model.reorder_decoder_incremental_state(
                incr_state,
                torch.tensor([0], dtype=torch.long, device=batch_text_vec.device),
            )
            seen_end = seen_end + (preds == self.end_idx).squeeze(1)
            generations = torch.cat([generations, preds], dim=1)
            if torch.all(seen_end):
                break

        # Get the label from the generated tokens and update the history
        if self.is_bart:
            assert generations[0, 0].item() == self.end_idx
            generations = generations[:, 1:]
            # Hack: remove initial end token. I haven't found in the code where this
            # is done, but it seems to happen early on during generation
        generation_tokens: List[int] = generations[0].tolist()
        label = self._v2t(generation_tokens)

        return label
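

# The helper below is an illustrative sketch, not part of the original module: it
# shows one plausible way to export the wrapper above, assuming `agent` is a ParlAI
# TorchAgent whose options satisfy CAIRAOKE_DICT_PARAMS. torch.jit.script() compiles
# the eager methods (tokenization, the greedy loop) around the traced
# encoder/decoder submodules built in __init__.
def _example_export(agent: TorchAgent, output_path: str = 'greedy_search.pt') -> None:
    module = TorchScriptGreedySearch(agent)
    scripted_module = torch.jit.script(module)
    scripted_module.save(output_path)
    # The saved artifact can then be loaded and run without ParlAI:
    #     loaded = torch.jit.load(output_path)
    #     print(loaded('hello world'))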


class BaseIncrStateFlattener(nn.Module):
    """
    Flatten/unflatten the incremental state for use with TorchScripting.

    Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str,
    torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,
    and previous key/value/mask, respectively. However, TorchScript expects dicts to be
    of type Dict[str, torch.Tensor], and thus all input incremental states when
    TorchScripting will have to be of that type. We thus unflatten the input incremental
    state, already of type Dict[str, torch.Tensor], to pass it into whatever method
    needs it, and we flatten it again after the updated incremental state is passed back
    out.

    This is a base class that provides methods for flattening/unflattening: subclasses
    will call these methods as the incremental state is passed into and out of their own
    methods.
    """

    def __init__(self, module: nn.Module):
        super().__init__()
        self.module = module

    def _unflatten_incr_state(
        self, flat_incr_state: Dict[str, torch.Tensor]
    ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:
        """
        Unflatten the input incremental state.

        For instance, flat_incr_state['0__self_attn__prev_key'] will be stored in
        structured_incr_state[0]['self_attn']['prev_key'].
        """
        structured_incr_state = defaultdict(lambda: defaultdict(dict))
        for key, state in flat_incr_state.items():
            layer_idx_str, attn_type, state_type = key.split('__')
            structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state
        # Turn the nested defaultdicts back into regular dicts
        return dict({k: dict(v) for k, v in structured_incr_state.items()})

    def _flatten_incr_state(
        self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]]
    ) -> Dict[str, torch.Tensor]:
        """
        Flatten the input incremental state.

        For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
        in flat_incr_state['0__self_attn__prev_key'].
        """
        flat_incr_state = {}
        for layer_idx, dict1 in structured_incr_state.items():
            for attn_type, dict2 in dict1.items():
                for state_type, state in dict2.items():
                    key = f'{layer_idx:d}__{attn_type}__{state_type}'
                    flat_incr_state[key] = state
        return flat_incr_state


class DecoderIncrStateFlattener(BaseIncrStateFlattener):
    """
    Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.

    Unflattening/flattening will occur before passing the incremental state into and out
    of .forward().
    """

    def forward(
        self,
        input_: torch.LongTensor,
        encoder_state: Tuple[torch.Tensor, torch.Tensor],
        flat_incr_state: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        if flat_incr_state is not None:
            structured_incr_state = self._unflatten_incr_state(flat_incr_state)
        else:
            structured_incr_state = None
        tensor, new_structured_incr_state = self.module.forward(
            input=input_, encoder_state=encoder_state, incr_state=structured_incr_state
        )
        new_flat_incr_state = self._flatten_incr_state(new_structured_incr_state)
        return tensor, new_flat_incr_state


class ModelIncrStateFlattener(BaseIncrStateFlattener):
    """
    Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.

    Unflattening/flattening will occur before passing the incremental state into and out
    of .reorder_decoder_incremental_state(). We also support .output(), which is also
    traced.
    """

    def reorder_decoder_incremental_state(
        self, flat_incr_state: Dict[str, torch.Tensor], inds: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        structured_incr_state = self._unflatten_incr_state(flat_incr_state)
        new_structured_incr_state = self.module.reorder_decoder_incremental_state(
            incremental_state=structured_incr_state, inds=inds
        )
        return self._flatten_incr_state(new_structured_incr_state)

    def output(self, tensor: torch.Tensor) -> torch.Tensor:
        return self.module.output(tensor)
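

# Illustrative self-check, not part of the original module: it demonstrates the
# '__'-delimited key scheme that the flattening helpers above rely on. The attention
# type ('self_attn') and state type ('prev_key') follow ParlAI's decoder convention;
# the wrapped nn.Identity() is a stand-in, since only the dict helpers are exercised.
def _demo_incr_state_key_scheme() -> None:
    flattener = BaseIncrStateFlattener(nn.Identity())
    structured = {0: {'self_attn': {'prev_key': torch.zeros(1, 2, 4)}}}
    flat = flattener._flatten_incr_state(structured)
    assert list(flat.keys()) == ['0__self_attn__prev_key']
    # Unflattening recovers the original layer/attention/state nesting
    roundtrip = flattener._unflatten_incr_state(flat)
    assert roundtrip[0]['self_attn']['prev_key'].shape == (1, 2, 4)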


@torch.jit.script
class ScriptableGpt2BpeHelper(object):
    """
    Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
    """

    @classmethod
    def findall(cls, text: str) -> List[str]:
        """
        Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
        """
        contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']

        tokens: List[str] = []
        idx = 0
        num_passes = 0
        while idx < len(text):
            num_passes += 1
            if num_passes > 10000:
                return ['*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***']
            if text[idx] == "'":
                # Capture contraction suffixes
                captured_suffix = False
                for ending in contraction_endings:
                    if text[idx + 1 : idx + 1 + len(ending)] == ending:
                        tokens.append("'" + ending)
                        idx += 1 + len(ending)
                        captured_suffix = True
                        break
                if captured_suffix:
                    continue
            if not text[idx].isspace() or (
                text[idx] == ' ' and idx + 1 < len(text) and not text[idx + 1].isspace()
            ):
                # Capture runs of one type of character
                if text[idx] == ' ':
                    last_matching_idx = idx + 1
                else:
                    last_matching_idx = idx
                if text[last_matching_idx].isalpha():
                    while (
                        last_matching_idx + 1 < len(text)
                        and text[last_matching_idx + 1].isalpha()
                    ):
                        last_matching_idx += 1
                elif text[last_matching_idx].isnumeric():
                    while (
                        last_matching_idx + 1 < len(text)
                        and text[last_matching_idx + 1].isnumeric()
                    ):
                        last_matching_idx += 1
                else:
                    while (
                        last_matching_idx + 1 < len(text)
                        and not text[last_matching_idx + 1].isspace()
                        and not text[last_matching_idx + 1].isalpha()
                        and not text[last_matching_idx + 1].isnumeric()
                    ):
                        last_matching_idx += 1
                tokens.append(text[idx : last_matching_idx + 1])
                idx = last_matching_idx + 1
                continue
            if idx + 1 < len(text) and text[idx + 1].isspace():
                # Capture runs of space characters up until just before the final one
                last_space_idx = idx + 1
                while (
                    last_space_idx + 1 < len(text)
                    and text[last_space_idx + 1].isspace()
                ):
                    last_space_idx += 1
                if last_space_idx + 1 == len(text):
                    # Include the last char, which is a space char
                    tokens.append(text[idx : last_space_idx + 1])
                    idx = last_space_idx + 1
                else:
                    tokens.append(text[idx:last_space_idx])
                    idx = last_space_idx
                continue
            if True:
                # Capture runs of space characters
                last_space_idx = idx
                while (
                    last_space_idx + 1 < len(text)
                    and text[last_space_idx + 1].isspace()
                ):
                    last_space_idx += 1
                tokens.append(text[idx : last_space_idx + 1])
                idx = last_space_idx + 1
        return tokens
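
    # Example of the splitting behavior above (illustrative, not from the original
    # source): runs of letters, runs of spaces, and punctuation are kept separate,
    # and a single space is attached to the following word, mirroring GPT-2's regex:
    #     findall("Hello   world!") -> ['Hello', '  ', ' world', '!']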
k, v in self.encoder.items():\n self.decoder[v] = k\n\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n\n self.bpe_ranks = fused_key_bpe_ranks\n\n # special tokens\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) -> List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n\n # constants for readability\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n # special token detected, replace the chunk with small subchunks\n # split by the special token\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n # add the special token as a delimiter\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n\n return output\n\n def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) -> List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n\n if len(pairs) == 0:\n return word\n\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf'))\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) -> List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) -> 
str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) -> str:\n \"\"\"\n Decode list of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n # We iterate over \"char\", which is supposed to be a single\n # character, because the TorchScripted version of the code\n # correctly splits a string into single characters in\n # self.utf8_chars() but the non-TorchScripted version doesn't\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) -> List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. 
Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 0b10000000:\n chars.append(s[i])\n i += 1\n else:\n if byte < 0b11100000:\n num_bytes = 2\n elif byte < 0b11110000:\n num_bytes = 3\n elif byte < 0b11111000:\n num_bytes = 4\n elif byte < 0b11111100:\n num_bytes = 5\n elif byte < 0b11111110:\n num_bytes = 6\n elif byte < 0b11111111:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i : i + num_bytes])\n i += num_bytes\n return chars\n\n\[email protected]\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(\n self,\n null_token: str,\n end_token: str,\n unk_token: str,\n start_token: str,\n freq: Dict[str, int],\n tok2ind: Dict[str, int],\n ind2tok: Dict[int, str],\n bpe_add_prefix_space: bool,\n bpe_encoder: Dict[str, str],\n bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str],\n ):\n\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n\n # cache unk token for later\n self._unk_token_idx = self.tok2ind[self.unk_token]\n\n # Initialize tokenizer\n self.bpe = ScriptableGpt2BpeHelper(\n add_prefix_space=bpe_add_prefix_space,\n encoder=bpe_encoder,\n byte_encoder=bpe_byte_encoder,\n fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens,\n )\n\n def _word_lookup(self, key: str) -> int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) -> str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) -> List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n\n # calls the selected tokenizer function e.g. 're' => re_tokenize(text)\n word_tokens = self.gpt2_tokenize(text)\n\n return word_tokens\n\n def bpe_tokenize(self, text: str) -> List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) -> List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) -> str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n", "step-ids": [ 24, 31, 32, 36, 43 ] }
[ 24, 31, 32, 36, 43 ]
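The flattener record above hinges on a simple key scheme: each tensor in the nested incremental state is stored under '<layer>__<attn_type>__<state_type>'. Below is a tensor-free sketch of the round trip, with plain strings standing in for tensors; note that the code emits keys like '0__self_attn__prev_key', without the 'layer_' prefix its docstring mentions.

from collections import defaultdict

# Nested state as the decoder keeps it: layer -> attention type -> slot.
structured = {
    0: {'self_attn': {'prev_key': 'k0', 'prev_value': 'v0'}},
    1: {'encoder_attn': {'prev_key': 'k1'}},
}

# Flatten, mirroring _flatten_incr_state() above.
flat = {}
for layer_idx, by_attn in structured.items():
    for attn_type, by_state in by_attn.items():
        for state_type, state in by_state.items():
            flat[f'{layer_idx:d}__{attn_type}__{state_type}'] = state

# Unflatten, mirroring _unflatten_incr_state() above.
rebuilt = defaultdict(lambda: defaultdict(dict))
for key, state in flat.items():
    layer_idx_str, attn_type, state_type = key.split('__')
    rebuilt[int(layer_idx_str)][attn_type][state_type] = state
rebuilt = {k: {a: dict(s) for a, s in v.items()} for k, v in rebuilt.items()}

assert rebuilt == structured
print(sorted(flat))  # ['0__self_attn__prev_key', '0__self_attn__prev_value', '1__encoder_attn__prev_key']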
#C:\utils\Python\Python27\python.exe incompletosClean.py incompletos\inc.dat incompletos\out.dat

import sys
import os
import os.path

lsOutTmp = []
InFileName = []
lsHTMLName = []

fileNameIn = sys.argv[1]
fileNameOu = sys.argv[2]

# read the list of HTML files to process, one path per line
fo = open(fileNameIn)
InFileName += [x.replace('\n', '') for x in fo.readlines()]
fo.close()

for bfMatFile in InFileName:
    if os.path.isfile(bfMatFile):
        lsHTMLName = []
        fo = open(bfMatFile)
        lsHTMLName += [x.replace('\n', '') for x in fo.readlines()]
        fo.close()

        # collect every <td><p>...</p></td> cell of this file into one comma-separated row
        bfRow = ''
        for rowHTML in lsHTMLName:
            iPosic = rowHTML.find('<td><p>')
            if iPosic != -1:  # find() returns -1 when the marker is absent
                bfRowPart = rowHTML[iPosic + len('<td><p>'):]
                bfRow += ((bfRowPart[:bfRowPart.index('</p></td>')] + ',').replace('&nbsp;', ',')).strip()

        if bfRow != '':
            lsOutTmp.append(bfRow[:-1] + ';')  # swap the trailing comma for ';'

bufferTmp = '\n'.join(lsOutTmp)
fo = open(fileNameOu, 'w')
fo.write(bufferTmp)
fo.close()
normal
{ "blob_id": "031727fa42b87260abb671518b2baeff1c9524f9", "index": 8913, "step-1": "<mask token>\n", "step-2": "<mask token>\nInFileName += [x.replace('\\n', '') for x in fo.readlines()]\nfo.close()\nfor bfMatFile in InFileName:\n if os.path.isfile(bfMatFile):\n lsHTMLName = []\n fo = open(bfMatFile)\n lsHTMLName += [x.replace('\\n', '') for x in fo.readlines()]\n fo.close()\n bfRow = ''\n for rowHTML in lsHTMLName:\n iPosic = rowHTML.find('<td><p>')\n if iPosic > 0:\n bfRowPart = rowHTML[iPosic + len('<td><p>'):]\n bfRow += (bfRowPart[:bfRowPart.index('</p></td>')] + ','\n ).replace('&nbsp;', ',').strip()\n if bfRow != '':\n lsOutTmp.append(bfRow[:len(bfRow) - 1] + ';')\n<mask token>\nfo.write(bufferTmp)\nfo.close()\n", "step-3": "<mask token>\nbfTmp = ''\nlsOutTmp = []\nInFileName = []\nlsHTMLName = []\nfileNameIn = sys.argv[1]\nfileNameOu = sys.argv[2]\nfo = open(fileNameIn)\nInFileName += [x.replace('\\n', '') for x in fo.readlines()]\nfo.close()\nfor bfMatFile in InFileName:\n if os.path.isfile(bfMatFile):\n lsHTMLName = []\n fo = open(bfMatFile)\n lsHTMLName += [x.replace('\\n', '') for x in fo.readlines()]\n fo.close()\n bfRow = ''\n for rowHTML in lsHTMLName:\n iPosic = rowHTML.find('<td><p>')\n if iPosic > 0:\n bfRowPart = rowHTML[iPosic + len('<td><p>'):]\n bfRow += (bfRowPart[:bfRowPart.index('</p></td>')] + ','\n ).replace('&nbsp;', ',').strip()\n if bfRow != '':\n lsOutTmp.append(bfRow[:len(bfRow) - 1] + ';')\nbufferTmp = '\\n'\nbufferTmp = bufferTmp.join(lsOutTmp)\nfo = open(fileNameOu, 'w')\nfo.write(bufferTmp)\nfo.close()\n", "step-4": "import sys\nimport os\nimport os.path\nbfTmp = ''\nlsOutTmp = []\nInFileName = []\nlsHTMLName = []\nfileNameIn = sys.argv[1]\nfileNameOu = sys.argv[2]\nfo = open(fileNameIn)\nInFileName += [x.replace('\\n', '') for x in fo.readlines()]\nfo.close()\nfor bfMatFile in InFileName:\n if os.path.isfile(bfMatFile):\n lsHTMLName = []\n fo = open(bfMatFile)\n lsHTMLName += [x.replace('\\n', '') for x in fo.readlines()]\n fo.close()\n bfRow = ''\n for rowHTML in lsHTMLName:\n iPosic = rowHTML.find('<td><p>')\n if iPosic > 0:\n bfRowPart = rowHTML[iPosic + len('<td><p>'):]\n bfRow += (bfRowPart[:bfRowPart.index('</p></td>')] + ','\n ).replace('&nbsp;', ',').strip()\n if bfRow != '':\n lsOutTmp.append(bfRow[:len(bfRow) - 1] + ';')\nbufferTmp = '\\n'\nbufferTmp = bufferTmp.join(lsOutTmp)\nfo = open(fileNameOu, 'w')\nfo.write(bufferTmp)\nfo.close()\n", "step-5": "#C:\\utils\\Python\\Python27\\python.exe incompletosClean.py incompletos\\inc.dat incompletos\\out.dat\r\n\r\nimport sys\r\nimport os\r\nimport os.path\r\n\r\nbfTmp = ''\r\nlsOutTmp = []\r\nInFileName = []\r\nlsHTMLName = []\r\n\r\nfileNameIn= sys.argv[1]\r\nfileNameOu= sys.argv[2]\r\n\r\nfo = open(fileNameIn)\r\nInFileName += [x.replace('\\n', '') for x in fo.readlines()]\r\nfo.close()\r\n\r\nfor bfMatFile in InFileName:\r\n if os.path.isfile(bfMatFile):\r\n lsHTMLName = []\r\n fo = open(bfMatFile)\r\n lsHTMLName += [x.replace('\\n', '') for x in fo.readlines()]\r\n fo.close()\r\n\r\n bfRow = ''\r\n for rowHTML in lsHTMLName:\r\n iPosic = rowHTML.find('<td><p>')\r\n if iPosic > 0:\r\n bfRowPart = rowHTML[iPosic + len('<td><p>'):]\r\n bfRow += ((bfRowPart[:bfRowPart.index('</p></td>')] + ',').replace('&nbsp;', ',')).strip()\r\n\r\n if bfRow != '':\r\n lsOutTmp.append(bfRow[:len(bfRow)-1] + ';')\r\n\r\nbufferTmp = '\\n'\r\nbufferTmp = bufferTmp.join(lsOutTmp)\r\nfo= open(fileNameOu, 'w')\r\nfo.write(bufferTmp)\r\nfo.close()\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
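To make the cell-extraction step of the scraper above concrete, here is the same logic run on a single made-up HTML row (the input string is hypothetical, not taken from the real inc.dat files):

# One fabricated table row with a non-breaking-space separated cell.
rowHTML = '<tr><td><p>A1&nbsp;B2</p></td></tr>'
marker = '<td><p>'
iPosic = rowHTML.find(marker)
if iPosic != -1:  # find() returns -1 when the marker is absent
    cell = rowHTML[iPosic + len(marker):]
    value = ((cell[:cell.index('</p></td>')] + ',').replace('&nbsp;', ',')).strip()
    print(value)  # -> A1,B2,  (the trailing comma is later swapped for ';')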
from Socket import Socket
import threading


class Server(Socket):
    def __init__(self):
        super(Server, self).__init__()

        print("server listening")

        self.users = []

    def set_up(self):
        self.bind(("192.168.0.109", 1337))
        self.listen(0)
        self.accept_sockets()

    def send_data(self, data):
        # iterate over a copy so removing a dead socket mid-loop is safe
        for user in self.users[:]:
            try:
                user.send(data)
            except ConnectionResetError:
                self.users.remove(user)

    def listen_socket(self, listened_socket=None):
        countForDel = 0
        while True:
            data = listened_socket.recv(2048)
            # [0:-2] strips the trailing '\r\n'; repeated empty messages trip the spam filter
            if data.decode("utf-8")[0:-2] == '':
                countForDel += 1
                if countForDel > 5:
                    print("deleting user: Antispam")
                    self.users.remove(listened_socket)
                    raise ConnectionResetError

            print(f"User sent {data}")
            self.send_data(data)

    def accept_sockets(self):
        while True:
            user_socket, address = self.accept()
            print(f"User <{address[0]}> connected!")
            self.users.append(user_socket)  # register the new user
            print(len(self.users))

            listen_accepted_user = threading.Thread(
                target=self.listen_socket,
                args=(user_socket,))

            listen_accepted_user.start()


if __name__ == '__main__':
    server = Server()
    server.set_up()
normal
{ "blob_id": "2027904401e5be7b1c95eebec3a1e6a88c25660c", "index": 9338, "step-1": "<mask token>\n\n\nclass Server(Socket):\n\n def __init__(self):\n super(Server, self).__init__()\n print('server listening')\n self.users = []\n\n def set_up(self):\n self.bind(('192.168.0.109', 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n <mask token>\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f'User <{address[0]}> connected!')\n self.users.append(user_socket)\n print(len(self.users))\n listen_accepted_user = threading.Thread(target=self.\n listen_socket, args=(user_socket,))\n listen_accepted_user.start()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Server(Socket):\n\n def __init__(self):\n super(Server, self).__init__()\n print('server listening')\n self.users = []\n\n def set_up(self):\n self.bind(('192.168.0.109', 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n\n def listen_socket(self, listened_socket=None):\n countForDel = 0\n while True:\n data = listened_socket.recv(2048)\n if data.decode('utf-8')[0:-2] == '':\n countForDel += 1\n if countForDel > 5:\n print('deleting user: Antispam')\n self.users.pop(self.users.index(listened_socket))\n raise ConnectionResetError\n print(f'User sent {data}')\n self.send_data(data)\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f'User <{address[0]}> connected!')\n self.users.append(user_socket)\n print(len(self.users))\n listen_accepted_user = threading.Thread(target=self.\n listen_socket, args=(user_socket,))\n listen_accepted_user.start()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Server(Socket):\n\n def __init__(self):\n super(Server, self).__init__()\n print('server listening')\n self.users = []\n\n def set_up(self):\n self.bind(('192.168.0.109', 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n\n def listen_socket(self, listened_socket=None):\n countForDel = 0\n while True:\n data = listened_socket.recv(2048)\n if data.decode('utf-8')[0:-2] == '':\n countForDel += 1\n if countForDel > 5:\n print('deleting user: Antispam')\n self.users.pop(self.users.index(listened_socket))\n raise ConnectionResetError\n print(f'User sent {data}')\n self.send_data(data)\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f'User <{address[0]}> connected!')\n self.users.append(user_socket)\n print(len(self.users))\n listen_accepted_user = threading.Thread(target=self.\n listen_socket, args=(user_socket,))\n listen_accepted_user.start()\n\n\nif __name__ == '__main__':\n server = Server()\n server.set_up()\n", "step-4": "from Socket import Socket\nimport threading\n\n\nclass Server(Socket):\n\n def __init__(self):\n super(Server, self).__init__()\n print('server listening')\n self.users = []\n\n def set_up(self):\n self.bind(('192.168.0.109', 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n 
pass\n\n def listen_socket(self, listened_socket=None):\n countForDel = 0\n while True:\n data = listened_socket.recv(2048)\n if data.decode('utf-8')[0:-2] == '':\n countForDel += 1\n if countForDel > 5:\n print('deleting user: Antispam')\n self.users.pop(self.users.index(listened_socket))\n raise ConnectionResetError\n print(f'User sent {data}')\n self.send_data(data)\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f'User <{address[0]}> connected!')\n self.users.append(user_socket)\n print(len(self.users))\n listen_accepted_user = threading.Thread(target=self.\n listen_socket, args=(user_socket,))\n listen_accepted_user.start()\n\n\nif __name__ == '__main__':\n server = Server()\n server.set_up()\n", "step-5": "from Socket import Socket\nimport threading\n\nclass Server(Socket):\n def __init__(self):\n super(Server, self).__init__()\n\n print(\"server listening\")\n\n self.users = []\n\n def set_up(self):\n self.bind((\"192.168.0.109\", 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n\n def listen_socket(self, listened_socket=None):\n countForDel = 0\n while True:\n data = listened_socket.recv(2048)\n if data.decode(\"utf-8\")[0:-2] == '':\n countForDel += 1\n if countForDel > 5:\n print(\"deleting user: Antispam\")\n self.users.pop(self.users.index(listened_socket))\n raise ConnectionResetError\n \n print(f\"User sent {data}\")\n self.send_data(data)\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f\"User <{address[0]}> connected!\")\n self.users.append(user_socket) # добавляется юзер\n print(len(self.users))\n\n listen_accepted_user = threading.Thread(\n target=self.listen_socket,\n args=(user_socket,))\n\n listen_accepted_user.start()\n\n\nif __name__ == '__main__':\n server = Server()\n server.set_up()\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
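A minimal client sketch for the broadcast server above, built on the standard socket module; the hard-coded address is the one the server binds to, and the server must already be running for the connect to succeed:

import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
    client.connect(('192.168.0.109', 1337))
    client.send(b'hello\r\n')  # the server slices off the trailing two bytes before its empty-message check
    print(client.recv(2048))   # every message is echoed back to all connected users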
# Copyright 2022 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import numpy as np import pytest import mindspore import mindspore.nn as nn from mindspore.ops.operations import _grad_ops as G from mindspore import Tensor, context class ConcatOffsetNet(nn.Cell): def __init__(self, axis): super(ConcatOffsetNet, self).__init__() self.op = G.ConcatOffset(2, axis) def construct(self, x0, x1): return self.op((x0, x1)) def run_case(run_mode): context.set_context(mode=run_mode) x0 = Tensor(np.random.uniform(10, 20, (4, 2, 16)).astype(np.float32)) x1 = Tensor(np.random.uniform(10, 20, (4, 6, 16)).astype(np.float32)) expect = np.array([[0, 0, 0], [0, 2, 0]]).astype(np.int64) x0_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32) x1_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32) net = ConcatOffsetNet(1) net.set_inputs(x0_dyn, x1_dyn) output = net(x0, x1) if run_mode == context.GRAPH_MODE: assert np.allclose(expect, output.asnumpy()) else: # In PyNative, set_inputs will be ignored. Static shape for ConcatOffset # infer output is not a tensor, get constant value output. assert np.allclose(expect, output) @pytest.mark.level0 @pytest.mark.env_onecard @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training def test_concat_offset(): """ Feature: aicpu ConcatOffset Description: test ConcatOffset on Ascend. Expectation: output compares success with expect. """ context.set_context(device_target="Ascend") run_case(context.GRAPH_MODE) run_case(context.PYNATIVE_MODE)
normal
{ "blob_id": "2064fe029bc7db14505a5b38750e324b55556abb", "index": 7032, "step-1": "<mask token>\n\n\nclass ConcatOffsetNet(nn.Cell):\n\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ConcatOffsetNet(nn.Cell):\n\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\n<mask token>\n\n\[email protected]\[email protected]_onecard\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\ndef test_concat_offset():\n \"\"\"\n Feature: aicpu ConcatOffset\n Description: test ConcatOffset on Ascend.\n Expectation: output compares success with expect.\n \"\"\"\n context.set_context(device_target='Ascend')\n run_case(context.GRAPH_MODE)\n run_case(context.PYNATIVE_MODE)\n", "step-3": "<mask token>\n\n\nclass ConcatOffsetNet(nn.Cell):\n\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\ndef run_case(run_mode):\n context.set_context(mode=run_mode)\n x0 = Tensor(np.random.uniform(10, 20, (4, 2, 16)).astype(np.float32))\n x1 = Tensor(np.random.uniform(10, 20, (4, 6, 16)).astype(np.float32))\n expect = np.array([[0, 0, 0], [0, 2, 0]]).astype(np.int64)\n x0_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n x1_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n net = ConcatOffsetNet(1)\n net.set_inputs(x0_dyn, x1_dyn)\n output = net(x0, x1)\n if run_mode == context.GRAPH_MODE:\n assert np.allclose(expect, output.asnumpy())\n else:\n assert np.allclose(expect, output)\n\n\[email protected]\[email protected]_onecard\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\ndef test_concat_offset():\n \"\"\"\n Feature: aicpu ConcatOffset\n Description: test ConcatOffset on Ascend.\n Expectation: output compares success with expect.\n \"\"\"\n context.set_context(device_target='Ascend')\n run_case(context.GRAPH_MODE)\n run_case(context.PYNATIVE_MODE)\n", "step-4": "import numpy as np\nimport pytest\nimport mindspore\nimport mindspore.nn as nn\nfrom mindspore.ops.operations import _grad_ops as G\nfrom mindspore import Tensor, context\n\n\nclass ConcatOffsetNet(nn.Cell):\n\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\ndef run_case(run_mode):\n context.set_context(mode=run_mode)\n x0 = Tensor(np.random.uniform(10, 20, (4, 2, 16)).astype(np.float32))\n x1 = Tensor(np.random.uniform(10, 20, (4, 6, 16)).astype(np.float32))\n expect = np.array([[0, 0, 0], [0, 2, 0]]).astype(np.int64)\n x0_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n x1_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n net = ConcatOffsetNet(1)\n net.set_inputs(x0_dyn, x1_dyn)\n output = net(x0, x1)\n if run_mode == context.GRAPH_MODE:\n assert np.allclose(expect, output.asnumpy())\n else:\n assert np.allclose(expect, output)\n\n\[email protected]\[email protected]_onecard\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\ndef test_concat_offset():\n \"\"\"\n Feature: aicpu ConcatOffset\n Description: test ConcatOffset on Ascend.\n Expectation: output compares success with expect.\n \"\"\"\n 
context.set_context(device_target='Ascend')\n run_case(context.GRAPH_MODE)\n run_case(context.PYNATIVE_MODE)\n", "step-5": "# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\nimport mindspore\nimport mindspore.nn as nn\nfrom mindspore.ops.operations import _grad_ops as G\nfrom mindspore import Tensor, context\n\n\nclass ConcatOffsetNet(nn.Cell):\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\ndef run_case(run_mode):\n context.set_context(mode=run_mode)\n x0 = Tensor(np.random.uniform(10, 20, (4, 2, 16)).astype(np.float32))\n x1 = Tensor(np.random.uniform(10, 20, (4, 6, 16)).astype(np.float32))\n expect = np.array([[0, 0, 0], [0, 2, 0]]).astype(np.int64)\n x0_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n x1_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n net = ConcatOffsetNet(1)\n net.set_inputs(x0_dyn, x1_dyn)\n output = net(x0, x1)\n if run_mode == context.GRAPH_MODE:\n assert np.allclose(expect, output.asnumpy())\n else:\n # In PyNative, set_inputs will be ignored. Static shape for ConcatOffset\n # infer output is not a tensor, get constant value output.\n assert np.allclose(expect, output)\n\n\[email protected]\[email protected]_onecard\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\ndef test_concat_offset():\n \"\"\"\n Feature: aicpu ConcatOffset\n Description: test ConcatOffset on Ascend.\n Expectation: output compares success with expect.\n \"\"\"\n context.set_context(device_target=\"Ascend\")\n run_case(context.GRAPH_MODE)\n run_case(context.PYNATIVE_MODE)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
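For reference, ConcatOffset reports, for each input of a Concat, the offset at which that input begins along the concatenation axis. A NumPy-only sketch that reproduces the `expect` array in the test above:

import numpy as np

shapes = [(4, 2, 16), (4, 6, 16)]  # the two input shapes from the test
axis = 1
offsets = []
acc = 0
for shape in shapes:
    offset = [0] * len(shape)
    offset[axis] = acc  # this input starts where the previous ones end
    offsets.append(offset)
    acc += shape[axis]
print(np.array(offsets))  # [[0 0 0] [0 2 0]], matching `expect`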
#!/usr/bin/python3

import RPi.GPIO as GPIO
import time
# motor_EN_A: Pin7 | motor_EN_B: Pin11
# motor_A: Pin8,Pin10 | motor_B: Pin13,Pin12

#Motor_A_EN = 7
Motor_B_EN = 11

#Motor_A_Pin1 = 8
#Motor_A_Pin2 = 10
Motor_B_Pin1 = 13
Motor_B_Pin2 = 12

Dir_forward = 0
Dir_backward = 1

#pwm_A = 0
pwm_B = 0

def setup():  # motor initialization
    global pwm_B  # only motor B is wired up; the A channel is commented out
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BOARD)
    #GPIO.setup(Motor_A_EN, GPIO.OUT)
    GPIO.setup(Motor_B_EN, GPIO.OUT)
    #GPIO.setup(Motor_A_Pin1, GPIO.OUT)
    #GPIO.setup(Motor_A_Pin2, GPIO.OUT)
    GPIO.setup(Motor_B_Pin1, GPIO.OUT)
    GPIO.setup(Motor_B_Pin2, GPIO.OUT)
    #pwm_A = GPIO.PWM(Motor_A_EN, 1000)
    pwm_B = GPIO.PWM(Motor_B_EN, 1000)  # 1 kHz PWM on the enable pin

def motorStop():  # motor stops
    #GPIO.output(Motor_A_Pin1, GPIO.LOW)
    #GPIO.output(Motor_A_Pin2, GPIO.LOW)
    GPIO.output(Motor_B_Pin1, GPIO.LOW)
    GPIO.output(Motor_B_Pin2, GPIO.LOW)
    #GPIO.output(Motor_A_EN, GPIO.LOW)
    GPIO.output(Motor_B_EN, GPIO.LOW)

def motorStart(status, direction, speed):  # motor B forward and backward rotation
    global pwm_B
    if status == 0:  # stop
        motorStop()
    else:
        if direction == Dir_forward:
            GPIO.output(Motor_B_Pin1, GPIO.HIGH)
            GPIO.output(Motor_B_Pin2, GPIO.LOW)
            # start()'s initial duty cycle is immediately overridden below,
            # so use `speed` in both branches for consistency
            pwm_B.start(speed)
            pwm_B.ChangeDutyCycle(speed)
        elif direction == Dir_backward:
            GPIO.output(Motor_B_Pin1, GPIO.LOW)
            GPIO.output(Motor_B_Pin2, GPIO.HIGH)
            pwm_B.start(speed)
            pwm_B.ChangeDutyCycle(speed)

def destroy():
    motorStop()
    GPIO.cleanup()  # release GPIO resources

try:
    pass
except KeyboardInterrupt:
    destroy()
normal
{ "blob_id": "7369d5a463b0f41c17d5648739d4730256e611f9", "index": 9612, "step-1": "<mask token>\n\n\ndef setup():\n global pwm_A, pwm_B\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(Motor_B_EN, GPIO.OUT)\n GPIO.setup(Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(Motor_B_Pin2, GPIO.OUT)\n pwm_B = GPIO.PWM(Motor_B_EN, 1000)\n\n\ndef motorStop():\n GPIO.output(Motor_B_Pin1, GPIO.LOW)\n GPIO.output(Motor_B_Pin2, GPIO.LOW)\n GPIO.output(Motor_B_EN, GPIO.LOW)\n\n\ndef motorStart(status, direction, speed):\n global pwm_B\n if status == 0:\n motorStop()\n elif direction == Dir_forward:\n GPIO.output(Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(Motor_B_Pin2, GPIO.LOW)\n pwm_B.start(100)\n pwm_B.ChangeDutyCycle(speed)\n elif direction == Dir_backward:\n GPIO.output(Motor_B_Pin1, GPIO.LOW)\n GPIO.output(Motor_B_Pin2, GPIO.HIGH)\n pwm_B.start(0)\n pwm_B.ChangeDutyCycle(speed)\n\n\ndef destroy():\n motorStop()\n GPIO.cleanup()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef setup():\n global pwm_A, pwm_B\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(Motor_B_EN, GPIO.OUT)\n GPIO.setup(Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(Motor_B_Pin2, GPIO.OUT)\n pwm_B = GPIO.PWM(Motor_B_EN, 1000)\n\n\ndef motorStop():\n GPIO.output(Motor_B_Pin1, GPIO.LOW)\n GPIO.output(Motor_B_Pin2, GPIO.LOW)\n GPIO.output(Motor_B_EN, GPIO.LOW)\n\n\ndef motorStart(status, direction, speed):\n global pwm_B\n if status == 0:\n motorStop()\n elif direction == Dir_forward:\n GPIO.output(Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(Motor_B_Pin2, GPIO.LOW)\n pwm_B.start(100)\n pwm_B.ChangeDutyCycle(speed)\n elif direction == Dir_backward:\n GPIO.output(Motor_B_Pin1, GPIO.LOW)\n GPIO.output(Motor_B_Pin2, GPIO.HIGH)\n pwm_B.start(0)\n pwm_B.ChangeDutyCycle(speed)\n\n\ndef destroy():\n motorStop()\n GPIO.cleanup()\n\n\ntry:\n pass\nexcept KeyboardInterrupt:\n destroy()\n", "step-3": "<mask token>\nMotor_B_EN = 11\nMotor_B_Pin1 = 13\nMotor_B_Pin2 = 12\nDir_forward = 0\nDir_backward = 1\npwm_B = 0\n\n\ndef setup():\n global pwm_A, pwm_B\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(Motor_B_EN, GPIO.OUT)\n GPIO.setup(Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(Motor_B_Pin2, GPIO.OUT)\n pwm_B = GPIO.PWM(Motor_B_EN, 1000)\n\n\ndef motorStop():\n GPIO.output(Motor_B_Pin1, GPIO.LOW)\n GPIO.output(Motor_B_Pin2, GPIO.LOW)\n GPIO.output(Motor_B_EN, GPIO.LOW)\n\n\ndef motorStart(status, direction, speed):\n global pwm_B\n if status == 0:\n motorStop()\n elif direction == Dir_forward:\n GPIO.output(Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(Motor_B_Pin2, GPIO.LOW)\n pwm_B.start(100)\n pwm_B.ChangeDutyCycle(speed)\n elif direction == Dir_backward:\n GPIO.output(Motor_B_Pin1, GPIO.LOW)\n GPIO.output(Motor_B_Pin2, GPIO.HIGH)\n pwm_B.start(0)\n pwm_B.ChangeDutyCycle(speed)\n\n\ndef destroy():\n motorStop()\n GPIO.cleanup()\n\n\ntry:\n pass\nexcept KeyboardInterrupt:\n destroy()\n", "step-4": "import RPi.GPIO as GPIO\nimport time\nMotor_B_EN = 11\nMotor_B_Pin1 = 13\nMotor_B_Pin2 = 12\nDir_forward = 0\nDir_backward = 1\npwm_B = 0\n\n\ndef setup():\n global pwm_A, pwm_B\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(Motor_B_EN, GPIO.OUT)\n GPIO.setup(Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(Motor_B_Pin2, GPIO.OUT)\n pwm_B = GPIO.PWM(Motor_B_EN, 1000)\n\n\ndef motorStop():\n GPIO.output(Motor_B_Pin1, GPIO.LOW)\n GPIO.output(Motor_B_Pin2, GPIO.LOW)\n GPIO.output(Motor_B_EN, GPIO.LOW)\n\n\ndef motorStart(status, direction, speed):\n global pwm_B\n if status == 0:\n motorStop()\n elif direction == Dir_forward:\n 
GPIO.output(Motor_B_Pin1, GPIO.HIGH)\n GPIO.output(Motor_B_Pin2, GPIO.LOW)\n pwm_B.start(100)\n pwm_B.ChangeDutyCycle(speed)\n elif direction == Dir_backward:\n GPIO.output(Motor_B_Pin1, GPIO.LOW)\n GPIO.output(Motor_B_Pin2, GPIO.HIGH)\n pwm_B.start(0)\n pwm_B.ChangeDutyCycle(speed)\n\n\ndef destroy():\n motorStop()\n GPIO.cleanup()\n\n\ntry:\n pass\nexcept KeyboardInterrupt:\n destroy()\n", "step-5": "#!/usr/bin/python3\n\nimport RPi.GPIO as GPIO\nimport time\n# motor_EN_A: Pin7 | motor_EN_B: Pin11\n# motor_A: Pin8,Pin10 | motor_B: Pin13,Pin12\n\n#Motor_A_EN = 7\nMotor_B_EN = 11\n\n#Motor_A_Pin1 = 8\n#Motor_A_Pin2 = 10\nMotor_B_Pin1 = 13\nMotor_B_Pin2 = 12\n\nDir_forward = 0\nDir_backward = 1\n\n#pwm_A = 0\npwm_B = 0\n\ndef setup():#Motor initialization\n\tglobal pwm_A, pwm_B\n\tGPIO.setwarnings(False)\n\tGPIO.setmode(GPIO.BOARD)\n\t#GPIO.setup(Motor_A_EN, GPIO.OUT)\n\tGPIO.setup(Motor_B_EN, GPIO.OUT)\n\t#GPIO.setup(Motor_A_Pin1, GPIO.OUT)\n\t#GPIO.setup(Motor_A_Pin2, GPIO.OUT)\n\tGPIO.setup(Motor_B_Pin1, GPIO.OUT)\n\tGPIO.setup(Motor_B_Pin2, GPIO.OUT)\n\t#pwm_A = GPIO.PWM(Motor_A_EN, 1000)\n\tpwm_B = GPIO.PWM(Motor_B_EN, 1000)\n\ndef motorStop():#Motor stops\n\t#GPIO.output(Motor_A_Pin1, GPIO.LOW)\n\t#GPIO.output(Motor_A_Pin2, GPIO.LOW)\n\tGPIO.output(Motor_B_Pin1, GPIO.LOW)\n\tGPIO.output(Motor_B_Pin2, GPIO.LOW)\n\t#GPIO.output(Motor_A_EN, GPIO.LOW)\n\tGPIO.output(Motor_B_EN, GPIO.LOW)\n\ndef motorStart(status, direction, speed):#Motor 2 positive and negative rotation\n\tglobal pwm_B\n\tif status == 0: # stop\n\t\tmotorStop()\n\telse:\n\t\tif direction == Dir_forward:\n\t\t\tGPIO.output(Motor_B_Pin1, GPIO.HIGH)\n\t\t\tGPIO.output(Motor_B_Pin2, GPIO.LOW)\n\t\t\tpwm_B.start(100)\n\t\t\tpwm_B.ChangeDutyCycle(speed)\n\t\telif direction == Dir_backward:\n\t\t\tGPIO.output(Motor_B_Pin1, GPIO.LOW)\n\t\t\tGPIO.output(Motor_B_Pin2, GPIO.HIGH)\n\t\t\tpwm_B.start(0)\n\t\t\tpwm_B.ChangeDutyCycle(speed)\n\ndef destroy():\n\tmotorStop()\n\tGPIO.cleanup() # Release resource\n\ntry:\n\tpass\nexcept KeyboardInterrupt:\n\tdestroy()\n\n\n\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
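A hypothetical driver for the module above, assuming it is saved as motor.py (the module name is an assumption); it only makes sense on a Raspberry Pi with RPi.GPIO installed and motor B wired to pins 11/12/13 as in the pin map:

import time

from motor import setup, motorStart, destroy, Dir_forward, Dir_backward  # hypothetical module name

setup()
try:
    motorStart(1, Dir_forward, 75)   # run forward at 75% duty cycle
    time.sleep(2)
    motorStart(1, Dir_backward, 50)  # reverse at 50%
    time.sleep(2)
finally:
    destroy()  # always release the GPIO pins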
from flask_table import Table, Col """Lets suppose that we have a class that we get an iterable of from somewhere, such as a database. We can declare a table that pulls out the relevant entries, escapes them and displays them. """ class Item(object): def __init__(self, name, category): self.name = name self.category = category class Category(object): def __init__(self, name): self.name = name class ItemTable(Table): name = Col('Name') category_name = Col('Category', attr_list=['category', 'name']) # Equivalently: Col('Category', attr='category.name') # Both syntaxes are kept as the second is more readable, but # doesn't cover all options. Such as if the items are dicts and # the keys have dots in. def main(): items = [Item('A', Category('catA')), Item('B', Category('catB'))] tab = ItemTable(items) print(tab.__html__()) if __name__ == '__main__': main()
normal
{ "blob_id": "3191fa5f9c50993d17e12e4e2e9d56cfce2108e7", "index": 5646, "step-1": "<mask token>\n\n\nclass Item(object):\n\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Item(object):\n\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n\n\ndef main():\n items = [Item('A', Category('catA')), Item('B', Category('catB'))]\n tab = ItemTable(items)\n print(tab.__html__())\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Item(object):\n\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n\n\ndef main():\n items = [Item('A', Category('catA')), Item('B', Category('catB'))]\n tab = ItemTable(items)\n print(tab.__html__())\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "from flask_table import Table, Col\n<mask token>\n\n\nclass Item(object):\n\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n\n\ndef main():\n items = [Item('A', Category('catA')), Item('B', Category('catB'))]\n tab = ItemTable(items)\n print(tab.__html__())\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "from flask_table import Table, Col\n\n\n\"\"\"Lets suppose that we have a class that we get an iterable of from\nsomewhere, such as a database. We can declare a table that pulls out\nthe relevant entries, escapes them and displays them.\n\n\"\"\"\n\n\nclass Item(object):\n def __init__(self, name, category):\n self.name = name\n self.category = category\n\n\nclass Category(object):\n def __init__(self, name):\n self.name = name\n\n\nclass ItemTable(Table):\n name = Col('Name')\n category_name = Col('Category', attr_list=['category', 'name'])\n # Equivalently: Col('Category', attr='category.name')\n # Both syntaxes are kept as the second is more readable, but\n # doesn't cover all options. Such as if the items are dicts and\n # the keys have dots in.\n\n\ndef main():\n items = [Item('A', Category('catA')),\n Item('B', Category('catB'))]\n\n tab = ItemTable(items)\n print(tab.__html__())\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
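The comment inside the table class above points at the dotted-attr shorthand; for completeness, the same column declared that way behaves identically:

from flask_table import Table, Col

class ItemTableDotted(Table):
    name = Col('Name')
    # same lookup as attr_list=['category', 'name'], written in the shorthand form
    category_name = Col('Category', attr='category.name')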
import shelve
from club import Club

# total budget of the clubs in a given country
# the club with the most trophies

country = input('country: ')

FILENAME = "clubs"

with shelve.open(FILENAME) as clubs:
    clubs_by_country = list(filter(lambda s: s.country.lower() == country.lower(), clubs.values()))

    if len(clubs_by_country) == 0:
        print("No clubs with such country")
        exit()

    the_best_club = max(clubs_by_country, key=lambda s: int(s.award))
    clubs_budget = sum(int(club.budget) for club in clubs_by_country)

    print("The best club: ", the_best_club)
    print("Summary budget: ", clubs_budget)
normal
{ "blob_id": "1346bf78241b4be00f2da3c22731d2846f9d1ada", "index": 4629, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith shelve.open(FILENAME) as clubs:\n clubs_by_country = list(filter(lambda s: s.country.lower() == country.\n lower(), clubs.values()))\n if len(clubs_by_country) == 0:\n print('No clubs with such country')\n exit()\n the_best_club = max(clubs_by_country, key=lambda s: int(s.award))\n clubs_budget = sum(int(club.budget) for club in clubs_by_country)\n print('The best club: ', the_best_club)\n print('Summary budget: ', clubs_budget)\n", "step-3": "<mask token>\ncountry = input('country: ')\nFILENAME = 'clubs'\nwith shelve.open(FILENAME) as clubs:\n clubs_by_country = list(filter(lambda s: s.country.lower() == country.\n lower(), clubs.values()))\n if len(clubs_by_country) == 0:\n print('No clubs with such country')\n exit()\n the_best_club = max(clubs_by_country, key=lambda s: int(s.award))\n clubs_budget = sum(int(club.budget) for club in clubs_by_country)\n print('The best club: ', the_best_club)\n print('Summary budget: ', clubs_budget)\n", "step-4": "import shelve\nfrom club import Club\ncountry = input('country: ')\nFILENAME = 'clubs'\nwith shelve.open(FILENAME) as clubs:\n clubs_by_country = list(filter(lambda s: s.country.lower() == country.\n lower(), clubs.values()))\n if len(clubs_by_country) == 0:\n print('No clubs with such country')\n exit()\n the_best_club = max(clubs_by_country, key=lambda s: int(s.award))\n clubs_budget = sum(int(club.budget) for club in clubs_by_country)\n print('The best club: ', the_best_club)\n print('Summary budget: ', clubs_budget)\n", "step-5": "import shelve\nfrom club import Club\n\n#загальний бютжет клубів в заданій країні\n#клуб який має найбільше трофеїв\n\n\ncountry = input('country: ')\n\nFILENAME = \"clubs\"\n\nwith shelve.open(FILENAME) as clubs:\n clubs_by_country = list(filter(lambda s: s.country.lower() == country.lower(), clubs.values()))\n\n if len(clubs_by_country) == 0 :\n print(\"No clubs with such country\")\n exit()\n\n the_best_club = max(clubs_by_country, key=lambda s: int(s.award))\n clubs_budget = sum(int(club.budget) for club in clubs_by_country)\n\n print(\"The best club: \", the_best_club)\n print(\"Summary budget: \", clubs_budget)\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
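The query script above assumes a 'clubs' shelf already populated with objects exposing country, budget, and award attributes. Here is a hypothetical seeding sketch; the Club below is a stand-in for illustration, whereas in practice the objects must be pickled as the real club.Club so the query script can unpickle them:

import shelve

class Club:  # stand-in exposing just the attributes the query script reads
    def __init__(self, name, country, budget, award):
        self.name = name
        self.country = country
        self.budget = budget  # kept as strings, matching the int(...) casts above
        self.award = award

    def __repr__(self):
        return f'{self.name} ({self.country})'

with shelve.open('clubs') as clubs:
    clubs['dynamo'] = Club('Dynamo', 'Ukraine', '90', '16')
    clubs['shakhtar'] = Club('Shakhtar', 'Ukraine', '120', '13')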
from django import forms


class CommentForm(forms.Form):
    name = forms.CharField(label='Name')        # label was '称呼'
    email = forms.EmailField(label='Email')     # label was '邮箱'
    content = forms.CharField(label='Content')  # label was '内容'
normal
{ "blob_id": "c2ff3c5e44fa361671a3fdb38060517bcc4bc82c", "index": 2778, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass CommentForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass CommentForm(forms.Form):\n name = forms.CharField(label='称呼')\n email = forms.EmailField(label='邮箱')\n content = forms.CharField(label='内容')\n", "step-4": "from django import forms\n\n\nclass CommentForm(forms.Form):\n name = forms.CharField(label='称呼')\n email = forms.EmailField(label='邮箱')\n content = forms.CharField(label='内容')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
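A quick validation check for the form above, assuming CommentForm is in scope; it is best run in a Django shell, since is_valid() needs Django settings to be configured even though the form never touches the database:

form = CommentForm(data={'name': 'Alice', 'email': '[email protected]', 'content': 'Hi'})
assert form.is_valid(), form.errors
print(form.cleaned_data)  # {'name': 'Alice', 'email': '[email protected]', 'content': 'Hi'}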
card = int(input())
last4 = card % 10000  # the remainder mod 10000 keeps the last four digits
print(last4)
normal
{ "blob_id": "7b920545a0241b30b66ff99f330dbb361f747f13", "index": 8297, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(last4)\n", "step-3": "card = int(input())\nlast4 = card % 10000\nprint(last4)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
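One subtlety of the modulo trick above: it drops leading zeros, so a card ending in 0042 prints as 42. String formatting restores the fixed width when that matters:

card = 4111111111110042  # made-up card number
print(card % 10000)           # 42, with the leading zeros lost
print(f'{card % 10000:04d}')  # 0042, zero-padded back to four digits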
#!/usr/bin/env python

import numpy as np
import cv2

# Create a 512x512 BGR image, initially all black
image = np.zeros((512, 512, 3), np.uint8)

# Point convention: (x, y), with the origin at the top-left corner

# Draw two horizontal lines across the top of the image:
# start point, end point, color (BGR) and line width
cv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)
cv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)

# Draw a rectangle
cv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)

font = cv2.FONT_HERSHEY_COMPLEX
cv2.putText(image, "ROS OpenCV", (10, 500),
            font, 2, (255, 0, 0), 2, cv2.LINE_AA)

cv2.imshow("Draw Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
normal
{ "blob_id": "f6c5c2180a1a4b05b3f103c330b455e7387713a6", "index": 8125, "step-1": "<mask token>\n", "step-2": "<mask token>\ncv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)\ncv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)\ncv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)\n<mask token>\ncv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.\n LINE_AA)\ncv2.imshow('Draw Image', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "step-3": "<mask token>\nimage = np.zeros((512, 512, 3), np.uint8)\ncv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)\ncv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)\ncv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)\nfont = cv2.FONT_HERSHEY_COMPLEX\ncv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.\n LINE_AA)\ncv2.imshow('Draw Image', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "step-4": "import numpy as np\nimport cv2\nimage = np.zeros((512, 512, 3), np.uint8)\ncv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)\ncv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)\ncv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)\nfont = cv2.FONT_HERSHEY_COMPLEX\ncv2.putText(image, 'ROS OpenCV', (10, 500), font, 2, (255, 0, 0), 2, cv2.\n LINE_AA)\ncv2.imshow('Draw Image', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "step-5": "#!/usr/bin/env python\n\nimport numpy as np\nimport cv2\n\n# Creat a Image with Pixel 512x512 RGB\nimage = np.zeros((512, 512, 3), np.uint8)\n\n\n# Pt Definition\n# x0y0, x1y0, x2 y0\n# x0y1 , x1y1, x2y1\n# Draw a Line in the Middle of the image\n# Start Co-ordinate end Co-ordinate While Color and Line Width\ncv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)\n\ncv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)\n\n# Draw Rectange\ncv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)\n\nfont = cv2.FONT_HERSHEY_COMPLEX\ncv2.putText(image, \"ROS OpenCV\", (10, 500),\n font, 2, (255, 0, 0), 2, cv2.LINE_AA)\n\ncv2.imshow(\"Draw Image\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
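A headless variant of the drawing script above: the same primitives, written to disk with cv2.imwrite instead of shown in a window, which is handy on machines without a display (the output filename is arbitrary):

import numpy as np
import cv2

image = np.zeros((512, 512, 3), np.uint8)
cv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)
cv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)
cv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)
cv2.putText(image, 'ROS OpenCV', (10, 500), cv2.FONT_HERSHEY_COMPLEX,
            2, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imwrite('draw_image.png', image)  # hypothetical output path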
from flask import Flask, render_template, request import matplotlib.pyplot as plt import numpy as np import sympy from DerivTest import diff, diff2, trapz from sympy.parsing.sympy_parser import parse_expr from sympy import Symbol #from ParsingClass import Parser #from scitools.StringFunction import StringFunction #from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField app = Flask(__name__) app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1 def functionGraph(function, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2): print("printing user input from functionGraph - " + function) print(dVal1, dVal2, dVal3, dVal4) #parser = Parser() #x=np.array(range(10)) x1 = -5; x2 = 5; print("1st input:") y=function def f(x): return eval(y) '''print("Domain Val 1:") x1 = float(input()) print("Domain Val 2:") x2 = float(input()) print("Range Val 1:") y1 = float(input()) print("Range Val 2:") y2 = float(input()) ''' x1=int(dVal1) x2=int(dVal2) y1=int(dVal3) y2=int(dVal4) print("Processing...") xRange1 = np.arange(x1, x2, 0.01) yRange1 = np.empty(xRange1.size) count = 0 yParsed = parse_expr(y, evaluate=False) n, d = yParsed.as_numer_denom() #s = Symbol('s', real = True) undef = sympy.solve(d) numzero = sympy.solve(n) plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k') plt.xlim(x1, x2) plt.ylim(y1, y2) plt.autoscale(False) for x in np.nditer(xRange1): yRange1[count] = eval(y) count = count+1 xVal1 = xRange1.tolist() yVal1 = yRange1.tolist() ax1 = plt.subplot(2,2,1) ax1.plot(xVal1, yVal1, 'g') for x in undef: if x not in numzero: try: ax1.axvline(x=x, linestyle = '--') except: pass else: x=x+0.01 ax1.plot(x, eval(y), "o", markersize=7, markeredgewidth=1, markeredgecolor='g',markerfacecolor='None') count = 0 '''for zero in numzero: if zero in undef: ax1.plot(zero, f(zero), marker='s', color='green') count = count + 1''' #ax1.set_aspect('equal') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) #plt.axis([0,6,0,30]) plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/graph.png', bbox_inches = 'tight') ############################################# # Relative Extrema ############################################# xRange2 = np.arange(x1, x2, 0.01) count = 0 yRange2 = np.empty(xRange2.size) for x in np.nditer(xRange2): yRange2[count] = diff(y, x) count = count + 1 xVal2 = xRange2.tolist() yVal2 = yRange2.tolist() ax1.plot(xVal2, yVal2, 'r', alpha=0.2) # ax2.set_aspect('equal') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') count = 1 limit = len(yVal2) - 1 for z in yVal2: if count == limit: break if (yVal2[count - 1]>0 and yVal2[count + 1]<0): ax1.plot(xVal1[count], yVal1[count], marker='s', color='c') ax1.axvline(x=xVal1[count], linestyle='--') count = count + 1 plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmax.png', bbox_inches='tight') plt.clf() xRange1 = np.arange(x1, x2, 0.01) yRange1 = np.empty(xRange1.size) count = 0 for x in np.nditer(xRange1): yRange1[count] = eval(y) count = count + 1 plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k') xVal1 = xRange1.tolist() yVal1 = yRange1.tolist() ax1 = plt.subplot(2, 2, 1) ax1.plot(xVal1, yVal1,'g') # ax1.set_aspect('equal') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') xRange2 = np.arange(x1, x2, 0.01) 
count = 0 yRange2 = np.empty(xRange2.size) for x in np.nditer(xRange2): yRange2[count] = diff(y, x) count = count + 1 xVal2 = xRange2.tolist() yVal2 = yRange2.tolist() ax1.plot(xVal2, yVal2, 'r', alpha=0.2) # ax2.set_aspect('equal') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') count = 1 limit = len(yVal2) - 1 for z in yVal2: if count == limit: break if (yVal2[count - 1] < 0 and yVal2[count + 1] > 0): ax1.plot(xVal1[count], yVal1[count], marker='s', color='c') ax1.axvline(x=xVal1[count], linestyle='--') count = count + 1 plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/relmin.png', bbox_inches='tight') plt.clf() ############################################# # First Derivative ############################################# xRange1 = np.arange(x1,x2, 0.01) yRange1 = np.empty(xRange1.size) count = 0 for x in np.nditer(xRange1): yRange1[count] = eval(y) count = count+1 plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k') xVal1 = xRange1.tolist() yVal1 = yRange1.tolist() ax1 = plt.subplot(2,2,1) ax1.plot(xVal1, yVal1, 'g') #ax1.set_aspect('equal') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') xRange2 = np.arange(x1, x2, 0.01) count = 0 yRange2 = np.empty(xRange2.size) for x in np.nditer(xRange2): yRange2[count] = diff(y,x) count = count+1 xVal2 = xRange2.tolist() yVal2 = yRange2.tolist() ax1.plot(xVal2, yVal2, 'r') #ax2.set_aspect('equal') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') if d == 1: plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv_graph.png', bbox_inches = 'tight') ############################################# # SECOND DERIVATIVE ############################################# xRange1 = np.arange(x1, x2, 0.01) yRange1 = np.empty(xRange1.size) count = 0 for x in np.nditer(xRange1): yRange1[count] = eval(y) count = count + 1 plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k') xVal1 = xRange1.tolist() yVal1 = yRange1.tolist() ax1 = plt.subplot(2, 2, 1) ax1.plot(xVal1, yVal1, 'g') # ax1.set_aspect('equal') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') xRange2 = np.arange(x1, x2, 0.01) count = 0 yRange2 = np.empty(xRange2.size) for x in np.nditer(xRange2): yRange2[count] = diff(y, x) count = count + 1 xVal2 = xRange2.tolist() yVal2 = yRange2.tolist() ax1.plot(xVal2, yVal2, 'r') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') xRange3 = np.arange(x1, x2, 0.01) yRange3 = np.empty(xRange3.size) '''for x in np.nditer(xRange3): yRange3[count] = diff2(y, x) count = count + 1''' count = 1 limit = yRange2.size-1 for x in np.nditer(xRange3): if count == limit: break yRange3[count] = diff2(yRange2[count-1], yRange2[count+1]) count = count + 1 np.delete(xRange3, -1) np.delete(yRange3, -1) xVal3 = xRange3.tolist() yVal3 = yRange3.tolist() print("XXXXXXXXXX") for x in xVal3: print (x) print("YYYYYYYYYY") for yVal in yVal3: print (yVal) ax1.plot(xVal3, yVal3, 'b') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') if d == 1: plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) 
plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/deriv2_graph.png', bbox_inches='tight') plt.clf ############################################# #POINTS OF INFLECTION ############################################# xRange1 = np.arange(x1, x2, 0.01) yRange1 = np.empty(xRange1.size) count = 0 for x in np.nditer(xRange1): yRange1[count] = eval(y) count = count + 1 plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k') xVal1 = xRange1.tolist() yVal1 = yRange1.tolist() ax1 = plt.subplot(2, 2, 1) ax1.plot(xVal1, yVal1, 'g') ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') xRange2 = np.arange(x1, x2, 0.01) count = 0 yRange2 = np.empty(xRange2.size) for x in np.nditer(xRange2): yRange2[count] = diff(y, x) count = count + 1 xVal2 = xRange2.tolist() yVal2 = yRange2.tolist() ax1.plot(xVal2, yVal2, 'r', alpha=0.2) ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') xRange3 = np.arange(x1, x2, 0.01) yRange3 = np.empty(xRange3.size) count = 1 limit = yRange2.size - 1 for x in np.nditer(xRange3): if count == limit: break yRange3[count] = diff2(yRange2[count - 1], yRange2[count + 1]) count = count + 1 np.delete(xRange3, -1) np.delete(yRange3, -1) xVal3 = xRange3.tolist() yVal3 = yRange3.tolist() ax1.plot(xVal3, yVal3, 'b', alpha=0.2) ax1.grid(True, which='both') ax1.axhline(y=0, color='k') ax1.axvline(x=0, color='k') if d == 1: plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) count = 1 limit = len(yVal2) - 1 for z in yVal3: if count == limit: break if yVal3[count - 1] < 0 and yVal3[count + 1] > 0: points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c') ax1.axvline(x=xVal2[count], linestyle='--') count = count + 1 count = 1 limit = len(yVal2) - 1 for z in yVal3: if count == limit: break if yVal3[count - 1] > 0 and yVal3[count + 1] < 0: points1 = ax1.plot(xVal2[count], yVal1[count], marker='s', color='c') ax1.axvline(x=xVal2[count], linestyle='--') count = count + 1 if d == 1: plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) plt.xlim(left=x1, right=x2) plt.ylim(top=y2, bottom=y1) plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/poi.png', bbox_inches='tight') plt.clf() ############################################# # FTC ############################################# xRange1 = np.arange(x1, x2, 0.01) yRange1 = np.empty(xRange1.size) count = 0 n, d = yParsed.as_numer_denom() undef = sympy.solve(d) plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k') plt.xlim(x1, x2) plt.ylim(y1, y2) plt.autoscale(False) for x in np.nditer(xRange1): yRange1[count] = eval(y) count = count + 1 xVal1 = xRange1.tolist() yVal1 = yRange1.tolist() ax1 = plt.subplot(2, 2, 1) ax1.plot(xVal1, yVal1, 'g') n, d = yParsed.as_numer_denom() s = Symbol('s', real=True) undef = sympy.solve(d, s) for xc in undef: ax1.axvline(x=xc, linestyle='--') ''' print("Integration x1:") x1int = float(input()) print("Integration x2:") x2int = float(input()) ''' x1int = int(ftcVal1) x2int = int(ftcVal2) print("Processing...") sectionx = np.arange(x1int, x2int, 0.00001) sectiony = np.empty(sectionx.size) count = 0 for x in np.nditer(sectionx): sectiony[count] = eval(y) count = count+1 plt.fill_between(sectionx, sectiony) global area area = 0 count = 0 limit = sectionx.size-1 for x in np.nditer(sectionx): if(count == limit): break trapSum = trapz(sectiony[count], sectiony[count+1]) area = area + trapSum count = count + 1 print(area) # 
ax1.set_aspect('equal')
    ax1.grid(True, which='both')
    ax1.axhline(y=0, color='k')
    ax1.axvline(x=0, color='k')

    if d == 1:
        plt.xlim(left=x1, right=x2)
        plt.ylim(top=y2, bottom=y1)

    plt.xlim(left=x1, right=x2)
    plt.ylim(top=y2, bottom=y1)

    plt.savefig('/Users/pranav/PycharmProjects/Main/GraphCalcImplementation/static/images/ftc.png', bbox_inches='tight')


area = 0  # FTC result; written by functionGraph() and read by the /ftc route

x1 = -5
x2 = 5

xRange1 = np.arange(x1, x2, 0.01)

#print("1st input")
#y=input()

#yParsed = parse_expr(y, evaluate=False)

#functionGraph(y)


def testFunc(inp):
    print("printing user input from testFunc - " + inp)


##############################################
# works on CHROME ONLY, caching issue in Safari
##############################################

@app.route('/', methods=['GET', 'POST'])
@app.route('/graph', methods=['GET', 'POST'])
def graph():
    if request.method == 'POST':
        func = request.form['Function']
        dVal1 = request.form['dVal1']
        dVal2 = request.form['dVal2']
        dVal3 = request.form['dVal3']
        dVal4 = request.form['dVal4']
        ftcVal1 = request.form['ftcVal1']
        ftcVal2 = request.form['ftcVal2']
        functionGraph(func, dVal1, dVal2, dVal3, dVal4, ftcVal1, ftcVal2)
        print("user input = " + str(func))  # was str(input), which printed the /input view function
        #testFunc(func)
    return render_template("graph.html")
    #return render_template("graph.html", result=func)


@app.route('/home', methods=['GET', 'POST'])
def home():
    return render_template('home.html')


@app.route('/input', methods=['GET', 'POST'])
def input():  # view for the input page; the name shadows the builtin input()
    return render_template('input.html')


'''@app.route('/input', methods=['GET', 'POST'])
def input_post():
    if request.method == 'POST':
        result = request.form['Function']
        print(result)
        return render_template("graph.html", result=result)'''


@app.route('/der', methods=['GET', 'POST'])
def derGraph():
    return render_template('graph2.html')


@app.route('/der2', methods=['GET', 'POST'])
def der2Graph():
    return render_template('graph3.html')


@app.route('/relmax', methods=['GET', 'POST'])
def relmax():
    return render_template('relmax.html')


@app.route('/relmin', methods=['GET', 'POST'])
def relmin():
    return render_template('relmin.html')


@app.route('/poi', methods=['GET', 'POST'])
def poi():
    return render_template('poi.html')


@app.route('/ftc', methods=['GET', 'POST'])
def ftc():
    global area
    return render_template('ftc.html', result=str(area))


@app.route('/in1', methods=['GET', 'POST'])
def in1():
    return render_template('in1.html')


@app.route('/out1', methods=['GET', 'POST'])
def out1():
    return render_template('out1.html')


@app.after_request
def add_header(response):
    response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
    response.headers['Cache-Control'] = 'public, max-age=0'
    return response


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080, debug=False)
[ 10, 13, 15, 16, 18 ]
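Editor's note on the record above: the graphing-calculator code calls three helpers — diff(y, x), diff2(left, right), and trapz(left, right) — whose definitions fall outside this excerpt. The sketch below is a minimal reconstruction consistent with the call sites, not the author's actual implementation; the finite-difference scheme and the constants are assumptions tied to the np.arange spacings (0.01 for the derivative plots, 0.00001 for the area sum).

import numpy as np

H = 0.01      # grid spacing used by np.arange in the derivative sections
DX = 0.00001  # grid spacing used by np.arange in the FTC (area) section


def diff(expr, x, h=H):
    """Forward-difference first derivative of the expression string at x."""
    f = lambda v: eval(expr, {'np': np, 'x': v})
    return (f(x + h) - f(x)) / h


def diff2(y_left, y_right, h=H):
    """Central difference over adjacent first-derivative samples, i.e. an
    approximation of the second derivative."""
    return (y_right - y_left) / (2 * h)


def trapz(y_left, y_right, dx=DX):
    """Area of one trapezoid between two adjacent samples."""
    return (y_left + y_right) * dx / 2.0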
from .gunicorn import *
from .server_app import *
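# Editor's note: the two star imports above are the entire module -- a common
# package __init__.py pattern that re-exports sibling modules so callers can
# import from the package root. The names suggest Gunicorn configuration and
# a server application, but neither module's contents appear in this record,
# so that reading is an assumption.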
normal
{ "blob_id": "ed5dd954dedb00bf645f9ca14b5ca9cd122b2adc", "index": 6183, "step-1": "<mask token>\n", "step-2": "from .gunicorn import *\nfrom .server_app import *\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
from django.db import models
from django.utils import timezone
from django.db.models.signals import post_save
from django.urls import reverse


class Purchase(models.Model):
    invoice = models.SmallIntegerField(primary_key=True, blank=False)
    ch_no = models.SmallIntegerField(blank=True, null=True)
    vendor = models.CharField(max_length=128, blank=False)
    date = models.DateTimeField(default=timezone.now, blank=False)
    description = models.TextField(max_length=4096, blank=True, null=True)

    def __str__(self):
        return self.vendor

    def get_absolute_url(self):
        return reverse('entry:purchase_detail', kwargs={'pk': self.pk})


class PurchaseDetail(models.Model):
    PRODUCT_CHOICES = (
        ('WOOD', 'Wood'),
        ('GLASS', 'Glass'),
        ('PLASTIC', 'Plastic'),
        ('LEATHER', 'Leather'),
        ('FABRIC', 'Fabric'),
        ('STEEL', 'Steel'),
    )
    purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)
    product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES, default='WOOD')
    quantity = models.PositiveSmallIntegerField(blank=False)
    rate = models.IntegerField(blank=False)
    total = models.IntegerField(blank=False)
    remarks = models.CharField(max_length=250)

    def _get_total(self):
        return self.quantity * self.rate
    labor_total = property(_get_total)

    def __str__(self):
        return self.product_name
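# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): `post_save` is imported
# above but never connected, and `total` is stored even though `labor_total`
# computes the same quantity * rate on the fly. One plausible intent is to
# persist the computed value after each save; the receiver below shows that
# wiring. The function name and the staleness check are assumptions.
# ---------------------------------------------------------------------------
from django.dispatch import receiver


@receiver(post_save, sender=PurchaseDetail)
def sync_purchase_detail_total(sender, instance, created, **kwargs):
    # queryset.update() bypasses save(), so this cannot re-trigger post_save.
    if instance.total != instance.labor_total:
        PurchaseDetail.objects.filter(pk=instance.pk).update(total=instance.labor_total)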
normal
{ "blob_id": "bb3c42c9f87a463b9f18601c9e3897b6d21351d5", "index": 7356, "step-1": "<mask token>\n\n\nclass PurchaseDetail(models.Model):\n PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',\n 'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',\n 'Steel')\n purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n\n def __str__(self):\n return self.product_name\n", "step-2": "<mask token>\n\n\nclass Purchase(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_absolute_url(self):\n return reverse('entry:purchase_detail', kwargs={'pk': self.pk})\n\n\nclass PurchaseDetail(models.Model):\n PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',\n 'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',\n 'Steel')\n purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n\n def __str__(self):\n return self.product_name\n", "step-3": "<mask token>\n\n\nclass Purchase(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.vendor\n\n def get_absolute_url(self):\n return reverse('entry:purchase_detail', kwargs={'pk': self.pk})\n\n\nclass PurchaseDetail(models.Model):\n PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',\n 'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',\n 'Steel')\n purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n\n def __str__(self):\n return self.product_name\n", "step-4": "from django.db import models\nfrom django.utils import timezone\nfrom django.db.models.signals import post_save\nfrom django.urls import reverse\n\n\nclass Purchase(models.Model):\n invoice = models.SmallIntegerField(primary_key=True, blank=False)\n ch_no = models.SmallIntegerField(blank=True, null=True)\n vendor = models.CharField(max_length=128, blank=False)\n date = models.DateTimeField(default=timezone.now, blank=False)\n description = models.TextField(max_length=4096, blank=True, null=True)\n\n def __str__(self):\n return self.vendor\n\n def get_absolute_url(self):\n return reverse('entry:purchase_detail', kwargs={'pk': self.pk})\n\n\nclass PurchaseDetail(models.Model):\n PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',\n 'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',\n 'Steel')\n purchase = 
models.ForeignKey(Purchase, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n\n def __str__(self):\n return self.product_name\n", "step-5": "from django.db import models\nfrom django.utils import timezone\nfrom django.db.models.signals import post_save\nfrom django.urls import reverse\n# Create your models here.\n\nclass Purchase(models.Model):\n invoice = models.SmallIntegerField(primary_key=True,blank=False)\n ch_no = models.SmallIntegerField(blank=True,null=True)\n vendor = models.CharField(max_length=128, blank=False)\n date = models.DateTimeField(default=timezone.now, blank=False)\n description = models.TextField(max_length=4096, blank=True, null=True)\n\n def __str__(self):\n return self.vendor\n\n def get_absolute_url(self):\n return reverse('entry:purchase_detail', kwargs={'pk': self.pk})\n\n\n\nclass PurchaseDetail(models.Model):\n \n PRODUCT_CHOICES = (\n ('WOOD', 'Wood'),\n ('GLASS', 'Glass'),\n ('PLASTIC', 'Plastic'),\n ('LEATHER', 'Leather'),\n ('FABRIC','Fabric'),\n ('STEEL', 'Steel'),\n )\n purchase= models.ForeignKey(Purchase,on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30,\n choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n def __str__(self):\n return (self.product_name)\n ", "step-ids": [ 4, 6, 7, 9, 10 ] }
[ 4, 6, 7, 9, 10 ]
import matplotlib.image as mpimg
import cv2
import rasterio
from ode_data_access.image_utils import view_as_blocks, is_black, align_and_crop
import os
import numpy as np
from tqdm import tqdm


class ChunkProcessor:

    def write_result_blocks(self, result_blocks, window, product_name, chunk_size, save_dir='test', skip_black_images=False,
                            align_and_crop_thresholds=None, vectorized_chunks=None):
        for i in range(result_blocks.shape[0]):
            for j in range(result_blocks.shape[1]):
                img = result_blocks[i][j]
                if not skip_black_images or not is_black(img):
                    filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'
                    filepath = './' + save_dir + '/' + filename
                    # Round-trip through disk so the chunk is re-read as an image array.
                    mpimg.imsave(filepath, img, cmap='gray')
                    img = mpimg.imread(filepath)

                    if align_and_crop_thresholds is not None:
                        img = align_and_crop(img, *align_and_crop_thresholds)
                        # interpolation must be passed by keyword: the third
                        # positional argument of cv2.resize is dst, not interpolation.
                        img = cv2.resize(img, (chunk_size, chunk_size), interpolation=cv2.INTER_AREA)
                        mpimg.imsave(filepath, img, cmap='gray')
                        # The crop may change dimensions, so rename the file to match.
                        new_filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'
                        new_filepath = './' + save_dir + '/' + new_filename
                        os.rename(filepath, new_filepath)

                    if vectorized_chunks is not None:
                        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                        vectorized_chunks.append(img.astype(np.uint8))

    # Based on the idea provided here - https://gis.stackexchange.com/questions/158527/reading-raster-files-by-block-with-rasterio
    def chunkify(self, img_file, product_name, chunk_size=256, save_dir='test', skip_black_images=True, align_and_crop_thresholds=None,
                 vectorized_chunks=None):
        with rasterio.open(img_file) as src:
            print('Resolution =', src.width, 'x', src.height)
            print('Estimated number of iterations =', ((src.width * src.height) / (1024 * 1024)) * 1.085)

            for block_index, window in tqdm(src.block_windows(1)):
                # rasterio reads (bands, rows, cols); move bands to the last axis.
                block_array = src.read(window=window)
                block_array = np.moveaxis(block_array, 0, -1)

                if block_array.shape[2] != 1:
                    block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)
                else:
                    block_array = np.squeeze(block_array)
                block_array_shape = block_array.shape

                # Only blocks that tile evenly into chunk_size squares are
                # processed; ragged edge blocks are skipped.
                if block_array_shape[0] % chunk_size == 0 and block_array_shape[1] % chunk_size == 0:
                    result_blocks = view_as_blocks(block_array, block_shape=(chunk_size, chunk_size))
                    self.write_result_blocks(result_blocks, window, product_name, chunk_size, save_dir, skip_black_images,
                                             align_and_crop_thresholds, vectorized_chunks)

    def chunkify_all(self, save_dir_prefix, chunk_size, product_image_urls, skip_black_images=True, align_and_crop_thresholds=None,
                     vectorized_chunks=None):
        for product_image_url, product_name in product_image_urls:
            filename = product_image_url.split('/')[-1]
            if filename.endswith('JP2') or filename.lower().endswith('jpg'):
                print('Chunkifying', product_name)
                jp2_filename = filename
                chunk_dir = save_dir_prefix + '_' + product_name

                if not os.path.exists(chunk_dir):
                    os.makedirs(chunk_dir)

                self.chunkify(jp2_filename, product_name, chunk_size, chunk_dir, skip_black_images, align_and_crop_thresholds,
                              vectorized_chunks)

                print("Number of chunks found:",
                      len([name for name in os.listdir(chunk_dir) if os.path.isfile(chunk_dir + '/' + name)]))
                print('-----')
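# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal usage example.
# The product URL/name pair below is hypothetical; chunkify_all expects
# (image_url, product_name) tuples and assumes the referenced JP2/JPG files
# have already been downloaded into the working directory under their URL
# basenames.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    processor = ChunkProcessor()
    products = [
        ('https://example.com/data/ESP_011265_1560_RED.JP2', 'ESP_011265_1560'),  # hypothetical
    ]
    chunks = []  # filled in place with uint8 grayscale chunk arrays
    processor.chunkify_all(save_dir_prefix='chunks', chunk_size=256,
                           product_image_urls=products, skip_black_images=True,
                           vectorized_chunks=chunks)
    print('Vectorized chunks collected:', len(chunks))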
normal
{ "blob_id": "303e1b95c2ca60041a34b8c09e013849112a108d", "index": 3475, "step-1": "<mask token>\n\n\nclass ChunkProcessor:\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass ChunkProcessor:\n <mask token>\n\n def chunkify(self, img_file, product_name, chunk_size=256, save_dir=\n 'test', skip_black_images=True, align_and_crop_thresholds=None,\n vectorized_chunks=None):\n with rasterio.open(img_file) as src:\n print('Resolution =', src.width, 'x', src.height)\n print('Estimated number of iterations =', src.width * src.\n height / (1024 * 1024) * 1.085)\n for block_index, window in tqdm(src.block_windows(1)):\n block_array = src.read(window=window)\n block_array = np.moveaxis(block_array, 0, -1)\n if block_array.shape[2] != 1:\n block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)\n else:\n block_array = np.squeeze(block_array)\n block_array_shape = block_array.shape\n if block_array_shape[0\n ] % chunk_size == 0 and block_array_shape[1\n ] % chunk_size == 0:\n result_blocks = view_as_blocks(block_array, block_shape\n =(chunk_size, chunk_size))\n self.write_result_blocks(result_blocks, window,\n product_name, chunk_size, save_dir,\n skip_black_images, align_and_crop_thresholds,\n vectorized_chunks)\n <mask token>\n", "step-3": "<mask token>\n\n\nclass ChunkProcessor:\n\n def write_result_blocks(self, result_blocks, window, product_name,\n chunk_size, save_dir='test', skip_black_images=False,\n align_and_crop_thresholds=None, vectorized_chunks=None):\n for i in range(result_blocks.shape[0]):\n for j in range(result_blocks.shape[1]):\n img = result_blocks[i][j]\n if not skip_black_images or not is_black(img):\n filename = (\n f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'\n )\n filepath = './' + save_dir + '/' + filename\n mpimg.imsave(filepath, img, cmap='gray')\n img = mpimg.imread(filepath)\n if align_and_crop_thresholds is not None:\n img = align_and_crop(img, *align_and_crop_thresholds)\n img = cv2.resize(img, (chunk_size, chunk_size), cv2\n .INTER_AREA)\n mpimg.imsave(filepath, img, cmap='gray')\n new_filename = (\n f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'\n )\n new_filepath = './' + save_dir + '/' + new_filename\n os.rename(filepath, new_filepath)\n if vectorized_chunks is not None:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n vectorized_chunks.append(img.astype(np.uint8))\n\n def chunkify(self, img_file, product_name, chunk_size=256, save_dir=\n 'test', skip_black_images=True, align_and_crop_thresholds=None,\n vectorized_chunks=None):\n with rasterio.open(img_file) as src:\n print('Resolution =', src.width, 'x', src.height)\n print('Estimated number of iterations =', src.width * src.\n height / (1024 * 1024) * 1.085)\n for block_index, window in tqdm(src.block_windows(1)):\n block_array = src.read(window=window)\n block_array = np.moveaxis(block_array, 0, -1)\n if block_array.shape[2] != 1:\n block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)\n else:\n block_array = np.squeeze(block_array)\n block_array_shape = block_array.shape\n if block_array_shape[0\n ] % chunk_size == 0 and block_array_shape[1\n ] % chunk_size == 0:\n result_blocks = view_as_blocks(block_array, block_shape\n =(chunk_size, chunk_size))\n self.write_result_blocks(result_blocks, window,\n product_name, chunk_size, save_dir,\n skip_black_images, align_and_crop_thresholds,\n vectorized_chunks)\n <mask token>\n", "step-4": "import 
matplotlib.image as mpimg\nimport cv2\nimport rasterio\nfrom ode_data_access.image_utils import view_as_blocks, is_black, align_and_crop\nimport os\nimport numpy as np\nfrom tqdm import tqdm\n\n\nclass ChunkProcessor:\n\n def write_result_blocks(self, result_blocks, window, product_name,\n chunk_size, save_dir='test', skip_black_images=False,\n align_and_crop_thresholds=None, vectorized_chunks=None):\n for i in range(result_blocks.shape[0]):\n for j in range(result_blocks.shape[1]):\n img = result_blocks[i][j]\n if not skip_black_images or not is_black(img):\n filename = (\n f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'\n )\n filepath = './' + save_dir + '/' + filename\n mpimg.imsave(filepath, img, cmap='gray')\n img = mpimg.imread(filepath)\n if align_and_crop_thresholds is not None:\n img = align_and_crop(img, *align_and_crop_thresholds)\n img = cv2.resize(img, (chunk_size, chunk_size), cv2\n .INTER_AREA)\n mpimg.imsave(filepath, img, cmap='gray')\n new_filename = (\n f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'\n )\n new_filepath = './' + save_dir + '/' + new_filename\n os.rename(filepath, new_filepath)\n if vectorized_chunks is not None:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n vectorized_chunks.append(img.astype(np.uint8))\n\n def chunkify(self, img_file, product_name, chunk_size=256, save_dir=\n 'test', skip_black_images=True, align_and_crop_thresholds=None,\n vectorized_chunks=None):\n with rasterio.open(img_file) as src:\n print('Resolution =', src.width, 'x', src.height)\n print('Estimated number of iterations =', src.width * src.\n height / (1024 * 1024) * 1.085)\n for block_index, window in tqdm(src.block_windows(1)):\n block_array = src.read(window=window)\n block_array = np.moveaxis(block_array, 0, -1)\n if block_array.shape[2] != 1:\n block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)\n else:\n block_array = np.squeeze(block_array)\n block_array_shape = block_array.shape\n if block_array_shape[0\n ] % chunk_size == 0 and block_array_shape[1\n ] % chunk_size == 0:\n result_blocks = view_as_blocks(block_array, block_shape\n =(chunk_size, chunk_size))\n self.write_result_blocks(result_blocks, window,\n product_name, chunk_size, save_dir,\n skip_black_images, align_and_crop_thresholds,\n vectorized_chunks)\n\n def chunkify_all(self, save_dir_prefix, chunk_size, product_image_urls,\n skip_black_images=True, align_and_crop_thresholds=None,\n vectorized_chunks=None):\n for product_image_url, product_name in product_image_urls:\n filename = product_image_url.split('/')[-1]\n if filename.endswith('JP2') or filename.lower().endswith('jpg'):\n print('Chunkifying', product_name)\n jp2_filename = filename\n chunk_dir = save_dir_prefix + '_' + product_name\n if not os.path.exists(chunk_dir):\n os.makedirs(chunk_dir)\n self.chunkify(jp2_filename, product_name, chunk_size,\n chunk_dir, skip_black_images, align_and_crop_thresholds,\n vectorized_chunks)\n print('Number of chunks found:', len([name for name in os.\n listdir(chunk_dir) if os.path.isfile(chunk_dir + '/' +\n name)]))\n print('-----')\n", "step-5": "import matplotlib.image as mpimg\r\nimport cv2\r\nimport rasterio\r\nfrom ode_data_access.image_utils import view_as_blocks, is_black, align_and_crop\r\nimport os\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\n\r\n\r\nclass ChunkProcessor:\r\n\r\n def write_result_blocks(self, result_blocks, window, product_name, chunk_size, 
save_dir='test', skip_black_images=False,\r\n align_and_crop_thresholds=None, vectorized_chunks=None):\r\n for i in range(result_blocks.shape[0]):\r\n for j in range(result_blocks.shape[1]):\r\n img = result_blocks[i][j]\r\n if not skip_black_images or not is_black(img):\r\n filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'\r\n filepath = './' + save_dir + '/' + filename\r\n mpimg.imsave(filepath, img, cmap=\"gray\")\r\n img = mpimg.imread(filepath)\r\n\r\n if align_and_crop_thresholds is not None:\r\n img = align_and_crop(img, *align_and_crop_thresholds)\r\n img = cv2.resize(img, (chunk_size, chunk_size), cv2.INTER_AREA)\r\n mpimg.imsave(filepath, img, cmap='gray')\r\n new_filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'\r\n new_filepath = './' + save_dir + '/' + new_filename\r\n os.rename(filepath, new_filepath)\r\n\r\n if vectorized_chunks is not None:\r\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\r\n vectorized_chunks.append(img.astype(np.uint8))\r\n\r\n\r\n # Based on the idea provided here - https://gis.stackexchange.com/questions/158527/reading-raster-files-by-block-with-rasterio\r\n def chunkify(self, img_file, product_name, chunk_size=256, save_dir='test', skip_black_images=True, align_and_crop_thresholds=None,\r\n vectorized_chunks=None):\r\n with rasterio.open(img_file) as src:\r\n print('Resolution =', src.width, 'x', src.height)\r\n print('Estimated number of iterations =', ((src.width * src.height) / (1024 * 1024)) * 1.085)\r\n\r\n for block_index, window in tqdm(src.block_windows(1)):\r\n block_array = src.read(window=window)\r\n # print('Block array', block_array.shape)\r\n\r\n block_array = np.moveaxis(block_array, 0, -1)\r\n # print('Move axis', block_array.shape)\r\n\r\n if block_array.shape[2] != 1:\r\n block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)\r\n else:\r\n block_array = np.squeeze(block_array)\r\n block_array_shape = block_array.shape\r\n\r\n # plt.imshow(block_array, cmap='gray')\r\n # print('Grayscale Block Shape', block_array_shape)\r\n\r\n if block_array_shape[0] % chunk_size == 0 and block_array_shape[1] % chunk_size == 0:\r\n result_blocks = view_as_blocks(block_array, block_shape=(chunk_size, chunk_size))\r\n self.write_result_blocks(result_blocks, window, product_name, chunk_size, save_dir, skip_black_images,\r\n align_and_crop_thresholds, vectorized_chunks)\r\n\r\n\r\n def chunkify_all(self, save_dir_prefix, chunk_size, product_image_urls, skip_black_images=True, align_and_crop_thresholds=None,\r\n vectorized_chunks=None):\r\n\r\n for product_image_url, product_name in product_image_urls:\r\n filename = product_image_url.split('/')[-1]\r\n if filename.endswith('JP2') or filename.lower().endswith('jpg'):\r\n print('Chunkifying', product_name)\r\n jp2_filename = filename\r\n chunk_dir = save_dir_prefix + '_' + product_name\r\n\r\n if not os.path.exists(chunk_dir):\r\n os.makedirs(chunk_dir)\r\n\r\n self.chunkify(jp2_filename, product_name, chunk_size, chunk_dir, skip_black_images, align_and_crop_thresholds,\r\n vectorized_chunks)\r\n\r\n print(\"Number of chunks found:\",\r\n len([name for name in os.listdir(chunk_dir) if os.path.isfile(chunk_dir + '/' + name)]))\r\n print('-----')", "step-ids": [ 1, 2, 3, 5, 6 ] }
[ 1, 2, 3, 5, 6 ]