body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
---|---|---|---|---|---|---|---|---|---|
ab121b9ce9cc95b2eed96bc135591fbe3075c2fc03c925b2fdaed653460b9381 | @type.setter
def type(self, type):
'Sets the type of this ShowRecordSetByZoneResp.\n\n 记录类型。 取值范围:A、AAAA、MX、CNAME、TXT、NS、SRV、CAA。\n\n :param type: The type of this ShowRecordSetByZoneResp.\n :type: str\n '
self._type = type | Sets the type of this ShowRecordSetByZoneResp.
记录类型。 取值范围:A、AAAA、MX、CNAME、TXT、NS、SRV、CAA。
:param type: The type of this ShowRecordSetByZoneResp.
:type: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | type | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @type.setter
def type(self, type):
'Sets the type of this ShowRecordSetByZoneResp.\n\n 记录类型。 取值范围:A、AAAA、MX、CNAME、TXT、NS、SRV、CAA。\n\n :param type: The type of this ShowRecordSetByZoneResp.\n :type: str\n '
self._type = type | @type.setter
def type(self, type):
'Sets the type of this ShowRecordSetByZoneResp.\n\n 记录类型。 取值范围:A、AAAA、MX、CNAME、TXT、NS、SRV、CAA。\n\n :param type: The type of this ShowRecordSetByZoneResp.\n :type: str\n '
self._type = type<|docstring|>Sets the type of this ShowRecordSetByZoneResp.
记录类型。 取值范围:A、AAAA、MX、CNAME、TXT、NS、SRV、CAA。
:param type: The type of this ShowRecordSetByZoneResp.
:type: str<|endoftext|> |
96daa99768ad8878a746661209dd7050d7e1659b8e395f769c1bdfba61745122 | @property
def ttl(self):
'Gets the ttl of this ShowRecordSetByZoneResp.\n\n 解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。\n\n :return: The ttl of this ShowRecordSetByZoneResp.\n :rtype: int\n '
return self._ttl | Gets the ttl of this ShowRecordSetByZoneResp.
解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。
:return: The ttl of this ShowRecordSetByZoneResp.
:rtype: int | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | ttl | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def ttl(self):
'Gets the ttl of this ShowRecordSetByZoneResp.\n\n 解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。\n\n :return: The ttl of this ShowRecordSetByZoneResp.\n :rtype: int\n '
return self._ttl | @property
def ttl(self):
'Gets the ttl of this ShowRecordSetByZoneResp.\n\n 解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。\n\n :return: The ttl of this ShowRecordSetByZoneResp.\n :rtype: int\n '
return self._ttl<|docstring|>Gets the ttl of this ShowRecordSetByZoneResp.
解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。
:return: The ttl of this ShowRecordSetByZoneResp.
:rtype: int<|endoftext|> |
c5f239d2d246631cceba68f0b28629ff9e8a853c94e34ad59d5e7c063d4decb8 | @ttl.setter
def ttl(self, ttl):
'Sets the ttl of this ShowRecordSetByZoneResp.\n\n 解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。\n\n :param ttl: The ttl of this ShowRecordSetByZoneResp.\n :type: int\n '
self._ttl = ttl | Sets the ttl of this ShowRecordSetByZoneResp.
解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。
:param ttl: The ttl of this ShowRecordSetByZoneResp.
:type: int | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | ttl | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @ttl.setter
def ttl(self, ttl):
'Sets the ttl of this ShowRecordSetByZoneResp.\n\n 解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。\n\n :param ttl: The ttl of this ShowRecordSetByZoneResp.\n :type: int\n '
self._ttl = ttl | @ttl.setter
def ttl(self, ttl):
'Sets the ttl of this ShowRecordSetByZoneResp.\n\n 解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。\n\n :param ttl: The ttl of this ShowRecordSetByZoneResp.\n :type: int\n '
self._ttl = ttl<|docstring|>Sets the ttl of this ShowRecordSetByZoneResp.
解析记录在本地DNS服务器的缓存时间,缓存时间越长更新生效越慢,以秒为单位。
:param ttl: The ttl of this ShowRecordSetByZoneResp.
:type: int<|endoftext|> |
bf87725511ffb17a42115c672b51b1548be261b333a91e4ca249473b8a948cf8 | @property
def records(self):
'Gets the records of this ShowRecordSetByZoneResp.\n\n 域名解析后的值。\n\n :return: The records of this ShowRecordSetByZoneResp.\n :rtype: list[str]\n '
return self._records | Gets the records of this ShowRecordSetByZoneResp.
域名解析后的值。
:return: The records of this ShowRecordSetByZoneResp.
:rtype: list[str] | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | records | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def records(self):
'Gets the records of this ShowRecordSetByZoneResp.\n\n 域名解析后的值。\n\n :return: The records of this ShowRecordSetByZoneResp.\n :rtype: list[str]\n '
return self._records | @property
def records(self):
'Gets the records of this ShowRecordSetByZoneResp.\n\n 域名解析后的值。\n\n :return: The records of this ShowRecordSetByZoneResp.\n :rtype: list[str]\n '
return self._records<|docstring|>Gets the records of this ShowRecordSetByZoneResp.
域名解析后的值。
:return: The records of this ShowRecordSetByZoneResp.
:rtype: list[str]<|endoftext|> |
eeb8744fe3334209f4e49c499921b9a19f7f7313044f992ca05c262f7ee04dd9 | @records.setter
def records(self, records):
'Sets the records of this ShowRecordSetByZoneResp.\n\n 域名解析后的值。\n\n :param records: The records of this ShowRecordSetByZoneResp.\n :type: list[str]\n '
self._records = records | Sets the records of this ShowRecordSetByZoneResp.
域名解析后的值。
:param records: The records of this ShowRecordSetByZoneResp.
:type: list[str] | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | records | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @records.setter
def records(self, records):
'Sets the records of this ShowRecordSetByZoneResp.\n\n 域名解析后的值。\n\n :param records: The records of this ShowRecordSetByZoneResp.\n :type: list[str]\n '
self._records = records | @records.setter
def records(self, records):
'Sets the records of this ShowRecordSetByZoneResp.\n\n 域名解析后的值。\n\n :param records: The records of this ShowRecordSetByZoneResp.\n :type: list[str]\n '
self._records = records<|docstring|>Sets the records of this ShowRecordSetByZoneResp.
域名解析后的值。
:param records: The records of this ShowRecordSetByZoneResp.
:type: list[str]<|endoftext|> |
b63870c00f2c62f0e00f7aad7b0a344002e79745c6926004b5e0ef2458b88703 | @property
def create_at(self):
'Gets the create_at of this ShowRecordSetByZoneResp.\n\n 创建时间。\n\n :return: The create_at of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._create_at | Gets the create_at of this ShowRecordSetByZoneResp.
创建时间。
:return: The create_at of this ShowRecordSetByZoneResp.
:rtype: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | create_at | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def create_at(self):
'Gets the create_at of this ShowRecordSetByZoneResp.\n\n 创建时间。\n\n :return: The create_at of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._create_at | @property
def create_at(self):
'Gets the create_at of this ShowRecordSetByZoneResp.\n\n 创建时间。\n\n :return: The create_at of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._create_at<|docstring|>Gets the create_at of this ShowRecordSetByZoneResp.
创建时间。
:return: The create_at of this ShowRecordSetByZoneResp.
:rtype: str<|endoftext|> |
849f7b7b3327bc856661725121901c736f2e7d91b284a2e727ca7f354dc6dd1d | @create_at.setter
def create_at(self, create_at):
'Sets the create_at of this ShowRecordSetByZoneResp.\n\n 创建时间。\n\n :param create_at: The create_at of this ShowRecordSetByZoneResp.\n :type: str\n '
self._create_at = create_at | Sets the create_at of this ShowRecordSetByZoneResp.
创建时间。
:param create_at: The create_at of this ShowRecordSetByZoneResp.
:type: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | create_at | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @create_at.setter
def create_at(self, create_at):
'Sets the create_at of this ShowRecordSetByZoneResp.\n\n 创建时间。\n\n :param create_at: The create_at of this ShowRecordSetByZoneResp.\n :type: str\n '
self._create_at = create_at | @create_at.setter
def create_at(self, create_at):
'Sets the create_at of this ShowRecordSetByZoneResp.\n\n 创建时间。\n\n :param create_at: The create_at of this ShowRecordSetByZoneResp.\n :type: str\n '
self._create_at = create_at<|docstring|>Sets the create_at of this ShowRecordSetByZoneResp.
创建时间。
:param create_at: The create_at of this ShowRecordSetByZoneResp.
:type: str<|endoftext|> |
04adca9a7cafb3e75205a28b2e05493ea64fe88f361852a678b5c2bb91a843b3 | @property
def update_at(self):
'Gets the update_at of this ShowRecordSetByZoneResp.\n\n 更新时间。\n\n :return: The update_at of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._update_at | Gets the update_at of this ShowRecordSetByZoneResp.
更新时间。
:return: The update_at of this ShowRecordSetByZoneResp.
:rtype: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | update_at | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def update_at(self):
'Gets the update_at of this ShowRecordSetByZoneResp.\n\n 更新时间。\n\n :return: The update_at of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._update_at | @property
def update_at(self):
'Gets the update_at of this ShowRecordSetByZoneResp.\n\n 更新时间。\n\n :return: The update_at of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._update_at<|docstring|>Gets the update_at of this ShowRecordSetByZoneResp.
更新时间。
:return: The update_at of this ShowRecordSetByZoneResp.
:rtype: str<|endoftext|> |
8ceecbef11d9ecbb61d455388219fa967281246d6e1a42e7774aea8aa59093a7 | @update_at.setter
def update_at(self, update_at):
'Sets the update_at of this ShowRecordSetByZoneResp.\n\n 更新时间。\n\n :param update_at: The update_at of this ShowRecordSetByZoneResp.\n :type: str\n '
self._update_at = update_at | Sets the update_at of this ShowRecordSetByZoneResp.
更新时间。
:param update_at: The update_at of this ShowRecordSetByZoneResp.
:type: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | update_at | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @update_at.setter
def update_at(self, update_at):
'Sets the update_at of this ShowRecordSetByZoneResp.\n\n 更新时间。\n\n :param update_at: The update_at of this ShowRecordSetByZoneResp.\n :type: str\n '
self._update_at = update_at | @update_at.setter
def update_at(self, update_at):
'Sets the update_at of this ShowRecordSetByZoneResp.\n\n 更新时间。\n\n :param update_at: The update_at of this ShowRecordSetByZoneResp.\n :type: str\n '
self._update_at = update_at<|docstring|>Sets the update_at of this ShowRecordSetByZoneResp.
更新时间。
:param update_at: The update_at of this ShowRecordSetByZoneResp.
:type: str<|endoftext|> |
64e8814806a9de66383768eb5c0718f1a63bdf8fbe8dc05b8aa2a875458997cb | @property
def status(self):
'Gets the status of this ShowRecordSetByZoneResp.\n\n 资源状态。\n\n :return: The status of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._status | Gets the status of this ShowRecordSetByZoneResp.
资源状态。
:return: The status of this ShowRecordSetByZoneResp.
:rtype: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | status | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def status(self):
'Gets the status of this ShowRecordSetByZoneResp.\n\n 资源状态。\n\n :return: The status of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._status | @property
def status(self):
'Gets the status of this ShowRecordSetByZoneResp.\n\n 资源状态。\n\n :return: The status of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._status<|docstring|>Gets the status of this ShowRecordSetByZoneResp.
资源状态。
:return: The status of this ShowRecordSetByZoneResp.
:rtype: str<|endoftext|> |
10db93dde55cd0dbf1ada7c6400d1e609f5cd2700250d71a6190bffa0b44481b | @status.setter
def status(self, status):
'Sets the status of this ShowRecordSetByZoneResp.\n\n 资源状态。\n\n :param status: The status of this ShowRecordSetByZoneResp.\n :type: str\n '
self._status = status | Sets the status of this ShowRecordSetByZoneResp.
资源状态。
:param status: The status of this ShowRecordSetByZoneResp.
:type: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | status | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @status.setter
def status(self, status):
'Sets the status of this ShowRecordSetByZoneResp.\n\n 资源状态。\n\n :param status: The status of this ShowRecordSetByZoneResp.\n :type: str\n '
self._status = status | @status.setter
def status(self, status):
'Sets the status of this ShowRecordSetByZoneResp.\n\n 资源状态。\n\n :param status: The status of this ShowRecordSetByZoneResp.\n :type: str\n '
self._status = status<|docstring|>Sets the status of this ShowRecordSetByZoneResp.
资源状态。
:param status: The status of this ShowRecordSetByZoneResp.
:type: str<|endoftext|> |
0214949af90cd33d488986a2449bad0a734e07fa98f0aebf09f4ed6ebda1f8bf | @property
def default(self):
'Gets the default of this ShowRecordSetByZoneResp.\n\n 标识是否由系统默认生成,系统默认生成的Record Set不能删除。\n\n :return: The default of this ShowRecordSetByZoneResp.\n :rtype: bool\n '
return self._default | Gets the default of this ShowRecordSetByZoneResp.
标识是否由系统默认生成,系统默认生成的Record Set不能删除。
:return: The default of this ShowRecordSetByZoneResp.
:rtype: bool | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | default | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def default(self):
'Gets the default of this ShowRecordSetByZoneResp.\n\n 标识是否由系统默认生成,系统默认生成的Record Set不能删除。\n\n :return: The default of this ShowRecordSetByZoneResp.\n :rtype: bool\n '
return self._default | @property
def default(self):
'Gets the default of this ShowRecordSetByZoneResp.\n\n 标识是否由系统默认生成,系统默认生成的Record Set不能删除。\n\n :return: The default of this ShowRecordSetByZoneResp.\n :rtype: bool\n '
return self._default<|docstring|>Gets the default of this ShowRecordSetByZoneResp.
标识是否由系统默认生成,系统默认生成的Record Set不能删除。
:return: The default of this ShowRecordSetByZoneResp.
:rtype: bool<|endoftext|> |
41568ab58e4967d453ca7fcbbc3820b145913811f22eec7249fbd210e9a162c0 | @default.setter
def default(self, default):
'Sets the default of this ShowRecordSetByZoneResp.\n\n 标识是否由系统默认生成,系统默认生成的Record Set不能删除。\n\n :param default: The default of this ShowRecordSetByZoneResp.\n :type: bool\n '
self._default = default | Sets the default of this ShowRecordSetByZoneResp.
标识是否由系统默认生成,系统默认生成的Record Set不能删除。
:param default: The default of this ShowRecordSetByZoneResp.
:type: bool | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | default | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @default.setter
def default(self, default):
'Sets the default of this ShowRecordSetByZoneResp.\n\n 标识是否由系统默认生成,系统默认生成的Record Set不能删除。\n\n :param default: The default of this ShowRecordSetByZoneResp.\n :type: bool\n '
self._default = default | @default.setter
def default(self, default):
'Sets the default of this ShowRecordSetByZoneResp.\n\n 标识是否由系统默认生成,系统默认生成的Record Set不能删除。\n\n :param default: The default of this ShowRecordSetByZoneResp.\n :type: bool\n '
self._default = default<|docstring|>Sets the default of this ShowRecordSetByZoneResp.
标识是否由系统默认生成,系统默认生成的Record Set不能删除。
:param default: The default of this ShowRecordSetByZoneResp.
:type: bool<|endoftext|> |
fb438de8a5bdc7f482459493384a1fe14f095ad2beddcafe4ec579693a85d181 | @property
def project_id(self):
'Gets the project_id of this ShowRecordSetByZoneResp.\n\n 该Record Set所属的项目ID。\n\n :return: The project_id of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._project_id | Gets the project_id of this ShowRecordSetByZoneResp.
该Record Set所属的项目ID。
:return: The project_id of this ShowRecordSetByZoneResp.
:rtype: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | project_id | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def project_id(self):
'Gets the project_id of this ShowRecordSetByZoneResp.\n\n 该Record Set所属的项目ID。\n\n :return: The project_id of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._project_id | @property
def project_id(self):
'Gets the project_id of this ShowRecordSetByZoneResp.\n\n 该Record Set所属的项目ID。\n\n :return: The project_id of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._project_id<|docstring|>Gets the project_id of this ShowRecordSetByZoneResp.
该Record Set所属的项目ID。
:return: The project_id of this ShowRecordSetByZoneResp.
:rtype: str<|endoftext|> |
b38e7ac0dcf15c9de807895ae3cc8be3c5daa5f1ce03b4f52cc2f3ce7624925c | @project_id.setter
def project_id(self, project_id):
'Sets the project_id of this ShowRecordSetByZoneResp.\n\n 该Record Set所属的项目ID。\n\n :param project_id: The project_id of this ShowRecordSetByZoneResp.\n :type: str\n '
self._project_id = project_id | Sets the project_id of this ShowRecordSetByZoneResp.
该Record Set所属的项目ID。
:param project_id: The project_id of this ShowRecordSetByZoneResp.
:type: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | project_id | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @project_id.setter
def project_id(self, project_id):
'Sets the project_id of this ShowRecordSetByZoneResp.\n\n 该Record Set所属的项目ID。\n\n :param project_id: The project_id of this ShowRecordSetByZoneResp.\n :type: str\n '
self._project_id = project_id | @project_id.setter
def project_id(self, project_id):
'Sets the project_id of this ShowRecordSetByZoneResp.\n\n 该Record Set所属的项目ID。\n\n :param project_id: The project_id of this ShowRecordSetByZoneResp.\n :type: str\n '
self._project_id = project_id<|docstring|>Sets the project_id of this ShowRecordSetByZoneResp.
该Record Set所属的项目ID。
:param project_id: The project_id of this ShowRecordSetByZoneResp.
:type: str<|endoftext|> |
7dff96d9edfe7847ae8483353fd6ad3d91dedf13aa1dc0baed73c28f458604b8 | @property
def links(self):
'Gets the links of this ShowRecordSetByZoneResp.\n\n\n :return: The links of this ShowRecordSetByZoneResp.\n :rtype: PageLink\n '
return self._links | Gets the links of this ShowRecordSetByZoneResp.
:return: The links of this ShowRecordSetByZoneResp.
:rtype: PageLink | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | links | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def links(self):
'Gets the links of this ShowRecordSetByZoneResp.\n\n\n :return: The links of this ShowRecordSetByZoneResp.\n :rtype: PageLink\n '
return self._links | @property
def links(self):
'Gets the links of this ShowRecordSetByZoneResp.\n\n\n :return: The links of this ShowRecordSetByZoneResp.\n :rtype: PageLink\n '
return self._links<|docstring|>Gets the links of this ShowRecordSetByZoneResp.
:return: The links of this ShowRecordSetByZoneResp.
:rtype: PageLink<|endoftext|> |
702ff5a42cdc977ad96171a0522d8ccc78f384ee9f2d08e51e34630985de031a | @links.setter
def links(self, links):
'Sets the links of this ShowRecordSetByZoneResp.\n\n\n :param links: The links of this ShowRecordSetByZoneResp.\n :type: PageLink\n '
self._links = links | Sets the links of this ShowRecordSetByZoneResp.
:param links: The links of this ShowRecordSetByZoneResp.
:type: PageLink | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | links | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @links.setter
def links(self, links):
'Sets the links of this ShowRecordSetByZoneResp.\n\n\n :param links: The links of this ShowRecordSetByZoneResp.\n :type: PageLink\n '
self._links = links | @links.setter
def links(self, links):
'Sets the links of this ShowRecordSetByZoneResp.\n\n\n :param links: The links of this ShowRecordSetByZoneResp.\n :type: PageLink\n '
self._links = links<|docstring|>Sets the links of this ShowRecordSetByZoneResp.
:param links: The links of this ShowRecordSetByZoneResp.
:type: PageLink<|endoftext|> |
cd7f893d4d1206271e1c4608b4be10f2da592dc093261db0d3e352babd38bedc | @property
def line(self):
'Gets the line of this ShowRecordSetByZoneResp.\n\n 解析线路ID。\n\n :return: The line of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._line | Gets the line of this ShowRecordSetByZoneResp.
解析线路ID。
:return: The line of this ShowRecordSetByZoneResp.
:rtype: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | line | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def line(self):
'Gets the line of this ShowRecordSetByZoneResp.\n\n 解析线路ID。\n\n :return: The line of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._line | @property
def line(self):
'Gets the line of this ShowRecordSetByZoneResp.\n\n 解析线路ID。\n\n :return: The line of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._line<|docstring|>Gets the line of this ShowRecordSetByZoneResp.
解析线路ID。
:return: The line of this ShowRecordSetByZoneResp.
:rtype: str<|endoftext|> |
4918b2163de1447299f38b25a71d39fdb1677d7e62f8fe8e6f8d3352f1618399 | @line.setter
def line(self, line):
'Sets the line of this ShowRecordSetByZoneResp.\n\n 解析线路ID。\n\n :param line: The line of this ShowRecordSetByZoneResp.\n :type: str\n '
self._line = line | Sets the line of this ShowRecordSetByZoneResp.
解析线路ID。
:param line: The line of this ShowRecordSetByZoneResp.
:type: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | line | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @line.setter
def line(self, line):
'Sets the line of this ShowRecordSetByZoneResp.\n\n 解析线路ID。\n\n :param line: The line of this ShowRecordSetByZoneResp.\n :type: str\n '
self._line = line | @line.setter
def line(self, line):
'Sets the line of this ShowRecordSetByZoneResp.\n\n 解析线路ID。\n\n :param line: The line of this ShowRecordSetByZoneResp.\n :type: str\n '
self._line = line<|docstring|>Sets the line of this ShowRecordSetByZoneResp.
解析线路ID。
:param line: The line of this ShowRecordSetByZoneResp.
:type: str<|endoftext|> |
acb333f6121889d7181ffea0f288986abef788beea46613a8c02b95f4c666db5 | @property
def weight(self):
'Gets the weight of this ShowRecordSetByZoneResp.\n\n 解析记录的权重。\n\n :return: The weight of this ShowRecordSetByZoneResp.\n :rtype: int\n '
return self._weight | Gets the weight of this ShowRecordSetByZoneResp.
解析记录的权重。
:return: The weight of this ShowRecordSetByZoneResp.
:rtype: int | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | weight | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def weight(self):
'Gets the weight of this ShowRecordSetByZoneResp.\n\n 解析记录的权重。\n\n :return: The weight of this ShowRecordSetByZoneResp.\n :rtype: int\n '
return self._weight | @property
def weight(self):
'Gets the weight of this ShowRecordSetByZoneResp.\n\n 解析记录的权重。\n\n :return: The weight of this ShowRecordSetByZoneResp.\n :rtype: int\n '
return self._weight<|docstring|>Gets the weight of this ShowRecordSetByZoneResp.
解析记录的权重。
:return: The weight of this ShowRecordSetByZoneResp.
:rtype: int<|endoftext|> |
980c695e33250e92dcfab838d2f1456d54f16b3b9e47003221a527c16985b7c9 | @weight.setter
def weight(self, weight):
'Sets the weight of this ShowRecordSetByZoneResp.\n\n 解析记录的权重。\n\n :param weight: The weight of this ShowRecordSetByZoneResp.\n :type: int\n '
self._weight = weight | Sets the weight of this ShowRecordSetByZoneResp.
解析记录的权重。
:param weight: The weight of this ShowRecordSetByZoneResp.
:type: int | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | weight | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @weight.setter
def weight(self, weight):
'Sets the weight of this ShowRecordSetByZoneResp.\n\n 解析记录的权重。\n\n :param weight: The weight of this ShowRecordSetByZoneResp.\n :type: int\n '
self._weight = weight | @weight.setter
def weight(self, weight):
'Sets the weight of this ShowRecordSetByZoneResp.\n\n 解析记录的权重。\n\n :param weight: The weight of this ShowRecordSetByZoneResp.\n :type: int\n '
self._weight = weight<|docstring|>Sets the weight of this ShowRecordSetByZoneResp.
解析记录的权重。
:param weight: The weight of this ShowRecordSetByZoneResp.
:type: int<|endoftext|> |
45298f80613665137863222309a60f5cc4722905cfb5261d36a272f9125ac972 | @property
def health_check_id(self):
'Gets the health_check_id of this ShowRecordSetByZoneResp.\n\n 健康检查ID。\n\n :return: The health_check_id of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._health_check_id | Gets the health_check_id of this ShowRecordSetByZoneResp.
健康检查ID。
:return: The health_check_id of this ShowRecordSetByZoneResp.
:rtype: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | health_check_id | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def health_check_id(self):
'Gets the health_check_id of this ShowRecordSetByZoneResp.\n\n 健康检查ID。\n\n :return: The health_check_id of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._health_check_id | @property
def health_check_id(self):
'Gets the health_check_id of this ShowRecordSetByZoneResp.\n\n 健康检查ID。\n\n :return: The health_check_id of this ShowRecordSetByZoneResp.\n :rtype: str\n '
return self._health_check_id<|docstring|>Gets the health_check_id of this ShowRecordSetByZoneResp.
健康检查ID。
:return: The health_check_id of this ShowRecordSetByZoneResp.
:rtype: str<|endoftext|> |
1cae4ad7770a3c2d5d1edc4649204dc19a5712f23f7f7fd7db9139a884828542 | @health_check_id.setter
def health_check_id(self, health_check_id):
'Sets the health_check_id of this ShowRecordSetByZoneResp.\n\n 健康检查ID。\n\n :param health_check_id: The health_check_id of this ShowRecordSetByZoneResp.\n :type: str\n '
self._health_check_id = health_check_id | Sets the health_check_id of this ShowRecordSetByZoneResp.
健康检查ID。
:param health_check_id: The health_check_id of this ShowRecordSetByZoneResp.
:type: str | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | health_check_id | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @health_check_id.setter
def health_check_id(self, health_check_id):
'Sets the health_check_id of this ShowRecordSetByZoneResp.\n\n 健康检查ID。\n\n :param health_check_id: The health_check_id of this ShowRecordSetByZoneResp.\n :type: str\n '
self._health_check_id = health_check_id | @health_check_id.setter
def health_check_id(self, health_check_id):
'Sets the health_check_id of this ShowRecordSetByZoneResp.\n\n 健康检查ID。\n\n :param health_check_id: The health_check_id of this ShowRecordSetByZoneResp.\n :type: str\n '
self._health_check_id = health_check_id<|docstring|>Sets the health_check_id of this ShowRecordSetByZoneResp.
健康检查ID。
:param health_check_id: The health_check_id of this ShowRecordSetByZoneResp.
:type: str<|endoftext|> |
022039109c70193e1b5f8c2663d0408218b6420e526615e2eecfed5045b34e8b | @property
def alias_target(self):
'Gets the alias_target of this ShowRecordSetByZoneResp.\n\n\n :return: The alias_target of this ShowRecordSetByZoneResp.\n :rtype: AliasTarget\n '
return self._alias_target | Gets the alias_target of this ShowRecordSetByZoneResp.
:return: The alias_target of this ShowRecordSetByZoneResp.
:rtype: AliasTarget | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | alias_target | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @property
def alias_target(self):
'Gets the alias_target of this ShowRecordSetByZoneResp.\n\n\n :return: The alias_target of this ShowRecordSetByZoneResp.\n :rtype: AliasTarget\n '
return self._alias_target | @property
def alias_target(self):
'Gets the alias_target of this ShowRecordSetByZoneResp.\n\n\n :return: The alias_target of this ShowRecordSetByZoneResp.\n :rtype: AliasTarget\n '
return self._alias_target<|docstring|>Gets the alias_target of this ShowRecordSetByZoneResp.
:return: The alias_target of this ShowRecordSetByZoneResp.
:rtype: AliasTarget<|endoftext|> |
51aea2fb58d362bf5f7594e6102604f0c4fd7db0d8cd6ba1d219d872e811aae2 | @alias_target.setter
def alias_target(self, alias_target):
'Sets the alias_target of this ShowRecordSetByZoneResp.\n\n\n :param alias_target: The alias_target of this ShowRecordSetByZoneResp.\n :type: AliasTarget\n '
self._alias_target = alias_target | Sets the alias_target of this ShowRecordSetByZoneResp.
:param alias_target: The alias_target of this ShowRecordSetByZoneResp.
:type: AliasTarget | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | alias_target | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | @alias_target.setter
def alias_target(self, alias_target):
'Sets the alias_target of this ShowRecordSetByZoneResp.\n\n\n :param alias_target: The alias_target of this ShowRecordSetByZoneResp.\n :type: AliasTarget\n '
self._alias_target = alias_target | @alias_target.setter
def alias_target(self, alias_target):
'Sets the alias_target of this ShowRecordSetByZoneResp.\n\n\n :param alias_target: The alias_target of this ShowRecordSetByZoneResp.\n :type: AliasTarget\n '
self._alias_target = alias_target<|docstring|>Sets the alias_target of this ShowRecordSetByZoneResp.
:param alias_target: The alias_target of this ShowRecordSetByZoneResp.
:type: AliasTarget<|endoftext|> |
23795442a46e2cd10dec98fded44ed9172a29971e98983a30ad89baa6c9c0a03 | def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result | Returns the model properties as a dict | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | to_dict | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|> |
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99 | def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | Returns the string representation of the model | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | to_str | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | def to_str(self):
return pprint.pformat(self.to_dict()) | def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|> |
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703 | def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | For `print` and `pprint` | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | __repr__ | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | def __repr__(self):
return self.to_str() | def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|> |
cb81c63641e2f5cfe1829d140fd05dc59c28d93191093580679b2e4243f74c65 | def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, ShowRecordSetByZoneResp)):
return False
return (self.__dict__ == other.__dict__) | Returns true if both objects are equal | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | __eq__ | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | def __eq__(self, other):
if (not isinstance(other, ShowRecordSetByZoneResp)):
return False
return (self.__dict__ == other.__dict__) | def __eq__(self, other):
if (not isinstance(other, ShowRecordSetByZoneResp)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|> |
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42 | def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | Returns true if both objects are not equal | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_resp.py | __ne__ | githubmilesma/huaweicloud-sdk-python-v3 | 1 | python | def __ne__(self, other):
return (not (self == other)) | def __ne__(self, other):
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|> |
084a102073e36b9ca792292b8bee648db2c3f34e207dd9fd17b51c8b7e885c43 | async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None:
'Set up the UPnP/IGD sensors.'
coordinator = hass.data[DOMAIN][config_entry.entry_id]
entities: list[UpnpSensor] = [RawUpnpSensor(coordinator=coordinator, entity_description=entity_description) for entity_description in RAW_SENSORS if (coordinator.data.get(entity_description.key) is not None)]
entities.extend([DerivedUpnpSensor(coordinator=coordinator, entity_description=entity_description) for entity_description in DERIVED_SENSORS if (coordinator.data.get(entity_description.key) is not None)])
LOGGER.debug('Adding sensor entities: %s', entities)
async_add_entities(entities) | Set up the UPnP/IGD sensors. | homeassistant/components/upnp/sensor.py | async_setup_entry | a-p-z/core | 30,023 | python | async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None:
coordinator = hass.data[DOMAIN][config_entry.entry_id]
entities: list[UpnpSensor] = [RawUpnpSensor(coordinator=coordinator, entity_description=entity_description) for entity_description in RAW_SENSORS if (coordinator.data.get(entity_description.key) is not None)]
entities.extend([DerivedUpnpSensor(coordinator=coordinator, entity_description=entity_description) for entity_description in DERIVED_SENSORS if (coordinator.data.get(entity_description.key) is not None)])
LOGGER.debug('Adding sensor entities: %s', entities)
async_add_entities(entities) | async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None:
coordinator = hass.data[DOMAIN][config_entry.entry_id]
entities: list[UpnpSensor] = [RawUpnpSensor(coordinator=coordinator, entity_description=entity_description) for entity_description in RAW_SENSORS if (coordinator.data.get(entity_description.key) is not None)]
entities.extend([DerivedUpnpSensor(coordinator=coordinator, entity_description=entity_description) for entity_description in DERIVED_SENSORS if (coordinator.data.get(entity_description.key) is not None)])
LOGGER.debug('Adding sensor entities: %s', entities)
async_add_entities(entities)<|docstring|>Set up the UPnP/IGD sensors.<|endoftext|> |
242fe10050a683bd371adae95123c195f0d1bc171242fd19c03b99c1e170d5f3 | @property
def native_value(self) -> (str | None):
'Return the state of the device.'
value = self.coordinator.data[self.entity_description.key]
if (value is None):
return None
return format(value, self.entity_description.format) | Return the state of the device. | homeassistant/components/upnp/sensor.py | native_value | a-p-z/core | 30,023 | python | @property
def native_value(self) -> (str | None):
value = self.coordinator.data[self.entity_description.key]
if (value is None):
return None
return format(value, self.entity_description.format) | @property
def native_value(self) -> (str | None):
value = self.coordinator.data[self.entity_description.key]
if (value is None):
return None
return format(value, self.entity_description.format)<|docstring|>Return the state of the device.<|endoftext|> |
44efc8dbf8ba0ad8e9155b5bd986bea71c90ba68ae78f1bf80a5389753a5ab5b | def __init__(self, coordinator: UpnpDataUpdateCoordinator, entity_description: UpnpSensorEntityDescription) -> None:
'Initialize sensor.'
super().__init__(coordinator=coordinator, entity_description=entity_description)
self._last_value = None
self._last_timestamp = None | Initialize sensor. | homeassistant/components/upnp/sensor.py | __init__ | a-p-z/core | 30,023 | python | def __init__(self, coordinator: UpnpDataUpdateCoordinator, entity_description: UpnpSensorEntityDescription) -> None:
super().__init__(coordinator=coordinator, entity_description=entity_description)
self._last_value = None
self._last_timestamp = None | def __init__(self, coordinator: UpnpDataUpdateCoordinator, entity_description: UpnpSensorEntityDescription) -> None:
super().__init__(coordinator=coordinator, entity_description=entity_description)
self._last_value = None
self._last_timestamp = None<|docstring|>Initialize sensor.<|endoftext|> |
066a748655b6a8731a5c9d95c7b70324ad3b885ebc4484bb0001336656a139bb | def _has_overflowed(self, current_value) -> bool:
'Check if value has overflowed.'
return (current_value < self._last_value) | Check if value has overflowed. | homeassistant/components/upnp/sensor.py | _has_overflowed | a-p-z/core | 30,023 | python | def _has_overflowed(self, current_value) -> bool:
return (current_value < self._last_value) | def _has_overflowed(self, current_value) -> bool:
return (current_value < self._last_value)<|docstring|>Check if value has overflowed.<|endoftext|> |
ffdfdc4ecf8d15a6b2544f436b97d358c5fd84d729634a6c6fa037901dd643e4 | @property
def native_value(self) -> (str | None):
'Return the state of the device.'
current_value = self.coordinator.data[self.entity_description.key]
if (current_value is None):
return None
current_timestamp = self.coordinator.data[TIMESTAMP]
if ((self._last_value is None) or self._has_overflowed(current_value)):
self._last_value = current_value
self._last_timestamp = current_timestamp
return None
delta_value = (current_value - self._last_value)
if (self.entity_description.native_unit_of_measurement == DATA_RATE_KIBIBYTES_PER_SECOND):
delta_value /= KIBIBYTE
delta_time = (current_timestamp - self._last_timestamp)
if (delta_time.total_seconds() == 0):
return None
derived = (delta_value / delta_time.total_seconds())
self._last_value = current_value
self._last_timestamp = current_timestamp
return format(derived, self.entity_description.format) | Return the state of the device. | homeassistant/components/upnp/sensor.py | native_value | a-p-z/core | 30,023 | python | @property
def native_value(self) -> (str | None):
current_value = self.coordinator.data[self.entity_description.key]
if (current_value is None):
return None
current_timestamp = self.coordinator.data[TIMESTAMP]
if ((self._last_value is None) or self._has_overflowed(current_value)):
self._last_value = current_value
self._last_timestamp = current_timestamp
return None
delta_value = (current_value - self._last_value)
if (self.entity_description.native_unit_of_measurement == DATA_RATE_KIBIBYTES_PER_SECOND):
delta_value /= KIBIBYTE
delta_time = (current_timestamp - self._last_timestamp)
if (delta_time.total_seconds() == 0):
return None
derived = (delta_value / delta_time.total_seconds())
self._last_value = current_value
self._last_timestamp = current_timestamp
return format(derived, self.entity_description.format) | @property
def native_value(self) -> (str | None):
current_value = self.coordinator.data[self.entity_description.key]
if (current_value is None):
return None
current_timestamp = self.coordinator.data[TIMESTAMP]
if ((self._last_value is None) or self._has_overflowed(current_value)):
self._last_value = current_value
self._last_timestamp = current_timestamp
return None
delta_value = (current_value - self._last_value)
if (self.entity_description.native_unit_of_measurement == DATA_RATE_KIBIBYTES_PER_SECOND):
delta_value /= KIBIBYTE
delta_time = (current_timestamp - self._last_timestamp)
if (delta_time.total_seconds() == 0):
return None
derived = (delta_value / delta_time.total_seconds())
self._last_value = current_value
self._last_timestamp = current_timestamp
return format(derived, self.entity_description.format)<|docstring|>Return the state of the device.<|endoftext|> |
e4aefebb20512d66c9c797efe1f66fb74d19cfae592cccc9f1f9bfafa155edb0 | def GeneralisationTest(noOfTestData=500, noOfPrototypes=10, decay=0.0, doPredictions=1, doMatLabResults=False):
'Function to create a disjoint from the training set test set'
categories = True
X = np.load('allInputDataCurrent.npy')
T = np.load('allOutputDataCurrent.npy')
from keras.models import load_model
model = load_model('Random_model.h5')
noOfTrainData = len(X)
assert (len(X) == len(T))
lenOfInput = len(X[3])
lenOfOutput = len(T[3])
lenOfBlock = int((lenOfInput / noOfPrototypes))
noOfExamples = (noOfTrainData // noOfPrototypes)
noOfNewExamples = (noOfTestData // noOfPrototypes)
lenOfR = (lenOfInput - lenOfBlock)
weightOfX = int(sum(X[0]))
weightOfR = (weightOfX - lenOfBlock)
inverseWeightOfR = (lenOfR - weightOfR)
denom = (lenOfInput - (lenOfInput / noOfPrototypes))
assert (int(denom) == lenOfR)
fractionalWeightOfR = (weightOfR / denom)
fractionalInverseWeightOfR = (inverseWeightOfR / denom)
weight = [fractionalWeightOfR, fractionalInverseWeightOfR]
weightOfT = int(sum(T[3]))
if (lenOfOutput == noOfPrototypes):
use1HOT = 1
else:
use1HOT = 0
if (categories == True):
noOfOutputs = noOfPrototypes
if (use1HOT == 1):
sizeOfOutput = noOfPrototypes
print('Overwriting output vector size to length {}'.format(noOfPrototypes))
else:
noOfOutputs = noOfTrainData
print('Random vector, R, has weight {0}'.format(weightOfR))
if doMatLabResults:
Test_X = code.make_prototyped_random_codes(M=500, n=lenOfInput, p=noOfPrototypes, weight=[fractionalWeightOfR], k=2, symbolList=None, verbose=verbose, decay_templates=decay)
sio.savemat('Test_X5000.mat', {'Test_X': Test_X})
R = code.make_random_codes(M=500, n=501, weight=weight, k=2, symbolList=[1, 0], verbose=True)
sio.savemat('R3.mat', {'R': R})
(Test_X, All_X) = code.get_test_x(X=X, noOfTestData=noOfTestData, lenOfInput=lenOfInput, noOfPrototypes=noOfPrototypes, weight=[fractionalWeightOfR, fractionalInverseWeightOfR], k=2, symbolList=None, verbose=verbose, decay_templates=decay)
(Test_T, prototypeOutputCodes) = code.get_test_t(T, noOfPrototypes=noOfPrototypes, noOfTestData=noOfTestData, lenOfOutput=len(T[0]), verbose=False)
if (doPredictions == 1):
d.prediction_tester(model, X, T, name='Training data')
if (noOfTestData != 0):
d.prediction_tester(model, Test_X, Test_T, name='Test data', example_no=0)
np.save('GeneralisantionInputDataTest.npy', Test_X)
np.save('GeneralisationOutputDataTest.npy', Test_T)
return (Test_X, Test_T) | Function to create a disjoint from the training set test set | code/walker_with_generalisation.py | GeneralisationTest | ellagale/constructionist_binary_NN_tester | 0 | python | def GeneralisationTest(noOfTestData=500, noOfPrototypes=10, decay=0.0, doPredictions=1, doMatLabResults=False):
categories = True
X = np.load('allInputDataCurrent.npy')
T = np.load('allOutputDataCurrent.npy')
from keras.models import load_model
model = load_model('Random_model.h5')
noOfTrainData = len(X)
assert (len(X) == len(T))
lenOfInput = len(X[3])
lenOfOutput = len(T[3])
lenOfBlock = int((lenOfInput / noOfPrototypes))
noOfExamples = (noOfTrainData // noOfPrototypes)
noOfNewExamples = (noOfTestData // noOfPrototypes)
lenOfR = (lenOfInput - lenOfBlock)
weightOfX = int(sum(X[0]))
weightOfR = (weightOfX - lenOfBlock)
inverseWeightOfR = (lenOfR - weightOfR)
denom = (lenOfInput - (lenOfInput / noOfPrototypes))
assert (int(denom) == lenOfR)
fractionalWeightOfR = (weightOfR / denom)
fractionalInverseWeightOfR = (inverseWeightOfR / denom)
weight = [fractionalWeightOfR, fractionalInverseWeightOfR]
weightOfT = int(sum(T[3]))
if (lenOfOutput == noOfPrototypes):
use1HOT = 1
else:
use1HOT = 0
if (categories == True):
noOfOutputs = noOfPrototypes
if (use1HOT == 1):
sizeOfOutput = noOfPrototypes
print('Overwriting output vector size to length {}'.format(noOfPrototypes))
else:
noOfOutputs = noOfTrainData
print('Random vector, R, has weight {0}'.format(weightOfR))
if doMatLabResults:
Test_X = code.make_prototyped_random_codes(M=500, n=lenOfInput, p=noOfPrototypes, weight=[fractionalWeightOfR], k=2, symbolList=None, verbose=verbose, decay_templates=decay)
sio.savemat('Test_X5000.mat', {'Test_X': Test_X})
R = code.make_random_codes(M=500, n=501, weight=weight, k=2, symbolList=[1, 0], verbose=True)
sio.savemat('R3.mat', {'R': R})
(Test_X, All_X) = code.get_test_x(X=X, noOfTestData=noOfTestData, lenOfInput=lenOfInput, noOfPrototypes=noOfPrototypes, weight=[fractionalWeightOfR, fractionalInverseWeightOfR], k=2, symbolList=None, verbose=verbose, decay_templates=decay)
(Test_T, prototypeOutputCodes) = code.get_test_t(T, noOfPrototypes=noOfPrototypes, noOfTestData=noOfTestData, lenOfOutput=len(T[0]), verbose=False)
if (doPredictions == 1):
d.prediction_tester(model, X, T, name='Training data')
if (noOfTestData != 0):
d.prediction_tester(model, Test_X, Test_T, name='Test data', example_no=0)
np.save('GeneralisantionInputDataTest.npy', Test_X)
np.save('GeneralisationOutputDataTest.npy', Test_T)
return (Test_X, Test_T) | def GeneralisationTest(noOfTestData=500, noOfPrototypes=10, decay=0.0, doPredictions=1, doMatLabResults=False):
categories = True
X = np.load('allInputDataCurrent.npy')
T = np.load('allOutputDataCurrent.npy')
from keras.models import load_model
model = load_model('Random_model.h5')
noOfTrainData = len(X)
assert (len(X) == len(T))
lenOfInput = len(X[3])
lenOfOutput = len(T[3])
lenOfBlock = int((lenOfInput / noOfPrototypes))
noOfExamples = (noOfTrainData // noOfPrototypes)
noOfNewExamples = (noOfTestData // noOfPrototypes)
lenOfR = (lenOfInput - lenOfBlock)
weightOfX = int(sum(X[0]))
weightOfR = (weightOfX - lenOfBlock)
inverseWeightOfR = (lenOfR - weightOfR)
denom = (lenOfInput - (lenOfInput / noOfPrototypes))
assert (int(denom) == lenOfR)
fractionalWeightOfR = (weightOfR / denom)
fractionalInverseWeightOfR = (inverseWeightOfR / denom)
weight = [fractionalWeightOfR, fractionalInverseWeightOfR]
weightOfT = int(sum(T[3]))
if (lenOfOutput == noOfPrototypes):
use1HOT = 1
else:
use1HOT = 0
if (categories == True):
noOfOutputs = noOfPrototypes
if (use1HOT == 1):
sizeOfOutput = noOfPrototypes
print('Overwriting output vector size to length {}'.format(noOfPrototypes))
else:
noOfOutputs = noOfTrainData
print('Random vector, R, has weight {0}'.format(weightOfR))
if doMatLabResults:
Test_X = code.make_prototyped_random_codes(M=500, n=lenOfInput, p=noOfPrototypes, weight=[fractionalWeightOfR], k=2, symbolList=None, verbose=verbose, decay_templates=decay)
sio.savemat('Test_X5000.mat', {'Test_X': Test_X})
R = code.make_random_codes(M=500, n=501, weight=weight, k=2, symbolList=[1, 0], verbose=True)
sio.savemat('R3.mat', {'R': R})
(Test_X, All_X) = code.get_test_x(X=X, noOfTestData=noOfTestData, lenOfInput=lenOfInput, noOfPrototypes=noOfPrototypes, weight=[fractionalWeightOfR, fractionalInverseWeightOfR], k=2, symbolList=None, verbose=verbose, decay_templates=decay)
(Test_T, prototypeOutputCodes) = code.get_test_t(T, noOfPrototypes=noOfPrototypes, noOfTestData=noOfTestData, lenOfOutput=len(T[0]), verbose=False)
if (doPredictions == 1):
d.prediction_tester(model, X, T, name='Training data')
if (noOfTestData != 0):
d.prediction_tester(model, Test_X, Test_T, name='Test data', example_no=0)
np.save('GeneralisantionInputDataTest.npy', Test_X)
np.save('GeneralisationOutputDataTest.npy', Test_T)
return (Test_X, Test_T)<|docstring|>Function to create a disjoint from the training set test set<|endoftext|> |
42953f5b6770563eb06ca5589dfa3814a08e773963adb58c1da016d7d2a52758 | def update_datasource(self):
' Create a bokeh column data source for the\n selected dataset and period\n '
if (self.measurements.size > 0):
self.cds.data = self.measurements.to_dict(orient='list')
else:
self.cds.data = self.empty | Create a bokeh column data source for the
selected dataset and period | app/monitor/base.py | update_datasource | SimonKrughoff/squash-bokeh | 2 | python | def update_datasource(self):
' Create a bokeh column data source for the\n selected dataset and period\n '
if (self.measurements.size > 0):
self.cds.data = self.measurements.to_dict(orient='list')
else:
self.cds.data = self.empty | def update_datasource(self):
' Create a bokeh column data source for the\n selected dataset and period\n '
if (self.measurements.size > 0):
self.cds.data = self.measurements.to_dict(orient='list')
else:
self.cds.data = self.empty<|docstring|>Create a bokeh column data source for the
selected dataset and period<|endoftext|> |
2e5852089d196576fed51c43bec1e1dc58d0b1dca2d08a352a8c43a8804cffb1 | @abstractmethod
def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None) -> None:
'Calculate cut points for given discretisation approach.\n\n The cut_points attribute should be set by this method.\n '
pass | Calculate cut points for given discretisation approach.
The cut_points attribute should be set by this method. | src/sumnplot/discretisation.py | fit | richardangell/analysis-development | 1 | python | @abstractmethod
def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None) -> None:
'Calculate cut points for given discretisation approach.\n\n The cut_points attribute should be set by this method.\n '
pass | @abstractmethod
def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None) -> None:
'Calculate cut points for given discretisation approach.\n\n The cut_points attribute should be set by this method.\n '
pass<|docstring|>Calculate cut points for given discretisation approach.
The cut_points attribute should be set by this method.<|endoftext|> |
b6d10df1cefa1636347263b2f4183881f1c780f3fa43830f39aba6ed9003df7d | def transform(self, X: pd.DataFrame) -> pd.Series:
'Cut variable in X at cut_points. This function uses the pd.cut\n method.\n\n A specific null category is added on the cut output.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n Returns\n -------\n variable_cut : pd.Series\n Discretised variable.\n\n '
check_is_fitted(self, 'cut_points')
check_columns_in_df(X, [self.variable])
variable_cut = pd.cut(x=X[self.variable], bins=self.cut_points, include_lowest=True, duplicates='drop')
variable_cut = self._add_null_category(variable_cut)
return variable_cut | Cut variable in X at cut_points. This function uses the pd.cut
method.
A specific null category is added on the cut output.
Parameters
----------
X : pd.DataFrame
DataFrame containing column to discretise. This column is defined
by the variable attribute.
Returns
-------
variable_cut : pd.Series
Discretised variable. | src/sumnplot/discretisation.py | transform | richardangell/analysis-development | 1 | python | def transform(self, X: pd.DataFrame) -> pd.Series:
'Cut variable in X at cut_points. This function uses the pd.cut\n method.\n\n A specific null category is added on the cut output.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n Returns\n -------\n variable_cut : pd.Series\n Discretised variable.\n\n '
check_is_fitted(self, 'cut_points')
check_columns_in_df(X, [self.variable])
variable_cut = pd.cut(x=X[self.variable], bins=self.cut_points, include_lowest=True, duplicates='drop')
variable_cut = self._add_null_category(variable_cut)
return variable_cut | def transform(self, X: pd.DataFrame) -> pd.Series:
'Cut variable in X at cut_points. This function uses the pd.cut\n method.\n\n A specific null category is added on the cut output.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n Returns\n -------\n variable_cut : pd.Series\n Discretised variable.\n\n '
check_is_fitted(self, 'cut_points')
check_columns_in_df(X, [self.variable])
variable_cut = pd.cut(x=X[self.variable], bins=self.cut_points, include_lowest=True, duplicates='drop')
variable_cut = self._add_null_category(variable_cut)
return variable_cut<|docstring|>Cut variable in X at cut_points. This function uses the pd.cut
method.
A specific null category is added on the cut output.
Parameters
----------
X : pd.DataFrame
DataFrame containing column to discretise. This column is defined
by the variable attribute.
Returns
-------
variable_cut : pd.Series
Discretised variable.<|endoftext|> |
bc784980be5dc5ca1dbd0873b8c6b52e0a945d57c9232e12ff924cc10cf8a762 | @staticmethod
def _clean_cut_points(cut_points: np.ndarray) -> np.ndarray:
'Clean provided cut points for discretisation by removing null values\n and returning unique values.\n\n Parameters\n ----------\n cut_points : np.ndarray\n Array of cut points that define where a particular column should be\n split to discretise it.\n\n Returns\n -------\n cleaned_cut_points : np.ndarray\n Array of the unique cut points input to the function, with any null\n values also removed.\n\n '
cleaned_cut_points = np.unique(cut_points[(~ np.isnan(cut_points))])
if (len(cleaned_cut_points) <= 1):
raise ValueError(f'only 1 cut point after cleaning {cleaned_cut_points} - before cleaning {cut_points}')
return cleaned_cut_points | Clean provided cut points for discretisation by removing null values
and returning unique values.
Parameters
----------
cut_points : np.ndarray
Array of cut points that define where a particular column should be
split to discretise it.
Returns
-------
cleaned_cut_points : np.ndarray
Array of the unique cut points input to the function, with any null
values also removed. | src/sumnplot/discretisation.py | _clean_cut_points | richardangell/analysis-development | 1 | python | @staticmethod
def _clean_cut_points(cut_points: np.ndarray) -> np.ndarray:
'Clean provided cut points for discretisation by removing null values\n and returning unique values.\n\n Parameters\n ----------\n cut_points : np.ndarray\n Array of cut points that define where a particular column should be\n split to discretise it.\n\n Returns\n -------\n cleaned_cut_points : np.ndarray\n Array of the unique cut points input to the function, with any null\n values also removed.\n\n '
cleaned_cut_points = np.unique(cut_points[(~ np.isnan(cut_points))])
if (len(cleaned_cut_points) <= 1):
raise ValueError(f'only 1 cut point after cleaning {cleaned_cut_points} - before cleaning {cut_points}')
return cleaned_cut_points | @staticmethod
def _clean_cut_points(cut_points: np.ndarray) -> np.ndarray:
'Clean provided cut points for discretisation by removing null values\n and returning unique values.\n\n Parameters\n ----------\n cut_points : np.ndarray\n Array of cut points that define where a particular column should be\n split to discretise it.\n\n Returns\n -------\n cleaned_cut_points : np.ndarray\n Array of the unique cut points input to the function, with any null\n values also removed.\n\n '
cleaned_cut_points = np.unique(cut_points[(~ np.isnan(cut_points))])
if (len(cleaned_cut_points) <= 1):
raise ValueError(f'only 1 cut point after cleaning {cleaned_cut_points} - before cleaning {cut_points}')
return cleaned_cut_points<|docstring|>Clean provided cut points for discretisation by removing null values
and returning unique values.
Parameters
----------
cut_points : np.ndarray
Array of cut points that define where a particular column should be
split to discretise it.
Returns
-------
cleaned_cut_points : np.ndarray
Array of the unique cut points input to the function, with any null
values also removed.<|endoftext|> |
0dee99be33c842fff7899562fab5250ff6880ee071c0c727463756f22abcde4e | @staticmethod
def _add_null_category(categorical_variable: pd.Series, null_category_name: str='Null') -> pd.Series:
"Function to add new categorical level to categorical variable and\n set NAs to this category.\n\n Parameters\n ----------\n categorical_variable : pd.Series\n Categorical variable to add null categorical level to.\n\n null_category_name : str, default = 'Null'\n The name of the categorical level for null values to add.\n\n Returns\n -------\n cat : pd.Series\n Categorical variable (pandas category type) with null categorical\n level added.\n\n "
check_type(categorical_variable, pd.Series, 'categorical_variable')
check_type(null_category_name, str, 'null_category_name')
check_condition(is_categorical_dtype(categorical_variable), f'categorical_variable ({categorical_variable.name}) is categorical dtype')
check_condition((null_category_name not in categorical_variable.cat.categories), f'null_category_name ({null_category_name}) not already in categorical_variable ({categorical_variable.name}) categories')
cat = categorical_variable.cat.add_categories([null_category_name])
cat.fillna(null_category_name, inplace=True)
return cat | Function to add new categorical level to categorical variable and
set NAs to this category.
Parameters
----------
categorical_variable : pd.Series
Categorical variable to add null categorical level to.
null_category_name : str, default = 'Null'
The name of the categorical level for null values to add.
Returns
-------
cat : pd.Series
Categorical variable (pandas category type) with null categorical
level added. | src/sumnplot/discretisation.py | _add_null_category | richardangell/analysis-development | 1 | python | @staticmethod
def _add_null_category(categorical_variable: pd.Series, null_category_name: str='Null') -> pd.Series:
"Function to add new categorical level to categorical variable and\n set NAs to this category.\n\n Parameters\n ----------\n categorical_variable : pd.Series\n Categorical variable to add null categorical level to.\n\n null_category_name : str, default = 'Null'\n The name of the categorical level for null values to add.\n\n Returns\n -------\n cat : pd.Series\n Categorical variable (pandas category type) with null categorical\n level added.\n\n "
check_type(categorical_variable, pd.Series, 'categorical_variable')
check_type(null_category_name, str, 'null_category_name')
check_condition(is_categorical_dtype(categorical_variable), f'categorical_variable ({categorical_variable.name}) is categorical dtype')
check_condition((null_category_name not in categorical_variable.cat.categories), f'null_category_name ({null_category_name}) not already in categorical_variable ({categorical_variable.name}) categories')
cat = categorical_variable.cat.add_categories([null_category_name])
cat.fillna(null_category_name, inplace=True)
return cat | @staticmethod
def _add_null_category(categorical_variable: pd.Series, null_category_name: str='Null') -> pd.Series:
"Function to add new categorical level to categorical variable and\n set NAs to this category.\n\n Parameters\n ----------\n categorical_variable : pd.Series\n Categorical variable to add null categorical level to.\n\n null_category_name : str, default = 'Null'\n The name of the categorical level for null values to add.\n\n Returns\n -------\n cat : pd.Series\n Categorical variable (pandas category type) with null categorical\n level added.\n\n "
check_type(categorical_variable, pd.Series, 'categorical_variable')
check_type(null_category_name, str, 'null_category_name')
check_condition(is_categorical_dtype(categorical_variable), f'categorical_variable ({categorical_variable.name}) is categorical dtype')
check_condition((null_category_name not in categorical_variable.cat.categories), f'null_category_name ({null_category_name}) not already in categorical_variable ({categorical_variable.name}) categories')
cat = categorical_variable.cat.add_categories([null_category_name])
cat.fillna(null_category_name, inplace=True)
return cat<|docstring|>Function to add new categorical level to categorical variable and
set NAs to this category.
Parameters
----------
categorical_variable : pd.Series
Categorical variable to add null categorical level to.
null_category_name : str, default = 'Null'
The name of the categorical level for null values to add.
Returns
-------
cat : pd.Series
Categorical variable (pandas category type) with null categorical
level added.<|endoftext|> |
2a90b5a341757f18fa2d9ac665f00715c5e815b828c3e8aa281156c79a596191 | @abstractmethod
def _get_max_number_of_bins(self):
'Method to return the maximum number of bins possible for the given\n variable.\n\n Note, the actual number may be lower once calculated on a given dataset\n because the cut points may not be unique.\n '
pass | Method to return the maximum number of bins possible for the given
variable.
Note, the actual number may be lower once calculated on a given dataset
because the cut points may not be unique. | src/sumnplot/discretisation.py | _get_max_number_of_bins | richardangell/analysis-development | 1 | python | @abstractmethod
def _get_max_number_of_bins(self):
'Method to return the maximum number of bins possible for the given\n variable.\n\n Note, the actual number may be lower once calculated on a given dataset\n because the cut points may not be unique.\n '
pass | @abstractmethod
def _get_max_number_of_bins(self):
'Method to return the maximum number of bins possible for the given\n variable.\n\n Note, the actual number may be lower once calculated on a given dataset\n because the cut points may not be unique.\n '
pass<|docstring|>Method to return the maximum number of bins possible for the given
variable.
Note, the actual number may be lower once calculated on a given dataset
because the cut points may not be unique.<|endoftext|> |
948e1000b853f4777fd6d823d0283a7266119220bb25a7fff01a1fa70aed07a2 | def _get_actual_number_of_bins(self) -> int:
'Method to return the actual number of bins based off cut_points\n after the fit method has been run.\n\n Returns\n -------\n int\n Actual number of bins variable has been cut into.\n\n '
check_is_fitted(self, 'cut_points')
return (len(self.cut_points) - 1) | Method to return the actual number of bins based off cut_points
after the fit method has been run.
Returns
-------
int
Actual number of bins variable has been cut into. | src/sumnplot/discretisation.py | _get_actual_number_of_bins | richardangell/analysis-development | 1 | python | def _get_actual_number_of_bins(self) -> int:
'Method to return the actual number of bins based off cut_points\n after the fit method has been run.\n\n Returns\n -------\n int\n Actual number of bins variable has been cut into.\n\n '
check_is_fitted(self, 'cut_points')
return (len(self.cut_points) - 1) | def _get_actual_number_of_bins(self) -> int:
'Method to return the actual number of bins based off cut_points\n after the fit method has been run.\n\n Returns\n -------\n int\n Actual number of bins variable has been cut into.\n\n '
check_is_fitted(self, 'cut_points')
return (len(self.cut_points) - 1)<|docstring|>Method to return the actual number of bins based off cut_points
after the fit method has been run.
Returns
-------
int
Actual number of bins variable has been cut into.<|endoftext|> |
6702fc1177b6d757213a76da66edeae46e12ab3476935667bc0ca1245ea5b728 | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are equally spaced across the range of the variable. The\n attribute cut_points contains the calculate cut points.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
variable_min = X[self.variable].min()
variable_max = X[self.variable].max()
cut_points = np.linspace(start=variable_min, stop=variable_max, num=(self.n + 1))
self.cut_points = self._clean_cut_points(cut_points)
return self | Calculate cut points on the input data X.
Cut points are equally spaced across the range of the variable. The
attribute cut_points contains the calculate cut points.
Parameters
----------
X : pd.DataFrame
DataFrame containing column to discretise. This column is defined
by the variable attribute.
y : pd.Series, default = None
Response variable. Not used. Only implemented for compatibility
with scikit-learn.
sample_weight : pd.Series or np.ndarray, default = None
Optional, sample weights for each record in X. | src/sumnplot/discretisation.py | fit | richardangell/analysis-development | 1 | python | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are equally spaced across the range of the variable. The\n attribute cut_points contains the calculate cut points.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
variable_min = X[self.variable].min()
variable_max = X[self.variable].max()
cut_points = np.linspace(start=variable_min, stop=variable_max, num=(self.n + 1))
self.cut_points = self._clean_cut_points(cut_points)
return self | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are equally spaced across the range of the variable. The\n attribute cut_points contains the calculate cut points.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
variable_min = X[self.variable].min()
variable_max = X[self.variable].max()
cut_points = np.linspace(start=variable_min, stop=variable_max, num=(self.n + 1))
self.cut_points = self._clean_cut_points(cut_points)
return self<|docstring|>Calculate cut points on the input data X.
Cut points are equally spaced across the range of the variable. The
attribute cut_points contains the calculate cut points.
Parameters
----------
X : pd.DataFrame
DataFrame containing column to discretise. This column is defined
by the variable attribute.
y : pd.Series, default = None
Response variable. Not used. Only implemented for compatibility
with scikit-learn.
sample_weight : pd.Series or np.ndarray, default = None
Optional, sample weights for each record in X.<|endoftext|> |
55f452180f1968e63f634af8a09df0c0ccfb436382b8e266f74f793575b9a2f8 | def _get_max_number_of_bins(self) -> int:
'Return the maximum number of bins possible for the given\n variable.\n '
return self.n | Return the maximum number of bins possible for the given
variable. | src/sumnplot/discretisation.py | _get_max_number_of_bins | richardangell/analysis-development | 1 | python | def _get_max_number_of_bins(self) -> int:
'Return the maximum number of bins possible for the given\n variable.\n '
return self.n | def _get_max_number_of_bins(self) -> int:
'Return the maximum number of bins possible for the given\n variable.\n '
return self.n<|docstring|>Return the maximum number of bins possible for the given
variable.<|endoftext|> |
f812858674afaa903eddab3be70f0d3d8e7af8a122e471574626e7b88c71a5c8 | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are chosen so each of the n buckets contains an equal amount\n of weight. The attribute cut_points contains the calculate cut points.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
cut_points = QuantileDiscretiser._compute_weighted_quantile(values=X[self.variable], quantiles=tuple(np.linspace(start=0, stop=1, num=(self.n + 1))), sample_weight=sample_weight)
self.cut_points = self._clean_cut_points(cut_points)
return self | Calculate cut points on the input data X.
Cut points are chosen so each of the n buckets contains an equal amount
of weight. The attribute cut_points contains the calculate cut points.
Parameters
----------
X : pd.DataFrame
DataFrame containing column to discretise. This column is defined
by the variable attribute.
y : pd.Series, default = None
Response variable. Not used. Only implemented for compatibility
with scikit-learn.
sample_weight : pd.Series or np.ndarray, default = None
Optional, sample weights for each record in X. | src/sumnplot/discretisation.py | fit | richardangell/analysis-development | 1 | python | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are chosen so each of the n buckets contains an equal amount\n of weight. The attribute cut_points contains the calculate cut points.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
cut_points = QuantileDiscretiser._compute_weighted_quantile(values=X[self.variable], quantiles=tuple(np.linspace(start=0, stop=1, num=(self.n + 1))), sample_weight=sample_weight)
self.cut_points = self._clean_cut_points(cut_points)
return self | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are chosen so each of the n buckets contains an equal amount\n of weight. The attribute cut_points contains the calculate cut points.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
cut_points = QuantileDiscretiser._compute_weighted_quantile(values=X[self.variable], quantiles=tuple(np.linspace(start=0, stop=1, num=(self.n + 1))), sample_weight=sample_weight)
self.cut_points = self._clean_cut_points(cut_points)
return self<|docstring|>Calculate cut points on the input data X.
Cut points are chosen so each of the n buckets contains an equal amount
of weight. The attribute cut_points contains the calculate cut points.
Parameters
----------
X : pd.DataFrame
DataFrame containing column to discretise. This column is defined
by the variable attribute.
y : pd.Series, default = None
Response variable. Not used. Only implemented for compatibility
with scikit-learn.
sample_weight : pd.Series or np.ndarray, default = None
Optional, sample weights for each record in X.<|endoftext|> |
f121b0f690c630cef9abc9894d83f7b56da119aa3011ef72eb3a2a61caef024f | def _get_max_number_of_bins(self) -> int:
'Return the maximum number of bins possible for variable.'
return self.n | Return the maximum number of bins possible for variable. | src/sumnplot/discretisation.py | _get_max_number_of_bins | richardangell/analysis-development | 1 | python | def _get_max_number_of_bins(self) -> int:
return self.n | def _get_max_number_of_bins(self) -> int:
return self.n<|docstring|>Return the maximum number of bins possible for variable.<|endoftext|> |
afbd6b63020123ea3da63241686b7bfd76329eafbe798a31fa529fa5e8c6d450 | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are (potentially weighted) quantiles specified when\n initialising the transformer.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
cut_points = self._compute_weighted_quantile(values=X[self.variable], quantiles=self.quantiles, sample_weight=sample_weight)
self.cut_points = self._clean_cut_points(cut_points)
return self | Calculate cut points on the input data X.
Cut points are (potentially weighted) quantiles specified when
initialising the transformer.
Parameters
----------
X : pd.DataFrame
DataFrame containing column to discretise. This column is defined
by the variable attribute.
y : pd.Series, default = None
Response variable. Not used. Only implemented for compatibility
with scikit-learn.
sample_weight : pd.Series or np.ndarray, default = None
Optional, sample weights for each record in X. | src/sumnplot/discretisation.py | fit | richardangell/analysis-development | 1 | python | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are (potentially weighted) quantiles specified when\n initialising the transformer.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
cut_points = self._compute_weighted_quantile(values=X[self.variable], quantiles=self.quantiles, sample_weight=sample_weight)
self.cut_points = self._clean_cut_points(cut_points)
return self | def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None):
'Calculate cut points on the input data X.\n\n Cut points are (potentially weighted) quantiles specified when\n initialising the transformer.\n\n Parameters\n ----------\n X : pd.DataFrame\n DataFrame containing column to discretise. This column is defined\n by the variable attribute.\n\n y : pd.Series, default = None\n Response variable. Not used. Only implemented for compatibility\n with scikit-learn.\n\n sample_weight : pd.Series or np.ndarray, default = None\n Optional, sample weights for each record in X.\n\n '
check_columns_in_df(X, [self.variable])
cut_points = self._compute_weighted_quantile(values=X[self.variable], quantiles=self.quantiles, sample_weight=sample_weight)
self.cut_points = self._clean_cut_points(cut_points)
return self<|docstring|>Calculate cut points on the input data X.
Cut points are (potentially weighted) quantiles specified when
initialising the transformer.
Parameters
----------
X : pd.DataFrame
DataFrame containing column to discretise. This column is defined
by the variable attribute.
y : pd.Series, default = None
Response variable. Not used. Only implemented for compatibility
with scikit-learn.
sample_weight : pd.Series or np.ndarray, default = None
Optional, sample weights for each record in X.<|endoftext|> |
4016b61d2b433042746dcc80698d70247a3f10ed7beea9ff23496f00905f3dd7 | @staticmethod
def _compute_weighted_quantile(values: np.ndarray, quantiles: tuple, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None, values_sorted: bool=False):
'Funtion to calculate weighted percentiles.\n\n Code modified from the answer given by users Alleo & Max Ghenis on\n stackoverflow https://stackoverflow.com/a/29677616. Removed old_style\n arg and associated code from answer.\n\n See https://en.wikipedia.org/wiki/Percentile#The_weighted_percentile_method\n for description of method.\n\n If no weights are passed then equal weighting per observation in values\n is applied.\n\n Parameters\n ----------\n values : array-like\n Data of interest, must contain a column supplied in variable.\n\n quantiles : array-like\n Value(s) between 0 <= quantiles <= 1, the weighted quantile(s) to compute.\n\n sample_weight : array-like, default = None\n Array of weights, must be same length as values. Default value of None\n means each observation in values is equally weighted.\n\n values_sorted : bool\n Are the values and sample_weight arrays pre-sorted? If True arrays will not\n be sorted in function.\n\n Returns\n -------\n interpolated_quantiles : np.array\n Computed (weighted) quantiles.\n\n '
values = np.array(values)
quantiles_ = np.array(quantiles)
quantiles_ = np.unique(np.sort(np.append(quantiles_, [0, 1])))
if (sample_weight is None):
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
if (not values_sorted):
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = (np.cumsum(sample_weight) - (0.5 * sample_weight))
weighted_quantiles /= np.sum(sample_weight)
interpolated_quantiles = np.interp(quantiles_, weighted_quantiles, values)
return interpolated_quantiles | Funtion to calculate weighted percentiles.
Code modified from the answer given by users Alleo & Max Ghenis on
stackoverflow https://stackoverflow.com/a/29677616. Removed old_style
arg and associated code from answer.
See https://en.wikipedia.org/wiki/Percentile#The_weighted_percentile_method
for description of method.
If no weights are passed then equal weighting per observation in values
is applied.
Parameters
----------
values : array-like
Data of interest, must contain a column supplied in variable.
quantiles : array-like
Value(s) between 0 <= quantiles <= 1, the weighted quantile(s) to compute.
sample_weight : array-like, default = None
Array of weights, must be same length as values. Default value of None
means each observation in values is equally weighted.
values_sorted : bool
Are the values and sample_weight arrays pre-sorted? If True arrays will not
be sorted in function.
Returns
-------
interpolated_quantiles : np.array
Computed (weighted) quantiles. | src/sumnplot/discretisation.py | _compute_weighted_quantile | richardangell/analysis-development | 1 | python | @staticmethod
def _compute_weighted_quantile(values: np.ndarray, quantiles: tuple, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None, values_sorted: bool=False):
'Funtion to calculate weighted percentiles.\n\n Code modified from the answer given by users Alleo & Max Ghenis on\n stackoverflow https://stackoverflow.com/a/29677616. Removed old_style\n arg and associated code from answer.\n\n See https://en.wikipedia.org/wiki/Percentile#The_weighted_percentile_method\n for description of method.\n\n If no weights are passed then equal weighting per observation in values\n is applied.\n\n Parameters\n ----------\n values : array-like\n Data of interest, must contain a column supplied in variable.\n\n quantiles : array-like\n Value(s) between 0 <= quantiles <= 1, the weighted quantile(s) to compute.\n\n sample_weight : array-like, default = None\n Array of weights, must be same length as values. Default value of None\n means each observation in values is equally weighted.\n\n values_sorted : bool\n Are the values and sample_weight arrays pre-sorted? If True arrays will not\n be sorted in function.\n\n Returns\n -------\n interpolated_quantiles : np.array\n Computed (weighted) quantiles.\n\n '
values = np.array(values)
quantiles_ = np.array(quantiles)
quantiles_ = np.unique(np.sort(np.append(quantiles_, [0, 1])))
if (sample_weight is None):
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
if (not values_sorted):
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = (np.cumsum(sample_weight) - (0.5 * sample_weight))
weighted_quantiles /= np.sum(sample_weight)
interpolated_quantiles = np.interp(quantiles_, weighted_quantiles, values)
return interpolated_quantiles | @staticmethod
def _compute_weighted_quantile(values: np.ndarray, quantiles: tuple, sample_weight: Optional[Union[(pd.Series, np.ndarray)]]=None, values_sorted: bool=False):
'Funtion to calculate weighted percentiles.\n\n Code modified from the answer given by users Alleo & Max Ghenis on\n stackoverflow https://stackoverflow.com/a/29677616. Removed old_style\n arg and associated code from answer.\n\n See https://en.wikipedia.org/wiki/Percentile#The_weighted_percentile_method\n for description of method.\n\n If no weights are passed then equal weighting per observation in values\n is applied.\n\n Parameters\n ----------\n values : array-like\n Data of interest, must contain a column supplied in variable.\n\n quantiles : array-like\n Value(s) between 0 <= quantiles <= 1, the weighted quantile(s) to compute.\n\n sample_weight : array-like, default = None\n Array of weights, must be same length as values. Default value of None\n means each observation in values is equally weighted.\n\n values_sorted : bool\n Are the values and sample_weight arrays pre-sorted? If True arrays will not\n be sorted in function.\n\n Returns\n -------\n interpolated_quantiles : np.array\n Computed (weighted) quantiles.\n\n '
values = np.array(values)
quantiles_ = np.array(quantiles)
quantiles_ = np.unique(np.sort(np.append(quantiles_, [0, 1])))
if (sample_weight is None):
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
if (not values_sorted):
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = (np.cumsum(sample_weight) - (0.5 * sample_weight))
weighted_quantiles /= np.sum(sample_weight)
interpolated_quantiles = np.interp(quantiles_, weighted_quantiles, values)
return interpolated_quantiles<|docstring|>Funtion to calculate weighted percentiles.
Code modified from the answer given by users Alleo & Max Ghenis on
stackoverflow https://stackoverflow.com/a/29677616. Removed old_style
arg and associated code from answer.
See https://en.wikipedia.org/wiki/Percentile#The_weighted_percentile_method
for description of method.
If no weights are passed then equal weighting per observation in values
is applied.
Parameters
----------
values : array-like
Data of interest, must contain a column supplied in variable.
quantiles : array-like
Value(s) between 0 <= quantiles <= 1, the weighted quantile(s) to compute.
sample_weight : array-like, default = None
Array of weights, must be same length as values. Default value of None
means each observation in values is equally weighted.
values_sorted : bool
Are the values and sample_weight arrays pre-sorted? If True arrays will not
be sorted in function.
Returns
-------
interpolated_quantiles : np.array
Computed (weighted) quantiles.<|endoftext|> |
40aef22ab33993412bb27063fa4e73e5fa7141ef3c99db9b9abdd622e06ebe47 | @staticmethod
def _clean_quantiles(quantiles: Tuple[(Union[(int, float)], ...)]) -> Tuple[(Union[(int, float)], ...)]:
'Clean input quantiles by ensuring 0 and 1 are included, they are\n sorted and unique.\n\n Note, quantiles are converted back and forth between a tuple a\n np.ndarray. This is so the transformer is compatible with scikit-learn\n as the quantiles are set during init.\n\n Parameters\n ----------\n quantiles : tuple\n Quantiles within the range [0, 1].\n\n Returns\n -------\n cleaned_quantiles : tuple\n Sorted, unique quantiles.\n\n '
quantiles_array = np.array(quantiles)
quantiles_array = np.unique(np.sort(np.append(quantiles_array, [0, 1])))
check_condition(all((quantiles_array >= 0)), 'all quantiles >= 0')
check_condition(all((quantiles_array <= 1)), 'all quantiles <= 1')
cleaned_quantiles = tuple(quantiles_array)
return cleaned_quantiles | Clean input quantiles by ensuring 0 and 1 are included, they are
sorted and unique.
Note, quantiles are converted back and forth between a tuple a
np.ndarray. This is so the transformer is compatible with scikit-learn
as the quantiles are set during init.
Parameters
----------
quantiles : tuple
Quantiles within the range [0, 1].
Returns
-------
cleaned_quantiles : tuple
Sorted, unique quantiles. | src/sumnplot/discretisation.py | _clean_quantiles | richardangell/analysis-development | 1 | python | @staticmethod
def _clean_quantiles(quantiles: Tuple[(Union[(int, float)], ...)]) -> Tuple[(Union[(int, float)], ...)]:
'Clean input quantiles by ensuring 0 and 1 are included, they are\n sorted and unique.\n\n Note, quantiles are converted back and forth between a tuple a\n np.ndarray. This is so the transformer is compatible with scikit-learn\n as the quantiles are set during init.\n\n Parameters\n ----------\n quantiles : tuple\n Quantiles within the range [0, 1].\n\n Returns\n -------\n cleaned_quantiles : tuple\n Sorted, unique quantiles.\n\n '
quantiles_array = np.array(quantiles)
quantiles_array = np.unique(np.sort(np.append(quantiles_array, [0, 1])))
check_condition(all((quantiles_array >= 0)), 'all quantiles >= 0')
check_condition(all((quantiles_array <= 1)), 'all quantiles <= 1')
cleaned_quantiles = tuple(quantiles_array)
return cleaned_quantiles | @staticmethod
def _clean_quantiles(quantiles: Tuple[(Union[(int, float)], ...)]) -> Tuple[(Union[(int, float)], ...)]:
'Clean input quantiles by ensuring 0 and 1 are included, they are\n sorted and unique.\n\n Note, quantiles are converted back and forth between a tuple a\n np.ndarray. This is so the transformer is compatible with scikit-learn\n as the quantiles are set during init.\n\n Parameters\n ----------\n quantiles : tuple\n Quantiles within the range [0, 1].\n\n Returns\n -------\n cleaned_quantiles : tuple\n Sorted, unique quantiles.\n\n '
quantiles_array = np.array(quantiles)
quantiles_array = np.unique(np.sort(np.append(quantiles_array, [0, 1])))
check_condition(all((quantiles_array >= 0)), 'all quantiles >= 0')
check_condition(all((quantiles_array <= 1)), 'all quantiles <= 1')
cleaned_quantiles = tuple(quantiles_array)
return cleaned_quantiles<|docstring|>Clean input quantiles by ensuring 0 and 1 are included, they are
sorted and unique.
Note, quantiles are converted back and forth between a tuple a
np.ndarray. This is so the transformer is compatible with scikit-learn
as the quantiles are set during init.
Parameters
----------
quantiles : tuple
Quantiles within the range [0, 1].
Returns
-------
cleaned_quantiles : tuple
Sorted, unique quantiles.<|endoftext|> |
0148d6f6fb84f7e37d5bab91701f9d882df5a2d50e8741ad9a5002474785e6ed | def _get_max_number_of_bins(self) -> int:
'Return the maximum number of bins possible for variable.'
return len(self.quantiles) | Return the maximum number of bins possible for variable. | src/sumnplot/discretisation.py | _get_max_number_of_bins | richardangell/analysis-development | 1 | python | def _get_max_number_of_bins(self) -> int:
return len(self.quantiles) | def _get_max_number_of_bins(self) -> int:
return len(self.quantiles)<|docstring|>Return the maximum number of bins possible for variable.<|endoftext|> |
824a41b365f8e18632f9c71ac76b579786f214b05bfd223482cf95db67f48e4c | def batch_size_fn(new, count, sofar):
'持续扩大批处理并计算标识+填充的总数'
global max_src_in_batch, max_tgt_in_batch
if (count == 1):
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(new.src))
max_tgt_in_batch = max(max_tgt_in_batch, (len(new.src) + 2))
src_elements = (count * max_src_in_batch)
tgt_elements = (count * max_tgt_in_batch)
return max(src_elements, tgt_elements) | 持续扩大批处理并计算标识+填充的总数 | Batch.py | batch_size_fn | RongTouchTouch/AutoComplete | 0 | python | def batch_size_fn(new, count, sofar):
global max_src_in_batch, max_tgt_in_batch
if (count == 1):
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(new.src))
max_tgt_in_batch = max(max_tgt_in_batch, (len(new.src) + 2))
src_elements = (count * max_src_in_batch)
tgt_elements = (count * max_tgt_in_batch)
return max(src_elements, tgt_elements) | def batch_size_fn(new, count, sofar):
global max_src_in_batch, max_tgt_in_batch
if (count == 1):
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(new.src))
max_tgt_in_batch = max(max_tgt_in_batch, (len(new.src) + 2))
src_elements = (count * max_src_in_batch)
tgt_elements = (count * max_tgt_in_batch)
return max(src_elements, tgt_elements)<|docstring|>持续扩大批处理并计算标识+填充的总数<|endoftext|> |
73e1e44824890739078d8667b75a33407413db7a0f8c89ab9e61eb69bfba3f77 | @staticmethod
def make_std_mask(tgt, pad):
'创建一个mask来隐藏填充和将来的单词'
tgt_mask = (tgt != pad).unsqueeze((- 2))
tgt_mask = (tgt_mask & Variable(subsequent_mask(tgt.size((- 1))).type_as(tgt_mask.data)))
return tgt_mask | 创建一个mask来隐藏填充和将来的单词 | Batch.py | make_std_mask | RongTouchTouch/AutoComplete | 0 | python | @staticmethod
def make_std_mask(tgt, pad):
tgt_mask = (tgt != pad).unsqueeze((- 2))
tgt_mask = (tgt_mask & Variable(subsequent_mask(tgt.size((- 1))).type_as(tgt_mask.data)))
return tgt_mask | @staticmethod
def make_std_mask(tgt, pad):
tgt_mask = (tgt != pad).unsqueeze((- 2))
tgt_mask = (tgt_mask & Variable(subsequent_mask(tgt.size((- 1))).type_as(tgt_mask.data)))
return tgt_mask<|docstring|>创建一个mask来隐藏填充和将来的单词<|endoftext|> |
73e1e44824890739078d8667b75a33407413db7a0f8c89ab9e61eb69bfba3f77 | @staticmethod
def make_std_mask(tgt, pad):
'创建一个mask来隐藏填充和将来的单词'
tgt_mask = (tgt != pad).unsqueeze((- 2))
tgt_mask = (tgt_mask & Variable(subsequent_mask(tgt.size((- 1))).type_as(tgt_mask.data)))
return tgt_mask | 创建一个mask来隐藏填充和将来的单词 | Batch.py | make_std_mask | RongTouchTouch/AutoComplete | 0 | python | @staticmethod
def make_std_mask(tgt, pad):
tgt_mask = (tgt != pad).unsqueeze((- 2))
tgt_mask = (tgt_mask & Variable(subsequent_mask(tgt.size((- 1))).type_as(tgt_mask.data)))
return tgt_mask | @staticmethod
def make_std_mask(tgt, pad):
tgt_mask = (tgt != pad).unsqueeze((- 2))
tgt_mask = (tgt_mask & Variable(subsequent_mask(tgt.size((- 1))).type_as(tgt_mask.data)))
return tgt_mask<|docstring|>创建一个mask来隐藏填充和将来的单词<|endoftext|> |
ee10b9facd7af4d136d1362c0b75912634368a10b5c974d0547c72689ff813bb | def find_loss(prediction, target):
'\n Calculating the squared loss on the normalized GED.\n '
prediction = prediction
target = target
score = ((prediction - target) ** 2)
return score | Calculating the squared loss on the normalized GED. | src/utilities.py | find_loss | Yagyamodi/SimGNN-main | 0 | python | def find_loss(prediction, target):
'\n \n '
prediction = prediction
target = target
score = ((prediction - target) ** 2)
return score | def find_loss(prediction, target):
'\n \n '
prediction = prediction
target = target
score = ((prediction - target) ** 2)
return score<|docstring|>Calculating the squared loss on the normalized GED.<|endoftext|> |
e8cf5f5f3e55808295e67c0d2ebdf1a8c41d5bb70fcf099a28b413ff6e6ad188 | @click.command()
@click.argument('database_dir')
@click.argument('target_dir')
def main(database_dir, target_dir):
'Generate CSV files from a CronosPro/CronosPlus database.'
if (not os.path.isdir(database_dir)):
raise click.ClickException('Database directory does not exist!')
try:
os.makedirs(target_dir)
except:
pass
try:
parse(database_dir, target_dir)
except CronosException as ex:
raise click.ClickException(ex) | Generate CSV files from a CronosPro/CronosPlus database. | cronos/cli.py | main | OlegBravo/cronosparser | 0 | python | @click.command()
@click.argument('database_dir')
@click.argument('target_dir')
def main(database_dir, target_dir):
if (not os.path.isdir(database_dir)):
raise click.ClickException('Database directory does not exist!')
try:
os.makedirs(target_dir)
except:
pass
try:
parse(database_dir, target_dir)
except CronosException as ex:
raise click.ClickException(ex) | @click.command()
@click.argument('database_dir')
@click.argument('target_dir')
def main(database_dir, target_dir):
if (not os.path.isdir(database_dir)):
raise click.ClickException('Database directory does not exist!')
try:
os.makedirs(target_dir)
except:
pass
try:
parse(database_dir, target_dir)
except CronosException as ex:
raise click.ClickException(ex)<|docstring|>Generate CSV files from a CronosPro/CronosPlus database.<|endoftext|> |
4d02854035b9dc8a389e04698ea119b0a19a36978fe6baa7d96c6612c7045924 | def clear(self):
'Initialize and clear intermediate results.'
self.Y = None
self.n = None
self.phi = None
self.exc = None
self.vxc = None
return | Initialize and clear intermediate results. | eminus/scf.py | clear | wangenau/eminus | 0 | python | def clear(self):
self.Y = None
self.n = None
self.phi = None
self.exc = None
self.vxc = None
return | def clear(self):
self.Y = None
self.n = None
self.phi = None
self.exc = None
self.vxc = None
return<|docstring|>Initialize and clear intermediate results.<|endoftext|> |
6f71d97ac8392c438c955db06155f21e09bd294c1aa6de2d65fd07144a72a0ad | def initialize(self):
'Validate inputs, update them and build all necessary parameters.'
self._set_potential()
self._init_W()
return | Validate inputs, update them and build all necessary parameters. | eminus/scf.py | initialize | wangenau/eminus | 0 | python | def initialize(self):
self._set_potential()
self._init_W()
return | def initialize(self):
self._set_potential()
self._init_W()
return<|docstring|>Validate inputs, update them and build all necessary parameters.<|endoftext|> |
887aef9a9d2fbdd63df116beb12694ccaf1817befeb11645c7b6f9aaf3906e90 | def run(self, **kwargs):
'Run the self-consistent field (SCF) calculation.'
self.log.debug(f'''--- System information ---
{self.atoms}
Number of states: {self.atoms.Ns}
Occupation per state: {self.atoms.f}
--- Cell information ---
Side lengths: {self.atoms.a} Bohr
Sampling per axis: {self.atoms.s}
Cut-off energy: {self.atoms.ecut} Hartree
Compression: {(len(self.atoms.G2) / len(self.atoms.G2c)):.5f}
--- Calculation information ---
{self}
--- SCF data ---''')
self.energies.Eewald = get_Eewald(self.atoms)
Etots = []
minimizer_log = {}
for imin in self.min:
try:
self.log.info(f'Start {eval(imin).__name__}...')
except NameError:
self.log.exception(f'No minimizer found for "{imin}"')
raise
start = timeit.default_timer()
Elist = eval(imin)(self, self.min[imin], **kwargs)
end = timeit.default_timer()
minimizer_log[imin] = {}
minimizer_log[imin]['time'] = (end - start)
minimizer_log[imin]['iter'] = len(Elist)
Etots += Elist
if (abs((Etots[(- 2)] - Etots[(- 1)])) < self.etol):
break
if (abs((Etots[(- 2)] - Etots[(- 1)])) < self.etol):
self.log.info(f'SCF converged after {len(Etots)} iterations.')
else:
self.log.warning('SCF not converged!')
self.log.debug('\n--- SCF results ---')
t_tot = 0
for imin in self.min:
N = minimizer_log[imin]['iter']
t = minimizer_log[imin]['time']
t_tot += t
self.log.debug(f'''Minimizer: {imin}
Iterations: {N}
Time: {t:.5f} s
Time/Iteration: {(t / N):.5f} s''')
self.log.info(f'Total SCF time: {t_tot:.5f} s')
if self.sic:
self.energies.Esic = get_Esic(self, self.Y)
if (self.log.level <= logging.DEBUG):
self.log.debug(f'''
--- Energy data ---
{self.energies}''')
else:
self.log.info(f'Total energy: {self.energies.Etot:.9f} Eh')
return self.energies.Etot | Run the self-consistent field (SCF) calculation. | eminus/scf.py | run | wangenau/eminus | 0 | python | def run(self, **kwargs):
self.log.debug(f'--- System information ---
{self.atoms}
Number of states: {self.atoms.Ns}
Occupation per state: {self.atoms.f}
--- Cell information ---
Side lengths: {self.atoms.a} Bohr
Sampling per axis: {self.atoms.s}
Cut-off energy: {self.atoms.ecut} Hartree
Compression: {(len(self.atoms.G2) / len(self.atoms.G2c)):.5f}
--- Calculation information ---
{self}
--- SCF data ---')
self.energies.Eewald = get_Eewald(self.atoms)
Etots = []
minimizer_log = {}
for imin in self.min:
try:
self.log.info(f'Start {eval(imin).__name__}...')
except NameError:
self.log.exception(f'No minimizer found for "{imin}"')
raise
start = timeit.default_timer()
Elist = eval(imin)(self, self.min[imin], **kwargs)
end = timeit.default_timer()
minimizer_log[imin] = {}
minimizer_log[imin]['time'] = (end - start)
minimizer_log[imin]['iter'] = len(Elist)
Etots += Elist
if (abs((Etots[(- 2)] - Etots[(- 1)])) < self.etol):
break
if (abs((Etots[(- 2)] - Etots[(- 1)])) < self.etol):
self.log.info(f'SCF converged after {len(Etots)} iterations.')
else:
self.log.warning('SCF not converged!')
self.log.debug('\n--- SCF results ---')
t_tot = 0
for imin in self.min:
N = minimizer_log[imin]['iter']
t = minimizer_log[imin]['time']
t_tot += t
self.log.debug(f'Minimizer: {imin}
Iterations: {N}
Time: {t:.5f} s
Time/Iteration: {(t / N):.5f} s')
self.log.info(f'Total SCF time: {t_tot:.5f} s')
if self.sic:
self.energies.Esic = get_Esic(self, self.Y)
if (self.log.level <= logging.DEBUG):
self.log.debug(f'
--- Energy data ---
{self.energies}')
else:
self.log.info(f'Total energy: {self.energies.Etot:.9f} Eh')
return self.energies.Etot | def run(self, **kwargs):
self.log.debug(f'--- System information ---
{self.atoms}
Number of states: {self.atoms.Ns}
Occupation per state: {self.atoms.f}
--- Cell information ---
Side lengths: {self.atoms.a} Bohr
Sampling per axis: {self.atoms.s}
Cut-off energy: {self.atoms.ecut} Hartree
Compression: {(len(self.atoms.G2) / len(self.atoms.G2c)):.5f}
--- Calculation information ---
{self}
--- SCF data ---')
self.energies.Eewald = get_Eewald(self.atoms)
Etots = []
minimizer_log = {}
for imin in self.min:
try:
self.log.info(f'Start {eval(imin).__name__}...')
except NameError:
self.log.exception(f'No minimizer found for "{imin}"')
raise
start = timeit.default_timer()
Elist = eval(imin)(self, self.min[imin], **kwargs)
end = timeit.default_timer()
minimizer_log[imin] = {}
minimizer_log[imin]['time'] = (end - start)
minimizer_log[imin]['iter'] = len(Elist)
Etots += Elist
if (abs((Etots[(- 2)] - Etots[(- 1)])) < self.etol):
break
if (abs((Etots[(- 2)] - Etots[(- 1)])) < self.etol):
self.log.info(f'SCF converged after {len(Etots)} iterations.')
else:
self.log.warning('SCF not converged!')
self.log.debug('\n--- SCF results ---')
t_tot = 0
for imin in self.min:
N = minimizer_log[imin]['iter']
t = minimizer_log[imin]['time']
t_tot += t
self.log.debug(f'Minimizer: {imin}
Iterations: {N}
Time: {t:.5f} s
Time/Iteration: {(t / N):.5f} s')
self.log.info(f'Total SCF time: {t_tot:.5f} s')
if self.sic:
self.energies.Esic = get_Esic(self, self.Y)
if (self.log.level <= logging.DEBUG):
self.log.debug(f'
--- Energy data ---
{self.energies}')
else:
self.log.info(f'Total energy: {self.energies.Etot:.9f} Eh')
return self.energies.Etot<|docstring|>Run the self-consistent field (SCF) calculation.<|endoftext|> |
146a27b3d64902d78e550e4b935a1545eb0f2090d25afb427ad24106f8b43484 | def _set_potential(self):
'Build the potential.'
atoms = self.atoms
if (self.pot == 'gth'):
for ia in range(atoms.Natoms):
self.GTH[atoms.atom[ia]] = read_gth(atoms.atom[ia], atoms.Z[ia])
self.Vloc = init_gth_loc(self)
(self.NbetaNL, self.prj2beta, self.betaNL) = init_gth_nonloc(self)
else:
self.Vloc = init_pot(self)
return | Build the potential. | eminus/scf.py | _set_potential | wangenau/eminus | 0 | python | def _set_potential(self):
atoms = self.atoms
if (self.pot == 'gth'):
for ia in range(atoms.Natoms):
self.GTH[atoms.atom[ia]] = read_gth(atoms.atom[ia], atoms.Z[ia])
self.Vloc = init_gth_loc(self)
(self.NbetaNL, self.prj2beta, self.betaNL) = init_gth_nonloc(self)
else:
self.Vloc = init_pot(self)
return | def _set_potential(self):
atoms = self.atoms
if (self.pot == 'gth'):
for ia in range(atoms.Natoms):
self.GTH[atoms.atom[ia]] = read_gth(atoms.atom[ia], atoms.Z[ia])
self.Vloc = init_gth_loc(self)
(self.NbetaNL, self.prj2beta, self.betaNL) = init_gth_nonloc(self)
else:
self.Vloc = init_pot(self)
return<|docstring|>Build the potential.<|endoftext|> |
bf96f65fb7e753bb485d69dd689f07743c40815f649200b0efbb5defbc1413c9 | def _init_W(self):
'Initialize wave functions.'
if (self.guess in ('gauss', 'gaussian')):
self.W = guess_gaussian(self)
elif (self.guess in ('rand', 'random')):
self.W = guess_random(self, complex=True, reproduce=True)
else:
self.log.error(f'No guess found for "{self.guess}"')
return | Initialize wave functions. | eminus/scf.py | _init_W | wangenau/eminus | 0 | python | def _init_W(self):
if (self.guess in ('gauss', 'gaussian')):
self.W = guess_gaussian(self)
elif (self.guess in ('rand', 'random')):
self.W = guess_random(self, complex=True, reproduce=True)
else:
self.log.error(f'No guess found for "{self.guess}"')
return | def _init_W(self):
if (self.guess in ('gauss', 'gaussian')):
self.W = guess_gaussian(self)
elif (self.guess in ('rand', 'random')):
self.W = guess_random(self, complex=True, reproduce=True)
else:
self.log.error(f'No guess found for "{self.guess}"')
return<|docstring|>Initialize wave functions.<|endoftext|> |
8188b13b4f51ea93fa52d2a200bdc33a1ece272f325ee06426b46a201e2690d4 | def __repr__(self):
'Print the parameters stored in the SCF object.'
return f'''XC functionals: {self.xc}
Potential: {self.pot}
Starting guess: {self.guess}
Convergence tolerance: {self.etol}
Non-local contribution: {(self.NbetaNL > 0)}''' | Print the parameters stored in the SCF object. | eminus/scf.py | __repr__ | wangenau/eminus | 0 | python | def __repr__(self):
return f'XC functionals: {self.xc}
Potential: {self.pot}
Starting guess: {self.guess}
Convergence tolerance: {self.etol}
Non-local contribution: {(self.NbetaNL > 0)}' | def __repr__(self):
return f'XC functionals: {self.xc}
Potential: {self.pot}
Starting guess: {self.guess}
Convergence tolerance: {self.etol}
Non-local contribution: {(self.NbetaNL > 0)}'<|docstring|>Print the parameters stored in the SCF object.<|endoftext|> |
6728d9662b3b340d81b84db0280ed7690d58680b01947e6965de45ba7c4f7536 | @property
def verbose(self):
'Verbosity level.'
return self._verbose | Verbosity level. | eminus/scf.py | verbose | wangenau/eminus | 0 | python | @property
def verbose(self):
return self._verbose | @property
def verbose(self):
return self._verbose<|docstring|>Verbosity level.<|endoftext|> |
4257a56039db4073a1f2dab00703412a98b631733a219c23050ebecae64f4ec4 | @verbose.setter
def verbose(self, level):
'Verbosity setter to sync the logger with the property.'
self._verbose = get_level(level)
self.log.setLevel(self._verbose)
return | Verbosity setter to sync the logger with the property. | eminus/scf.py | verbose | wangenau/eminus | 0 | python | @verbose.setter
def verbose(self, level):
self._verbose = get_level(level)
self.log.setLevel(self._verbose)
return | @verbose.setter
def verbose(self, level):
self._verbose = get_level(level)
self.log.setLevel(self._verbose)
return<|docstring|>Verbosity setter to sync the logger with the property.<|endoftext|> |
a895703bce135a6cca40d60fbc14c947883b555abceb60d3a18b1cf6dfe45dc6 | def load(stream):
'Parse the first YAML document in a stream using the AstropyLoader and\n produce the corresponding Python object.\n\n Parameters\n ----------\n stream : str or file-like object\n YAML input\n\n Returns\n -------\n obj : object\n Object corresponding to YAML document\n '
return yaml.load(stream, Loader=AstropyLoader) | Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like object
YAML input
Returns
-------
obj : object
Object corresponding to YAML document | astropy/io/misc/yaml.py | load | SharonGoliath/astropy | 445 | python | def load(stream):
'Parse the first YAML document in a stream using the AstropyLoader and\n produce the corresponding Python object.\n\n Parameters\n ----------\n stream : str or file-like object\n YAML input\n\n Returns\n -------\n obj : object\n Object corresponding to YAML document\n '
return yaml.load(stream, Loader=AstropyLoader) | def load(stream):
'Parse the first YAML document in a stream using the AstropyLoader and\n produce the corresponding Python object.\n\n Parameters\n ----------\n stream : str or file-like object\n YAML input\n\n Returns\n -------\n obj : object\n Object corresponding to YAML document\n '
return yaml.load(stream, Loader=AstropyLoader)<|docstring|>Parse the first YAML document in a stream using the AstropyLoader and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like object
YAML input
Returns
-------
obj : object
Object corresponding to YAML document<|endoftext|> |
7711c6f4b3de9e60e9b168b89294304c1c31693abae6ae7a8881e6d8d6c70beb | def load_all(stream):
'Parse the all YAML documents in a stream using the AstropyLoader class and\n produce the corresponding Python object.\n\n Parameters\n ----------\n stream : str or file-like object\n YAML input\n\n Returns\n -------\n obj : object\n Object corresponding to YAML document\n\n '
return yaml.load_all(stream, Loader=AstropyLoader) | Parse the all YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like object
YAML input
Returns
-------
obj : object
Object corresponding to YAML document | astropy/io/misc/yaml.py | load_all | SharonGoliath/astropy | 445 | python | def load_all(stream):
'Parse the all YAML documents in a stream using the AstropyLoader class and\n produce the corresponding Python object.\n\n Parameters\n ----------\n stream : str or file-like object\n YAML input\n\n Returns\n -------\n obj : object\n Object corresponding to YAML document\n\n '
return yaml.load_all(stream, Loader=AstropyLoader) | def load_all(stream):
'Parse the all YAML documents in a stream using the AstropyLoader class and\n produce the corresponding Python object.\n\n Parameters\n ----------\n stream : str or file-like object\n YAML input\n\n Returns\n -------\n obj : object\n Object corresponding to YAML document\n\n '
return yaml.load_all(stream, Loader=AstropyLoader)<|docstring|>Parse the all YAML documents in a stream using the AstropyLoader class and
produce the corresponding Python object.
Parameters
----------
stream : str or file-like object
YAML input
Returns
-------
obj : object
Object corresponding to YAML document<|endoftext|> |
0eb313f877ea250f7ce0133172509a10db45a4f5ab9b83b41e82e4d8a97b325a | def dump(data, stream=None, **kwargs):
'Serialize a Python object into a YAML stream using the AstropyDumper class.\n If stream is None, return the produced string instead.\n\n Parameters\n ----------\n data: object\n Object to serialize to YAML\n stream : file-like object, optional\n YAML output (if not supplied a string is returned)\n **kwargs\n Other keyword arguments that get passed to yaml.dump()\n\n Returns\n -------\n out : str or None\n If no ``stream`` is supplied then YAML output is returned as str\n\n '
kwargs['Dumper'] = AstropyDumper
kwargs.setdefault('default_flow_style', None)
return yaml.dump(data, stream=stream, **kwargs) | Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data: object
Object to serialize to YAML
stream : file-like object, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str | astropy/io/misc/yaml.py | dump | SharonGoliath/astropy | 445 | python | def dump(data, stream=None, **kwargs):
'Serialize a Python object into a YAML stream using the AstropyDumper class.\n If stream is None, return the produced string instead.\n\n Parameters\n ----------\n data: object\n Object to serialize to YAML\n stream : file-like object, optional\n YAML output (if not supplied a string is returned)\n **kwargs\n Other keyword arguments that get passed to yaml.dump()\n\n Returns\n -------\n out : str or None\n If no ``stream`` is supplied then YAML output is returned as str\n\n '
kwargs['Dumper'] = AstropyDumper
kwargs.setdefault('default_flow_style', None)
return yaml.dump(data, stream=stream, **kwargs) | def dump(data, stream=None, **kwargs):
'Serialize a Python object into a YAML stream using the AstropyDumper class.\n If stream is None, return the produced string instead.\n\n Parameters\n ----------\n data: object\n Object to serialize to YAML\n stream : file-like object, optional\n YAML output (if not supplied a string is returned)\n **kwargs\n Other keyword arguments that get passed to yaml.dump()\n\n Returns\n -------\n out : str or None\n If no ``stream`` is supplied then YAML output is returned as str\n\n '
kwargs['Dumper'] = AstropyDumper
kwargs.setdefault('default_flow_style', None)
return yaml.dump(data, stream=stream, **kwargs)<|docstring|>Serialize a Python object into a YAML stream using the AstropyDumper class.
If stream is None, return the produced string instead.
Parameters
----------
data: object
Object to serialize to YAML
stream : file-like object, optional
YAML output (if not supplied a string is returned)
**kwargs
Other keyword arguments that get passed to yaml.dump()
Returns
-------
out : str or None
If no ``stream`` is supplied then YAML output is returned as str<|endoftext|> |
5a983358301ece67af48b8b42d6435bc35f96ba3447b4546a2ce3ca5543eae78 | def __init__(self, enog_list, enog_dict):
'\n at initialization, the "EnogList" sorts the information that is required later. i.e. the dictionary of weights\n as used in completeness/contamination calculation. Additionally all used OGs (from the parameter enog_list) make\n up the total maximal weight (self.total)\n\n :param enog_list: a list of orthoglogous groups\n :param enog_dict: a dictionary containing all information from the weights file per orthologous group\n '
self.weights = {}
self.enogs = enog_list[:]
self.total = 0
for enog in self.enogs:
if (not enog_dict.get(enog)):
self.weights[enog] = 1
else:
percent_presence = enog_dict[enog].get('%present', 1)
average_count = enog_dict[enog].get('av.count_if_present', 1)
self.weights[enog] = (float(percent_presence) / float(average_count))
self.total = (self.total + self.weights[enog]) | at initialization, the "EnogList" sorts the information that is required later. i.e. the dictionary of weights
as used in completeness/contamination calculation. Additionally all used OGs (from the parameter enog_list) make
up the total maximal weight (self.total)
:param enog_list: a list of orthoglogous groups
:param enog_dict: a dictionary containing all information from the weights file per orthologous group | compleconta/EnogLists.py | __init__ | phyden/compleconta | 0 | python | def __init__(self, enog_list, enog_dict):
'\n at initialization, the "EnogList" sorts the information that is required later. i.e. the dictionary of weights\n as used in completeness/contamination calculation. Additionally all used OGs (from the parameter enog_list) make\n up the total maximal weight (self.total)\n\n :param enog_list: a list of orthoglogous groups\n :param enog_dict: a dictionary containing all information from the weights file per orthologous group\n '
self.weights = {}
self.enogs = enog_list[:]
self.total = 0
for enog in self.enogs:
if (not enog_dict.get(enog)):
self.weights[enog] = 1
else:
percent_presence = enog_dict[enog].get('%present', 1)
average_count = enog_dict[enog].get('av.count_if_present', 1)
self.weights[enog] = (float(percent_presence) / float(average_count))
self.total = (self.total + self.weights[enog]) | def __init__(self, enog_list, enog_dict):
'\n at initialization, the "EnogList" sorts the information that is required later. i.e. the dictionary of weights\n as used in completeness/contamination calculation. Additionally all used OGs (from the parameter enog_list) make\n up the total maximal weight (self.total)\n\n :param enog_list: a list of orthoglogous groups\n :param enog_dict: a dictionary containing all information from the weights file per orthologous group\n '
self.weights = {}
self.enogs = enog_list[:]
self.total = 0
for enog in self.enogs:
if (not enog_dict.get(enog)):
self.weights[enog] = 1
else:
percent_presence = enog_dict[enog].get('%present', 1)
average_count = enog_dict[enog].get('av.count_if_present', 1)
self.weights[enog] = (float(percent_presence) / float(average_count))
self.total = (self.total + self.weights[enog])<|docstring|>at initialization, the "EnogList" sorts the information that is required later. i.e. the dictionary of weights
as used in completeness/contamination calculation. Additionally all used OGs (from the parameter enog_list) make
up the total maximal weight (self.total)
:param enog_list: a list of orthoglogous groups
:param enog_dict: a dictionary containing all information from the weights file per orthologous group<|endoftext|> |
b4ac93528ee7950865ba4b12da18e8c12eeaff265f8eb23de24943daa1f30501 | def get_weight(self, enog):
'\n\n :param enog: id of the orthologous group\n :return: specific weight for the orthologous group id\n '
weight = self.weights.get(enog, 0)
return weight | :param enog: id of the orthologous group
:return: specific weight for the orthologous group id | compleconta/EnogLists.py | get_weight | phyden/compleconta | 0 | python | def get_weight(self, enog):
'\n\n :param enog: id of the orthologous group\n :return: specific weight for the orthologous group id\n '
weight = self.weights.get(enog, 0)
return weight | def get_weight(self, enog):
'\n\n :param enog: id of the orthologous group\n :return: specific weight for the orthologous group id\n '
weight = self.weights.get(enog, 0)
return weight<|docstring|>:param enog: id of the orthologous group
:return: specific weight for the orthologous group id<|endoftext|> |
503c039f11075f514341a57d945c452b2072ad14b121cb37c0414547aaf379eb | def get_total(self):
'\n :return: total maximal score that can be reached (sum of weights)\n '
return self.total | :return: total maximal score that can be reached (sum of weights) | compleconta/EnogLists.py | get_total | phyden/compleconta | 0 | python | def get_total(self):
'\n \n '
return self.total | def get_total(self):
'\n \n '
return self.total<|docstring|>:return: total maximal score that can be reached (sum of weights)<|endoftext|> |
446335a9802f8d32f7fffed7025a0507121d96f79e577211155f9e4f80f8bbb7 | def get_dict(self):
'\n :return: dictionary of weights as calculated\n '
return self.weights | :return: dictionary of weights as calculated | compleconta/EnogLists.py | get_dict | phyden/compleconta | 0 | python | def get_dict(self):
'\n \n '
return self.weights | def get_dict(self):
'\n \n '
return self.weights<|docstring|>:return: dictionary of weights as calculated<|endoftext|> |
dea54d9f43be059c618ff05f24e404a7e6e621a89ae149a3cafd33ca2a9360d1 | def parse_price(s: str) -> float:
' The calculation is two parts as represented below\n [pounds] + [pennies] -> int(...) + float(...)\n \n returns: float value representing price of item\n '
m = len(s)
return (int(s[1:(m - 3)].replace(',', '')) + float(s[(m - 3):])) | The calculation is two parts as represented below
[pounds] + [pennies] -> int(...) + float(...)
returns: float value representing price of item | amazon_item_tracker.py | parse_price | Tesla-CEO/amazon-item-tracker | 0 | python | def parse_price(s: str) -> float:
' The calculation is two parts as represented below\n [pounds] + [pennies] -> int(...) + float(...)\n \n returns: float value representing price of item\n '
m = len(s)
return (int(s[1:(m - 3)].replace(',', )) + float(s[(m - 3):])) | def parse_price(s: str) -> float:
' The calculation is two parts as represented below\n [pounds] + [pennies] -> int(...) + float(...)\n \n returns: float value representing price of item\n '
m = len(s)
return (int(s[1:(m - 3)].replace(',', )) + float(s[(m - 3):]))<|docstring|>The calculation is two parts as represented below
[pounds] + [pennies] -> int(...) + float(...)
returns: float value representing price of item<|endoftext|> |
d28a51f35e6a62c72ec40bd1aaaae6295ca1e966c4dda11dc0d1ab54636e2b83 | def check_price(desired: str, actual: str) -> bool:
' given two prices as strings using func -> parse_price()\n the strings are converted to float values then compared\n '
if (actual.lower() == 'price not found'):
return False
return (True if (parse_price(actual) < float(desired)) else False) | given two prices as strings using func -> parse_price()
the strings are converted to float values then compared | amazon_item_tracker.py | check_price | Tesla-CEO/amazon-item-tracker | 0 | python | def check_price(desired: str, actual: str) -> bool:
' given two prices as strings using func -> parse_price()\n the strings are converted to float values then compared\n '
if (actual.lower() == 'price not found'):
return False
return (True if (parse_price(actual) < float(desired)) else False) | def check_price(desired: str, actual: str) -> bool:
' given two prices as strings using func -> parse_price()\n the strings are converted to float values then compared\n '
if (actual.lower() == 'price not found'):
return False
return (True if (parse_price(actual) < float(desired)) else False)<|docstring|>given two prices as strings using func -> parse_price()
the strings are converted to float values then compared<|endoftext|> |
a33cac99ad93e2b811e70ce0838d52eb559a6feb658cf02abbd82eb9cfeb4401 | def seek_kindle_price(soup: BS) -> str:
' Locating the kindle edition price returns multiple objects in a list.\n As the price never contains letters or specific punctuation.\n Using regular expression whose object is the actual price is returned.\n '
kindle_arr = soup.find_all('span', class_='a-size-base a-color-secondary')
for soup_obj in kindle_arr:
string = soup_obj.get_text()
if ((len(re.findall('[+*|(){}%!]', string)) > 0) or (len(re.findall('[a-z]', string)) > 0)):
continue
else:
return string.strip()
return 'Price Not Found' | Locating the kindle edition price returns multiple objects in a list.
As the price never contains letters or specific punctuation.
Using regular expression whose object is the actual price is returned. | amazon_item_tracker.py | seek_kindle_price | Tesla-CEO/amazon-item-tracker | 0 | python | def seek_kindle_price(soup: BS) -> str:
' Locating the kindle edition price returns multiple objects in a list.\n As the price never contains letters or specific punctuation.\n Using regular expression whose object is the actual price is returned.\n '
kindle_arr = soup.find_all('span', class_='a-size-base a-color-secondary')
for soup_obj in kindle_arr:
string = soup_obj.get_text()
if ((len(re.findall('[+*|(){}%!]', string)) > 0) or (len(re.findall('[a-z]', string)) > 0)):
continue
else:
return string.strip()
return 'Price Not Found' | def seek_kindle_price(soup: BS) -> str:
' Locating the kindle edition price returns multiple objects in a list.\n As the price never contains letters or specific punctuation.\n Using regular expression whose object is the actual price is returned.\n '
kindle_arr = soup.find_all('span', class_='a-size-base a-color-secondary')
for soup_obj in kindle_arr:
string = soup_obj.get_text()
if ((len(re.findall('[+*|(){}%!]', string)) > 0) or (len(re.findall('[a-z]', string)) > 0)):
continue
else:
return string.strip()
return 'Price Not Found'<|docstring|>Locating the kindle edition price returns multiple objects in a list.
As the price never contains letters or specific punctuation.
Using regular expression whose object is the actual price is returned.<|endoftext|> |
ab54ee7943e98b933d99d53a4a45896816e66e1aeada808b28518e39d6e99010 | def analyze_items(lines: List[str]) -> Tuple[(str, str, str)]:
" extracts actual name & price of item from given URLs\n\n an invalid link is defined by not having a price on the webpage\n or, not starting with a protocol such as 'https'\n\n an invalid link calls 'continue' to skip to the next URL\n\n yields: (amazon_item_name: str, amazon_price: str, desired_price: str)\n "
for line in lines:
parts = line.split(',')
(link, desired_price) = (parts[0], parts[1])
try:
page = requests.get(link, headers=HEADERS)
except requests.exceptions.MissingSchema:
print(f'''
[WARINING] - <link: {link}> - INVALID!''')
print("LINK DOES NOT CONTAIN PROTOCOL i.e 'https://'")
continue
soup = BS(page.content, 'html5lib')
if (len(parts) == 2):
try:
(item_name, item_a_price) = analyze_non_book(soup)
except AttributeError:
print(f'''
[WARNING] - <link: {link}> - INVALID!''')
print('WEBPAGE DOES NOT CONTAIN PRICE')
continue
(yield (item_name, item_a_price, desired_price))
else:
(pb_price, ke_price) = analyze_book(soup)
(desired_pb_price, desired_ke_price) = (parts[1], parts[2])
(yield (soup.title.get_text(), pb_price, ke_price, desired_pb_price, desired_ke_price))
pass | extracts actual name & price of item from given URLs
an invalid link is defined by not having a price on the webpage
or, not starting with a protocol such as 'https'
an invalid link calls 'continue' to skip to the next URL
yields: (amazon_item_name: str, amazon_price: str, desired_price: str) | amazon_item_tracker.py | analyze_items | Tesla-CEO/amazon-item-tracker | 0 | python | def analyze_items(lines: List[str]) -> Tuple[(str, str, str)]:
" extracts actual name & price of item from given URLs\n\n an invalid link is defined by not having a price on the webpage\n or, not starting with a protocol such as 'https'\n\n an invalid link calls 'continue' to skip to the next URL\n\n yields: (amazon_item_name: str, amazon_price: str, desired_price: str)\n "
for line in lines:
parts = line.split(',')
(link, desired_price) = (parts[0], parts[1])
try:
page = requests.get(link, headers=HEADERS)
except requests.exceptions.MissingSchema:
print(f'
[WARINING] - <link: {link}> - INVALID!')
print("LINK DOES NOT CONTAIN PROTOCOL i.e 'https://'")
continue
soup = BS(page.content, 'html5lib')
if (len(parts) == 2):
try:
(item_name, item_a_price) = analyze_non_book(soup)
except AttributeError:
print(f'
[WARNING] - <link: {link}> - INVALID!')
print('WEBPAGE DOES NOT CONTAIN PRICE')
continue
(yield (item_name, item_a_price, desired_price))
else:
(pb_price, ke_price) = analyze_book(soup)
(desired_pb_price, desired_ke_price) = (parts[1], parts[2])
(yield (soup.title.get_text(), pb_price, ke_price, desired_pb_price, desired_ke_price))
pass | def analyze_items(lines: List[str]) -> Tuple[(str, str, str)]:
" extracts actual name & price of item from given URLs\n\n an invalid link is defined by not having a price on the webpage\n or, not starting with a protocol such as 'https'\n\n an invalid link calls 'continue' to skip to the next URL\n\n yields: (amazon_item_name: str, amazon_price: str, desired_price: str)\n "
for line in lines:
parts = line.split(',')
(link, desired_price) = (parts[0], parts[1])
try:
page = requests.get(link, headers=HEADERS)
except requests.exceptions.MissingSchema:
print(f'
[WARINING] - <link: {link}> - INVALID!')
print("LINK DOES NOT CONTAIN PROTOCOL i.e 'https://'")
continue
soup = BS(page.content, 'html5lib')
if (len(parts) == 2):
try:
(item_name, item_a_price) = analyze_non_book(soup)
except AttributeError:
print(f'
[WARNING] - <link: {link}> - INVALID!')
print('WEBPAGE DOES NOT CONTAIN PRICE')
continue
(yield (item_name, item_a_price, desired_price))
else:
(pb_price, ke_price) = analyze_book(soup)
(desired_pb_price, desired_ke_price) = (parts[1], parts[2])
(yield (soup.title.get_text(), pb_price, ke_price, desired_pb_price, desired_ke_price))
pass<|docstring|>extracts actual name & price of item from given URLs
an invalid link is defined by not having a price on the webpage
or, not starting with a protocol such as 'https'
an invalid link calls 'continue' to skip to the next URL
yields: (amazon_item_name: str, amazon_price: str, desired_price: str)<|endoftext|> |
051a1a7014e85d63930fabda7da6513d62a8b170b45d1bf239031f4d1773dc34 | def ping_prices() -> None:
" returns: dictionary 'name: str' : 'a_price: float' for each item "
(non_books, books) = ({}, {})
links = REG.load_links()[0]
data = analyze_items(links)
for loops in range(len(links)):
try:
output = next(data)
except StopIteration:
break
if (len(output) == 3):
non_books.update({output[0]: output[1]})
elif (len(output) == 5):
books.update({output[0]: [output[1], output[2]]})
for (k, v) in non_books.items():
print(f'''
<NAME -> {k}>
<PRICE -> {v}>''')
for (k, v) in books.items():
print(f'''
<NAME -> {k}>
<PAPERBACK PRICE -> {v[0]}>
<KINDLE PRICE -> {v[1]}>''')
pass | returns: dictionary 'name: str' : 'a_price: float' for each item | amazon_item_tracker.py | ping_prices | Tesla-CEO/amazon-item-tracker | 0 | python | def ping_prices() -> None:
" "
(non_books, books) = ({}, {})
links = REG.load_links()[0]
data = analyze_items(links)
for loops in range(len(links)):
try:
output = next(data)
except StopIteration:
break
if (len(output) == 3):
non_books.update({output[0]: output[1]})
elif (len(output) == 5):
books.update({output[0]: [output[1], output[2]]})
for (k, v) in non_books.items():
print(f'
<NAME -> {k}>
<PRICE -> {v}>')
for (k, v) in books.items():
print(f'
<NAME -> {k}>
<PAPERBACK PRICE -> {v[0]}>
<KINDLE PRICE -> {v[1]}>')
pass | def ping_prices() -> None:
" "
(non_books, books) = ({}, {})
links = REG.load_links()[0]
data = analyze_items(links)
for loops in range(len(links)):
try:
output = next(data)
except StopIteration:
break
if (len(output) == 3):
non_books.update({output[0]: output[1]})
elif (len(output) == 5):
books.update({output[0]: [output[1], output[2]]})
for (k, v) in non_books.items():
print(f'
<NAME -> {k}>
<PRICE -> {v}>')
for (k, v) in books.items():
print(f'
<NAME -> {k}>
<PAPERBACK PRICE -> {v[0]}>
<KINDLE PRICE -> {v[1]}>')
pass<|docstring|>returns: dictionary 'name: str' : 'a_price: float' for each item<|endoftext|> |
cd18fabf9786db9493f5fb38a994c80eb25b66aed3805878b8be6575a504a55c | def monitor_prices() -> None:
' continuously compares prices approx. every 50sec and sends an email when\n the price of an item has been reduced to a specified desired price '
email_component.instructions()
try:
sender_email = email_component.set_sender_email()
sender_email_pw = email_component.set_sender_pw()
receiver_email = email_component.set_receiver_email()
except TypeError as e:
print(e)
return
while True:
lines = REG.load_links()[0]
data = analyze_items(lines)
results = []
for loops in range(len(lines)):
try:
output = next(data)
except StopIteration:
break
print()
if (len(output) == 3):
if check_price(output[2], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
elif (len(output) == 5):
if check_price(output[3], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
if check_price(output[4], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
time.sleep(INTERVAL)
pass | continuously compares prices approx. every 50sec and sends an email when
the price of an item has been reduced to a specified desired price | amazon_item_tracker.py | monitor_prices | Tesla-CEO/amazon-item-tracker | 0 | python | def monitor_prices() -> None:
' continuously compares prices approx. every 50sec and sends an email when\n the price of an item has been reduced to a specified desired price '
email_component.instructions()
try:
sender_email = email_component.set_sender_email()
sender_email_pw = email_component.set_sender_pw()
receiver_email = email_component.set_receiver_email()
except TypeError as e:
print(e)
return
while True:
lines = REG.load_links()[0]
data = analyze_items(lines)
results = []
for loops in range(len(lines)):
try:
output = next(data)
except StopIteration:
break
print()
if (len(output) == 3):
if check_price(output[2], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
elif (len(output) == 5):
if check_price(output[3], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
if check_price(output[4], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
time.sleep(INTERVAL)
pass | def monitor_prices() -> None:
' continuously compares prices approx. every 50sec and sends an email when\n the price of an item has been reduced to a specified desired price '
email_component.instructions()
try:
sender_email = email_component.set_sender_email()
sender_email_pw = email_component.set_sender_pw()
receiver_email = email_component.set_receiver_email()
except TypeError as e:
print(e)
return
while True:
lines = REG.load_links()[0]
data = analyze_items(lines)
results = []
for loops in range(len(lines)):
try:
output = next(data)
except StopIteration:
break
print()
if (len(output) == 3):
if check_price(output[2], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
elif (len(output) == 5):
if check_price(output[3], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
if check_price(output[4], output[1]):
email_component.send_email(receiver_email, sender_email, sender_email_pw, {output[0]})
time.sleep(INTERVAL)
pass<|docstring|>continuously compares prices approx. every 50sec and sends an email when
the price of an item has been reduced to a specified desired price<|endoftext|> |
aaa0c15f6e29972252766c8ad32527782001d9ad30f7a3fc43e956688bb35981 | def index(self, values, location):
'Takes values found under location and reflects that in the index\n for future search.\n\n Args:\n values: list of terms (e.g lemma, exact term ...)\n location: str representing the location where those values were\n found.\n '
for value in values:
self.db.add_location(value, location) | Takes values found under location and reflects that in the index
for future search.
Args:
values: list of terms (e.g lemma, exact term ...)
location: str representing the location where those values were
found. | simplesearch/index.py | index | youben11/simplesearch | 0 | python | def index(self, values, location):
'Takes values found under location and reflects that in the index\n for future search.\n\n Args:\n values: list of terms (e.g lemma, exact term ...)\n location: str representing the location where those values were\n found.\n '
for value in values:
self.db.add_location(value, location) | def index(self, values, location):
'Takes values found under location and reflects that in the index\n for future search.\n\n Args:\n values: list of terms (e.g lemma, exact term ...)\n location: str representing the location where those values were\n found.\n '
for value in values:
self.db.add_location(value, location)<|docstring|>Takes values found under location and reflects that in the index
for future search.
Args:
values: list of terms (e.g lemma, exact term ...)
location: str representing the location where those values were
found.<|endoftext|> |
4152a74de5cb77de8d5bc2d6f6af4179a786c6867083042cb37dca1240024016 | def read_by_lines(path, encoding='utf-8'):
'read the data by line'
result = list()
with open(path, 'r') as infile:
for line in infile:
result.append(line.strip().decode(encoding))
return result | read the data by line | KG/DuEE_baseline/DuEE-PaddleHub/data_process.py | read_by_lines | parap1uie-s/Research | 1,319 | python | def read_by_lines(path, encoding='utf-8'):
result = list()
with open(path, 'r') as infile:
for line in infile:
result.append(line.strip().decode(encoding))
return result | def read_by_lines(path, encoding='utf-8'):
result = list()
with open(path, 'r') as infile:
for line in infile:
result.append(line.strip().decode(encoding))
return result<|docstring|>read the data by line<|endoftext|> |
29b62407c65e2bce3905e26bbe3ba7c16672bb57ebeef117e61509223f2c7869 | def write_by_lines(path, data, t_code='utf-8'):
'write the data'
with open(path, 'w') as outfile:
[outfile.write((d.encode(t_code) + '\n')) for d in data] | write the data | KG/DuEE_baseline/DuEE-PaddleHub/data_process.py | write_by_lines | parap1uie-s/Research | 1,319 | python | def write_by_lines(path, data, t_code='utf-8'):
with open(path, 'w') as outfile:
[outfile.write((d.encode(t_code) + '\n')) for d in data] | def write_by_lines(path, data, t_code='utf-8'):
with open(path, 'w') as outfile:
[outfile.write((d.encode(t_code) + '\n')) for d in data]<|docstring|>write the data<|endoftext|> |
23d84162bc0ba1692084ec9336760b4032774def43688b978ca03449d2bf1029 | def data_process(path, model='trigger', is_predict=False):
'data_process'
def label_data(data, start, l, _type):
'label_data'
for i in range(start, (start + l)):
suffix = (u'B-' if (i == start) else u'I-')
data[i] = u'{}{}'.format(suffix, _type)
return data
sentences = []
output = ([u'text_a'] if is_predict else [u'text_a\tlabel'])
with open(path) as f:
for line in f:
d_json = json.loads(line.strip().decode('utf-8'))
_id = d_json['id']
text_a = [(u',' if ((t == u' ') or (t == u'\n') or (t == u'\t')) else t) for t in list(d_json['text'].lower())]
if is_predict:
sentences.append({'text': d_json['text'], 'id': _id})
output.append(u'\x02'.join(text_a))
elif (model == u'trigger'):
labels = ([u'O'] * len(text_a))
for event in d_json['event_list']:
event_type = event['event_type']
start = event['trigger_start_index']
trigger = event['trigger']
labels = label_data(labels, start, len(trigger), event_type)
output.append(u'{}\t{}'.format(u'\x02'.join(text_a), u'\x02'.join(labels)))
elif (model == u'role'):
for event in d_json['event_list']:
labels = ([u'O'] * len(text_a))
for arg in event['arguments']:
role_type = arg['role']
argument = arg['argument']
start = arg['argument_start_index']
labels = label_data(labels, start, len(argument), role_type)
output.append(u'{}\t{}'.format(u'\x02'.join(text_a), u'\x02'.join(labels)))
if is_predict:
return (sentences, output)
else:
return output | data_process | KG/DuEE_baseline/DuEE-PaddleHub/data_process.py | data_process | parap1uie-s/Research | 1,319 | python | def (path, model='trigger', is_predict=False):
def label_data(data, start, l, _type):
'label_data'
for i in range(start, (start + l)):
suffix = (u'B-' if (i == start) else u'I-')
data[i] = u'{}{}'.format(suffix, _type)
return data
sentences = []
output = ([u'text_a'] if is_predict else [u'text_a\tlabel'])
with open(path) as f:
for line in f:
d_json = json.loads(line.strip().decode('utf-8'))
_id = d_json['id']
text_a = [(u',' if ((t == u' ') or (t == u'\n') or (t == u'\t')) else t) for t in list(d_json['text'].lower())]
if is_predict:
sentences.append({'text': d_json['text'], 'id': _id})
output.append(u'\x02'.join(text_a))
elif (model == u'trigger'):
labels = ([u'O'] * len(text_a))
for event in d_json['event_list']:
event_type = event['event_type']
start = event['trigger_start_index']
trigger = event['trigger']
labels = label_data(labels, start, len(trigger), event_type)
output.append(u'{}\t{}'.format(u'\x02'.join(text_a), u'\x02'.join(labels)))
elif (model == u'role'):
for event in d_json['event_list']:
labels = ([u'O'] * len(text_a))
for arg in event['arguments']:
role_type = arg['role']
argument = arg['argument']
start = arg['argument_start_index']
labels = label_data(labels, start, len(argument), role_type)
output.append(u'{}\t{}'.format(u'\x02'.join(text_a), u'\x02'.join(labels)))
if is_predict:
return (sentences, output)
else:
return output | def (path, model='trigger', is_predict=False):
def label_data(data, start, l, _type):
'label_data'
for i in range(start, (start + l)):
suffix = (u'B-' if (i == start) else u'I-')
data[i] = u'{}{}'.format(suffix, _type)
return data
sentences = []
output = ([u'text_a'] if is_predict else [u'text_a\tlabel'])
with open(path) as f:
for line in f:
d_json = json.loads(line.strip().decode('utf-8'))
_id = d_json['id']
text_a = [(u',' if ((t == u' ') or (t == u'\n') or (t == u'\t')) else t) for t in list(d_json['text'].lower())]
if is_predict:
sentences.append({'text': d_json['text'], 'id': _id})
output.append(u'\x02'.join(text_a))
elif (model == u'trigger'):
labels = ([u'O'] * len(text_a))
for event in d_json['event_list']:
event_type = event['event_type']
start = event['trigger_start_index']
trigger = event['trigger']
labels = label_data(labels, start, len(trigger), event_type)
output.append(u'{}\t{}'.format(u'\x02'.join(text_a), u'\x02'.join(labels)))
elif (model == u'role'):
for event in d_json['event_list']:
labels = ([u'O'] * len(text_a))
for arg in event['arguments']:
role_type = arg['role']
argument = arg['argument']
start = arg['argument_start_index']
labels = label_data(labels, start, len(argument), role_type)
output.append(u'{}\t{}'.format(u'\x02'.join(text_a), u'\x02'.join(labels)))
if is_predict:
return (sentences, output)
else:
return output<|docstring|>data_process<|endoftext|> |
dd2f1aa28ad91dbabf6eec03c78cd55ef26e1a3eb31eb607557a95d02d315419 | def schema_process(path, model='trigger'):
'schema_process'
def label_add(labels, _type):
'label_add'
if (u'B-{}'.format(_type) not in labels):
labels.extend([u'B-{}'.format(_type), u'I-{}'.format(_type)])
return labels
labels = []
with open(path) as f:
for line in f:
d_json = json.loads(line.strip().decode('utf-8'))
if (model == u'trigger'):
labels = label_add(labels, d_json['event_type'])
elif (model == u'role'):
for role in d_json['role_list']:
labels = label_add(labels, role['role'])
labels.append(u'O')
return labels | schema_process | KG/DuEE_baseline/DuEE-PaddleHub/data_process.py | schema_process | parap1uie-s/Research | 1,319 | python | def (path, model='trigger'):
def label_add(labels, _type):
'label_add'
if (u'B-{}'.format(_type) not in labels):
labels.extend([u'B-{}'.format(_type), u'I-{}'.format(_type)])
return labels
labels = []
with open(path) as f:
for line in f:
d_json = json.loads(line.strip().decode('utf-8'))
if (model == u'trigger'):
labels = label_add(labels, d_json['event_type'])
elif (model == u'role'):
for role in d_json['role_list']:
labels = label_add(labels, role['role'])
labels.append(u'O')
return labels | def (path, model='trigger'):
def label_add(labels, _type):
'label_add'
if (u'B-{}'.format(_type) not in labels):
labels.extend([u'B-{}'.format(_type), u'I-{}'.format(_type)])
return labels
labels = []
with open(path) as f:
for line in f:
d_json = json.loads(line.strip().decode('utf-8'))
if (model == u'trigger'):
labels = label_add(labels, d_json['event_type'])
elif (model == u'role'):
for role in d_json['role_list']:
labels = label_add(labels, role['role'])
labels.append(u'O')
return labels<|docstring|>schema_process<|endoftext|> |
364ea51c45b7b2316155b1d88caa3ddfaeec821110a38eae41baa21596c993fa | def extract_result(text, labels):
'extract_result'
(ret, is_start, cur_type) = ([], False, None)
for (i, label) in enumerate(labels):
if (label != u'O'):
_type = label[2:]
if label.startswith(u'B-'):
is_start = True
cur_type = _type
ret.append({'start': i, 'text': [text[i]], 'type': _type})
elif (_type != cur_type):
'\n # 如果是没有B-开头的,则不要这部分数据\n cur_type = None\n is_start = False\n '
cur_type = _type
is_start = True
ret.append({'start': i, 'text': [text[i]], 'type': _type})
elif is_start:
ret[(- 1)]['text'].append(text[i])
else:
cur_type = None
is_start = False
else:
cur_type = None
is_start = False
return ret | extract_result | KG/DuEE_baseline/DuEE-PaddleHub/data_process.py | extract_result | parap1uie-s/Research | 1,319 | python | def (text, labels):
(ret, is_start, cur_type) = ([], False, None)
for (i, label) in enumerate(labels):
if (label != u'O'):
_type = label[2:]
if label.startswith(u'B-'):
is_start = True
cur_type = _type
ret.append({'start': i, 'text': [text[i]], 'type': _type})
elif (_type != cur_type):
'\n # 如果是没有B-开头的,则不要这部分数据\n cur_type = None\n is_start = False\n '
cur_type = _type
is_start = True
ret.append({'start': i, 'text': [text[i]], 'type': _type})
elif is_start:
ret[(- 1)]['text'].append(text[i])
else:
cur_type = None
is_start = False
else:
cur_type = None
is_start = False
return ret | def (text, labels):
(ret, is_start, cur_type) = ([], False, None)
for (i, label) in enumerate(labels):
if (label != u'O'):
_type = label[2:]
if label.startswith(u'B-'):
is_start = True
cur_type = _type
ret.append({'start': i, 'text': [text[i]], 'type': _type})
elif (_type != cur_type):
'\n # 如果是没有B-开头的,则不要这部分数据\n cur_type = None\n is_start = False\n '
cur_type = _type
is_start = True
ret.append({'start': i, 'text': [text[i]], 'type': _type})
elif is_start:
ret[(- 1)]['text'].append(text[i])
else:
cur_type = None
is_start = False
else:
cur_type = None
is_start = False
return ret<|docstring|>extract_result<|endoftext|> |
69ed6d0acfb5fe76d3d5d9b3e9d286b3dda55dee03f94dfe3656b4715062af8e | def predict_data_process(trigger_file, role_file, schema_file, save_path):
'predict_data_process'
pred_ret = []
trigger_datas = read_by_lines(trigger_file)
role_datas = read_by_lines(role_file)
schema_datas = read_by_lines(schema_file)
schema = {}
for s in schema_datas:
d_json = json.loads(s)
schema[d_json['event_type']] = [r['role'] for r in d_json['role_list']]
sent_role_mapping = {}
for d in role_datas:
d_json = json.loads(d)
r_ret = extract_result(d_json['text'], d_json['labels'])
role_ret = {}
for r in r_ret:
role_type = r['type']
if (role_type not in role_ret):
role_ret[role_type] = []
role_ret[role_type].append(u''.join(r['text']))
sent_role_mapping[d_json['id']] = role_ret
for d in trigger_datas:
d_json = json.loads(d)
t_ret = extract_result(d_json['text'], d_json['labels'])
pred_event_types = list(set([t['type'] for t in t_ret]))
event_list = []
for event_type in pred_event_types:
role_list = schema[event_type]
arguments = []
for (role_type, ags) in sent_role_mapping[d_json['id']].items():
if (role_type not in role_list):
continue
for arg in ags:
if (len(arg) == 1):
continue
arguments.append({'role': role_type, 'argument': arg})
event = {'event_type': event_type, 'arguments': arguments}
event_list.append(event)
pred_ret.append({'id': d_json['id'], 'text': d_json['text'], 'event_list': event_list})
pred_ret = [json.dumps(r, ensure_ascii=False) for r in pred_ret]
write_by_lines(save_path, pred_ret) | predict_data_process | KG/DuEE_baseline/DuEE-PaddleHub/data_process.py | predict_data_process | parap1uie-s/Research | 1,319 | python | def (trigger_file, role_file, schema_file, save_path):
pred_ret = []
trigger_datas = read_by_lines(trigger_file)
role_datas = read_by_lines(role_file)
schema_datas = read_by_lines(schema_file)
schema = {}
for s in schema_datas:
d_json = json.loads(s)
schema[d_json['event_type']] = [r['role'] for r in d_json['role_list']]
sent_role_mapping = {}
for d in role_datas:
d_json = json.loads(d)
r_ret = extract_result(d_json['text'], d_json['labels'])
role_ret = {}
for r in r_ret:
role_type = r['type']
if (role_type not in role_ret):
role_ret[role_type] = []
role_ret[role_type].append(u.join(r['text']))
sent_role_mapping[d_json['id']] = role_ret
for d in trigger_datas:
d_json = json.loads(d)
t_ret = extract_result(d_json['text'], d_json['labels'])
pred_event_types = list(set([t['type'] for t in t_ret]))
event_list = []
for event_type in pred_event_types:
role_list = schema[event_type]
arguments = []
for (role_type, ags) in sent_role_mapping[d_json['id']].items():
if (role_type not in role_list):
continue
for arg in ags:
if (len(arg) == 1):
continue
arguments.append({'role': role_type, 'argument': arg})
event = {'event_type': event_type, 'arguments': arguments}
event_list.append(event)
pred_ret.append({'id': d_json['id'], 'text': d_json['text'], 'event_list': event_list})
pred_ret = [json.dumps(r, ensure_ascii=False) for r in pred_ret]
write_by_lines(save_path, pred_ret) | def (trigger_file, role_file, schema_file, save_path):
pred_ret = []
trigger_datas = read_by_lines(trigger_file)
role_datas = read_by_lines(role_file)
schema_datas = read_by_lines(schema_file)
schema = {}
for s in schema_datas:
d_json = json.loads(s)
schema[d_json['event_type']] = [r['role'] for r in d_json['role_list']]
sent_role_mapping = {}
for d in role_datas:
d_json = json.loads(d)
r_ret = extract_result(d_json['text'], d_json['labels'])
role_ret = {}
for r in r_ret:
role_type = r['type']
if (role_type not in role_ret):
role_ret[role_type] = []
role_ret[role_type].append(u.join(r['text']))
sent_role_mapping[d_json['id']] = role_ret
for d in trigger_datas:
d_json = json.loads(d)
t_ret = extract_result(d_json['text'], d_json['labels'])
pred_event_types = list(set([t['type'] for t in t_ret]))
event_list = []
for event_type in pred_event_types:
role_list = schema[event_type]
arguments = []
for (role_type, ags) in sent_role_mapping[d_json['id']].items():
if (role_type not in role_list):
continue
for arg in ags:
if (len(arg) == 1):
continue
arguments.append({'role': role_type, 'argument': arg})
event = {'event_type': event_type, 'arguments': arguments}
event_list.append(event)
pred_ret.append({'id': d_json['id'], 'text': d_json['text'], 'event_list': event_list})
pred_ret = [json.dumps(r, ensure_ascii=False) for r in pred_ret]
write_by_lines(save_path, pred_ret)<|docstring|>predict_data_process<|endoftext|> |
e75ddad70dbd51af017e25ef0f4e1b828b803c87adefd5126726d5743af72f63 | def label_data(data, start, l, _type):
'label_data'
for i in range(start, (start + l)):
suffix = (u'B-' if (i == start) else u'I-')
data[i] = u'{}{}'.format(suffix, _type)
return data | label_data | KG/DuEE_baseline/DuEE-PaddleHub/data_process.py | label_data | parap1uie-s/Research | 1,319 | python | def (data, start, l, _type):
for i in range(start, (start + l)):
suffix = (u'B-' if (i == start) else u'I-')
data[i] = u'{}{}'.format(suffix, _type)
return data | def (data, start, l, _type):
for i in range(start, (start + l)):
suffix = (u'B-' if (i == start) else u'I-')
data[i] = u'{}{}'.format(suffix, _type)
return data<|docstring|>label_data<|endoftext|> |
b5ac2baf37b6449972597c60d8b44a30552510ca1cb6b0a3d8681b4ff34a4e5e | def label_add(labels, _type):
'label_add'
if (u'B-{}'.format(_type) not in labels):
labels.extend([u'B-{}'.format(_type), u'I-{}'.format(_type)])
return labels | label_add | KG/DuEE_baseline/DuEE-PaddleHub/data_process.py | label_add | parap1uie-s/Research | 1,319 | python | def (labels, _type):
if (u'B-{}'.format(_type) not in labels):
labels.extend([u'B-{}'.format(_type), u'I-{}'.format(_type)])
return labels | def (labels, _type):
if (u'B-{}'.format(_type) not in labels):
labels.extend([u'B-{}'.format(_type), u'I-{}'.format(_type)])
return labels<|docstring|>label_add<|endoftext|> |
7db47d86be72c1efdeeb9117ec6fc3766a4533617712c0eefc18428ac19bed26 | def generating_data():
'Reading and generating necessary data about random word.'
with open('word-meaning-examples.csv', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
num = random.randint(0, 13160)
data = {}
for row in csv_reader:
data[row['Word']] = [row['Meaning']]
examples = [row[example] for example in ['Examples/0', 'Examples/1', 'Examples/2', 'Examples/3', 'Examples/4', 'Examples/5', 'Examples/6', 'Examples/7', 'Examples/8', 'Examples/9'] if (row[example] != '')]
data[row['Word']].append(examples)
key = random.choice(list(data.keys()))
data = data[key]
return ([key] + data) | Reading and generating necessary data about random word. | data_generating.py | generating_data | juliaaz/Spice-Girls-Alarm | 5 | python | def generating_data():
with open('word-meaning-examples.csv', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
num = random.randint(0, 13160)
data = {}
for row in csv_reader:
data[row['Word']] = [row['Meaning']]
examples = [row[example] for example in ['Examples/0', 'Examples/1', 'Examples/2', 'Examples/3', 'Examples/4', 'Examples/5', 'Examples/6', 'Examples/7', 'Examples/8', 'Examples/9'] if (row[example] != )]
data[row['Word']].append(examples)
key = random.choice(list(data.keys()))
data = data[key]
return ([key] + data) | def generating_data():
with open('word-meaning-examples.csv', encoding='utf-8') as csv_file:
csv_reader = csv.DictReader(csv_file)
num = random.randint(0, 13160)
data = {}
for row in csv_reader:
data[row['Word']] = [row['Meaning']]
examples = [row[example] for example in ['Examples/0', 'Examples/1', 'Examples/2', 'Examples/3', 'Examples/4', 'Examples/5', 'Examples/6', 'Examples/7', 'Examples/8', 'Examples/9'] if (row[example] != )]
data[row['Word']].append(examples)
key = random.choice(list(data.keys()))
data = data[key]
return ([key] + data)<|docstring|>Reading and generating necessary data about random word.<|endoftext|> |
a1e37c605d7fc79a6fd3083a1627c86c6d629ad3584821b06e58fee348969024 | def quize_definitions():
'Definition quize generation.'
data_1 = generating_data()
word_correct = data_1[0]
words = [generating_data()[0], generating_data()[0], word_correct]
words = random.sample(words, len(words))
words_str = '| '
for word in words:
words_str += (word + ' | ')
print(('\nPrint the correct word for this definition:' + f'''
"{data_1[1]}"'''))
print(f'''
Choose among: {words_str}''')
word_input = str(input('\nYour answer: '))
if (word_input == word_correct):
print('Good job!')
return True
else:
print("It's wrong word :(")
print(f'''Correct answer: {word_correct}
''')
return False | Definition quize generation. | data_generating.py | quize_definitions | juliaaz/Spice-Girls-Alarm | 5 | python | def quize_definitions():
data_1 = generating_data()
word_correct = data_1[0]
words = [generating_data()[0], generating_data()[0], word_correct]
words = random.sample(words, len(words))
words_str = '| '
for word in words:
words_str += (word + ' | ')
print(('\nPrint the correct word for this definition:' + f'
"{data_1[1]}"'))
print(f'
Choose among: {words_str}')
word_input = str(input('\nYour answer: '))
if (word_input == word_correct):
print('Good job!')
return True
else:
print("It's wrong word :(")
print(f'Correct answer: {word_correct}
')
return False | def quize_definitions():
data_1 = generating_data()
word_correct = data_1[0]
words = [generating_data()[0], generating_data()[0], word_correct]
words = random.sample(words, len(words))
words_str = '| '
for word in words:
words_str += (word + ' | ')
print(('\nPrint the correct word for this definition:' + f'
"{data_1[1]}"'))
print(f'
Choose among: {words_str}')
word_input = str(input('\nYour answer: '))
if (word_input == word_correct):
print('Good job!')
return True
else:
print("It's wrong word :(")
print(f'Correct answer: {word_correct}
')
return False<|docstring|>Definition quize generation.<|endoftext|> |
6513e096acd831e614f61bbe3b82788a0f988dee719c06dbae421a056daa8d3f | def quize_exampes():
'Example quize generation.'
data_1 = generating_data()
word_correct = data_1[0]
words = [generating_data()[0], generating_data()[0], word_correct]
words = random.sample(words, len(words))
words_str = '| '
for word in words:
words_str += (word + ' | ')
sentence = random.choice(data_1[2]).lower().replace(word_correct.lower(), '_________').capitalize()
print(('\nPut in the correct word into the sentence:' + f'''
"{sentence}"'''))
print(f'''
Choose among: {words_str}''')
word_input = str(input('\nYour answer: '))
if (word_input == word_correct):
print('Good job!')
return True
else:
print("It's wrong word :(")
print(f'''
Correct answer: {word_correct}
''')
return False | Example quize generation. | data_generating.py | quize_exampes | juliaaz/Spice-Girls-Alarm | 5 | python | def quize_exampes():
data_1 = generating_data()
word_correct = data_1[0]
words = [generating_data()[0], generating_data()[0], word_correct]
words = random.sample(words, len(words))
words_str = '| '
for word in words:
words_str += (word + ' | ')
sentence = random.choice(data_1[2]).lower().replace(word_correct.lower(), '_________').capitalize()
print(('\nPut in the correct word into the sentence:' + f'
"{sentence}"'))
print(f'
Choose among: {words_str}')
word_input = str(input('\nYour answer: '))
if (word_input == word_correct):
print('Good job!')
return True
else:
print("It's wrong word :(")
print(f'
Correct answer: {word_correct}
')
return False | def quize_exampes():
data_1 = generating_data()
word_correct = data_1[0]
words = [generating_data()[0], generating_data()[0], word_correct]
words = random.sample(words, len(words))
words_str = '| '
for word in words:
words_str += (word + ' | ')
sentence = random.choice(data_1[2]).lower().replace(word_correct.lower(), '_________').capitalize()
print(('\nPut in the correct word into the sentence:' + f'
"{sentence}"'))
print(f'
Choose among: {words_str}')
word_input = str(input('\nYour answer: '))
if (word_input == word_correct):
print('Good job!')
return True
else:
print("It's wrong word :(")
print(f'
Correct answer: {word_correct}
')
return False<|docstring|>Example quize generation.<|endoftext|> |
57bcb0b8efb1ebfef3da7cb411f026484110ccbd20cb832be96792a2267d2098 | def choosing_quiz():
'Choosing one of quizes in random way.'
num = random.randint(0, 1)
if (num == 0):
return quize_exampes()
else:
return quize_definitions() | Choosing one of quizes in random way. | data_generating.py | choosing_quiz | juliaaz/Spice-Girls-Alarm | 5 | python | def choosing_quiz():
num = random.randint(0, 1)
if (num == 0):
return quize_exampes()
else:
return quize_definitions() | def choosing_quiz():
num = random.randint(0, 1)
if (num == 0):
return quize_exampes()
else:
return quize_definitions()<|docstring|>Choosing one of quizes in random way.<|endoftext|> |
304cf0b3af5ad17fe2ad0db4ef77d7e0e4c6c9b566fc21016df5e05038359bc3 | def generating_quiz():
'Generating the whole quize process.'
for _ in range(2):
res = choosing_quiz()
if (res == False):
return False
return True | Generating the whole quize process. | data_generating.py | generating_quiz | juliaaz/Spice-Girls-Alarm | 5 | python | def generating_quiz():
for _ in range(2):
res = choosing_quiz()
if (res == False):
return False
return True | def generating_quiz():
for _ in range(2):
res = choosing_quiz()
if (res == False):
return False
return True<|docstring|>Generating the whole quize process.<|endoftext|> |
7c150d3e44820e901a84c3078c2ef5711f9ac56602f5c3bb8ad12861f5fc73c5 | @staticmethod
def encode(msg: Message) -> bytes:
"\n Encode a 'MlTrade' message into bytes.\n\n :param msg: the message object.\n :return: the bytes.\n "
msg = cast(MlTradeMessage, msg)
message_pb = ProtobufMessage()
dialogue_message_pb = DialogueMessage()
ml_trade_msg = ml_trade_pb2.MlTradeMessage()
dialogue_message_pb.message_id = msg.message_id
dialogue_reference = msg.dialogue_reference
dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]
dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]
dialogue_message_pb.target = msg.target
performative_id = msg.performative
if (performative_id == MlTradeMessage.Performative.CFP):
performative = ml_trade_pb2.MlTradeMessage.Cfp_Performative()
query = msg.query
Query.encode(performative.query, query)
ml_trade_msg.cfp.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.TERMS):
performative = ml_trade_pb2.MlTradeMessage.Terms_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
ml_trade_msg.terms.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.ACCEPT):
performative = ml_trade_pb2.MlTradeMessage.Accept_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
tx_digest = msg.tx_digest
performative.tx_digest = tx_digest
ml_trade_msg.accept.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.DATA):
performative = ml_trade_pb2.MlTradeMessage.Data_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
payload = msg.payload
performative.payload = payload
ml_trade_msg.data.CopyFrom(performative)
else:
raise ValueError('Performative not valid: {}'.format(performative_id))
dialogue_message_pb.content = ml_trade_msg.SerializeToString()
message_pb.dialogue_message.CopyFrom(dialogue_message_pb)
message_bytes = message_pb.SerializeToString()
return message_bytes | Encode a 'MlTrade' message into bytes.
:param msg: the message object.
:return: the bytes. | packages/fetchai/protocols/ml_trade/serialization.py | encode | BuildJet/agents-aea | 126 | python | @staticmethod
def encode(msg: Message) -> bytes:
"\n Encode a 'MlTrade' message into bytes.\n\n :param msg: the message object.\n :return: the bytes.\n "
msg = cast(MlTradeMessage, msg)
message_pb = ProtobufMessage()
dialogue_message_pb = DialogueMessage()
ml_trade_msg = ml_trade_pb2.MlTradeMessage()
dialogue_message_pb.message_id = msg.message_id
dialogue_reference = msg.dialogue_reference
dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]
dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]
dialogue_message_pb.target = msg.target
performative_id = msg.performative
if (performative_id == MlTradeMessage.Performative.CFP):
performative = ml_trade_pb2.MlTradeMessage.Cfp_Performative()
query = msg.query
Query.encode(performative.query, query)
ml_trade_msg.cfp.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.TERMS):
performative = ml_trade_pb2.MlTradeMessage.Terms_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
ml_trade_msg.terms.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.ACCEPT):
performative = ml_trade_pb2.MlTradeMessage.Accept_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
tx_digest = msg.tx_digest
performative.tx_digest = tx_digest
ml_trade_msg.accept.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.DATA):
performative = ml_trade_pb2.MlTradeMessage.Data_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
payload = msg.payload
performative.payload = payload
ml_trade_msg.data.CopyFrom(performative)
else:
raise ValueError('Performative not valid: {}'.format(performative_id))
dialogue_message_pb.content = ml_trade_msg.SerializeToString()
message_pb.dialogue_message.CopyFrom(dialogue_message_pb)
message_bytes = message_pb.SerializeToString()
return message_bytes | @staticmethod
def encode(msg: Message) -> bytes:
"\n Encode a 'MlTrade' message into bytes.\n\n :param msg: the message object.\n :return: the bytes.\n "
msg = cast(MlTradeMessage, msg)
message_pb = ProtobufMessage()
dialogue_message_pb = DialogueMessage()
ml_trade_msg = ml_trade_pb2.MlTradeMessage()
dialogue_message_pb.message_id = msg.message_id
dialogue_reference = msg.dialogue_reference
dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]
dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]
dialogue_message_pb.target = msg.target
performative_id = msg.performative
if (performative_id == MlTradeMessage.Performative.CFP):
performative = ml_trade_pb2.MlTradeMessage.Cfp_Performative()
query = msg.query
Query.encode(performative.query, query)
ml_trade_msg.cfp.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.TERMS):
performative = ml_trade_pb2.MlTradeMessage.Terms_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
ml_trade_msg.terms.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.ACCEPT):
performative = ml_trade_pb2.MlTradeMessage.Accept_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
tx_digest = msg.tx_digest
performative.tx_digest = tx_digest
ml_trade_msg.accept.CopyFrom(performative)
elif (performative_id == MlTradeMessage.Performative.DATA):
performative = ml_trade_pb2.MlTradeMessage.Data_Performative()
terms = msg.terms
Description.encode(performative.terms, terms)
payload = msg.payload
performative.payload = payload
ml_trade_msg.data.CopyFrom(performative)
else:
raise ValueError('Performative not valid: {}'.format(performative_id))
dialogue_message_pb.content = ml_trade_msg.SerializeToString()
message_pb.dialogue_message.CopyFrom(dialogue_message_pb)
message_bytes = message_pb.SerializeToString()
return message_bytes<|docstring|>Encode a 'MlTrade' message into bytes.
:param msg: the message object.
:return: the bytes.<|endoftext|> |
daa7a467a586842f325dcd41225468821328c7b2849ea63b51247cf45322718b | @staticmethod
def decode(obj: bytes) -> Message:
"\n Decode bytes into a 'MlTrade' message.\n\n :param obj: the bytes object.\n :return: the 'MlTrade' message.\n "
message_pb = ProtobufMessage()
ml_trade_pb = ml_trade_pb2.MlTradeMessage()
message_pb.ParseFromString(obj)
message_id = message_pb.dialogue_message.message_id
dialogue_reference = (message_pb.dialogue_message.dialogue_starter_reference, message_pb.dialogue_message.dialogue_responder_reference)
target = message_pb.dialogue_message.target
ml_trade_pb.ParseFromString(message_pb.dialogue_message.content)
performative = ml_trade_pb.WhichOneof('performative')
performative_id = MlTradeMessage.Performative(str(performative))
performative_content = dict()
if (performative_id == MlTradeMessage.Performative.CFP):
pb2_query = ml_trade_pb.cfp.query
query = Query.decode(pb2_query)
performative_content['query'] = query
elif (performative_id == MlTradeMessage.Performative.TERMS):
pb2_terms = ml_trade_pb.terms.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
elif (performative_id == MlTradeMessage.Performative.ACCEPT):
pb2_terms = ml_trade_pb.accept.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
tx_digest = ml_trade_pb.accept.tx_digest
performative_content['tx_digest'] = tx_digest
elif (performative_id == MlTradeMessage.Performative.DATA):
pb2_terms = ml_trade_pb.data.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
payload = ml_trade_pb.data.payload
performative_content['payload'] = payload
else:
raise ValueError('Performative not valid: {}.'.format(performative_id))
return MlTradeMessage(message_id=message_id, dialogue_reference=dialogue_reference, target=target, performative=performative, **performative_content) | Decode bytes into a 'MlTrade' message.
:param obj: the bytes object.
:return: the 'MlTrade' message. | packages/fetchai/protocols/ml_trade/serialization.py | decode | BuildJet/agents-aea | 126 | python | @staticmethod
def decode(obj: bytes) -> Message:
"\n Decode bytes into a 'MlTrade' message.\n\n :param obj: the bytes object.\n :return: the 'MlTrade' message.\n "
message_pb = ProtobufMessage()
ml_trade_pb = ml_trade_pb2.MlTradeMessage()
message_pb.ParseFromString(obj)
message_id = message_pb.dialogue_message.message_id
dialogue_reference = (message_pb.dialogue_message.dialogue_starter_reference, message_pb.dialogue_message.dialogue_responder_reference)
target = message_pb.dialogue_message.target
ml_trade_pb.ParseFromString(message_pb.dialogue_message.content)
performative = ml_trade_pb.WhichOneof('performative')
performative_id = MlTradeMessage.Performative(str(performative))
performative_content = dict()
if (performative_id == MlTradeMessage.Performative.CFP):
pb2_query = ml_trade_pb.cfp.query
query = Query.decode(pb2_query)
performative_content['query'] = query
elif (performative_id == MlTradeMessage.Performative.TERMS):
pb2_terms = ml_trade_pb.terms.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
elif (performative_id == MlTradeMessage.Performative.ACCEPT):
pb2_terms = ml_trade_pb.accept.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
tx_digest = ml_trade_pb.accept.tx_digest
performative_content['tx_digest'] = tx_digest
elif (performative_id == MlTradeMessage.Performative.DATA):
pb2_terms = ml_trade_pb.data.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
payload = ml_trade_pb.data.payload
performative_content['payload'] = payload
else:
raise ValueError('Performative not valid: {}.'.format(performative_id))
return MlTradeMessage(message_id=message_id, dialogue_reference=dialogue_reference, target=target, performative=performative, **performative_content) | @staticmethod
def decode(obj: bytes) -> Message:
"\n Decode bytes into a 'MlTrade' message.\n\n :param obj: the bytes object.\n :return: the 'MlTrade' message.\n "
message_pb = ProtobufMessage()
ml_trade_pb = ml_trade_pb2.MlTradeMessage()
message_pb.ParseFromString(obj)
message_id = message_pb.dialogue_message.message_id
dialogue_reference = (message_pb.dialogue_message.dialogue_starter_reference, message_pb.dialogue_message.dialogue_responder_reference)
target = message_pb.dialogue_message.target
ml_trade_pb.ParseFromString(message_pb.dialogue_message.content)
performative = ml_trade_pb.WhichOneof('performative')
performative_id = MlTradeMessage.Performative(str(performative))
performative_content = dict()
if (performative_id == MlTradeMessage.Performative.CFP):
pb2_query = ml_trade_pb.cfp.query
query = Query.decode(pb2_query)
performative_content['query'] = query
elif (performative_id == MlTradeMessage.Performative.TERMS):
pb2_terms = ml_trade_pb.terms.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
elif (performative_id == MlTradeMessage.Performative.ACCEPT):
pb2_terms = ml_trade_pb.accept.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
tx_digest = ml_trade_pb.accept.tx_digest
performative_content['tx_digest'] = tx_digest
elif (performative_id == MlTradeMessage.Performative.DATA):
pb2_terms = ml_trade_pb.data.terms
terms = Description.decode(pb2_terms)
performative_content['terms'] = terms
payload = ml_trade_pb.data.payload
performative_content['payload'] = payload
else:
raise ValueError('Performative not valid: {}.'.format(performative_id))
return MlTradeMessage(message_id=message_id, dialogue_reference=dialogue_reference, target=target, performative=performative, **performative_content)<|docstring|>Decode bytes into a 'MlTrade' message.
:param obj: the bytes object.
:return: the 'MlTrade' message.<|endoftext|> |
d8d9f49ba7e83f5cef720684929d76fa6fa28cb85cbdb82b9956e7f7db6cfbde | def login(self):
'\n\t\tLogs in the student portal to retrieve cookies\n\t\t\n\t\tself.cookies -> request.Response.cookies\n\n\t\t'
payload = str({'username': self.regdno, 'password': self.password, 'MemberType': 'S'})
response = requests.post(Student.LOGIN_URL, data=payload, headers=Student.HEADERS)
if (response.status_code == 200):
return response.cookies
else:
print('Error: ', response.status_code)
return None | Logs in the student portal to retrieve cookies
self.cookies -> request.Response.cookies | iterapi/iterapi.py | login | Pawan0411/iterapi | 0 | python | def login(self):
'\n\t\tLogs in the student portal to retrieve cookies\n\t\t\n\t\tself.cookies -> request.Response.cookies\n\n\t\t'
payload = str({'username': self.regdno, 'password': self.password, 'MemberType': 'S'})
response = requests.post(Student.LOGIN_URL, data=payload, headers=Student.HEADERS)
if (response.status_code == 200):
return response.cookies
else:
print('Error: ', response.status_code)
return None | def login(self):
'\n\t\tLogs in the student portal to retrieve cookies\n\t\t\n\t\tself.cookies -> request.Response.cookies\n\n\t\t'
payload = str({'username': self.regdno, 'password': self.password, 'MemberType': 'S'})
response = requests.post(Student.LOGIN_URL, data=payload, headers=Student.HEADERS)
if (response.status_code == 200):
return response.cookies
else:
print('Error: ', response.status_code)
return None<|docstring|>Logs in the student portal to retrieve cookies
self.cookies -> request.Response.cookies<|endoftext|> |
734453bca877ffc32fdbd7c8775d0332430e41fae71b0980928e5e6e8f9b5925 | def getInfo(self):
'\n\t\tGets studentinfo\n\n\t\tself.details -> dict()\n\n\t\t'
response = requests.post(Student.STUDENTINFO_URL, data={}, headers=Student.HEADERS, cookies=self.cookies)
res = response.json()
if (response.status_code == 200):
self.details = response.json()
return self.details
else:
print('Error: ', response.status_code)
return None | Gets studentinfo
self.details -> dict() | iterapi/iterapi.py | getInfo | Pawan0411/iterapi | 0 | python | def getInfo(self):
'\n\t\tGets studentinfo\n\n\t\tself.details -> dict()\n\n\t\t'
response = requests.post(Student.STUDENTINFO_URL, data={}, headers=Student.HEADERS, cookies=self.cookies)
res = response.json()
if (response.status_code == 200):
self.details = response.json()
return self.details
else:
print('Error: ', response.status_code)
return None | def getInfo(self):
'\n\t\tGets studentinfo\n\n\t\tself.details -> dict()\n\n\t\t'
response = requests.post(Student.STUDENTINFO_URL, data={}, headers=Student.HEADERS, cookies=self.cookies)
res = response.json()
if (response.status_code == 200):
self.details = response.json()
return self.details
else:
print('Error: ', response.status_code)
return None<|docstring|>Gets studentinfo
self.details -> dict()<|endoftext|> |
dd5816f62dfd01d305da2de0564ae9dc58cfdb67c132d61e1282511f928ef1c2 | def getPhoto(self):
' \n\t\tDownloads Student Profile Picture\n\t\t\n\t\tself.img_path -> str # Path to the image written\n\n\t\t'
response = requests.get(Student.STUDENTPHOTO_URL, data={}, headers=Student.HEADERS, cookies=self.cookies)
res = response.content
if (response.content == None):
print('Error: ', response.status_code)
return None
else:
self.img_path = (self.regdno + '.jpg')
with open(self.img_path, 'wb+') as image:
image.write(res)
print('File written to {}'.format(self.img_path))
return self.img_path | Downloads Student Profile Picture
self.img_path -> str # Path to the image written | iterapi/iterapi.py | getPhoto | Pawan0411/iterapi | 0 | python | def getPhoto(self):
' \n\t\tDownloads Student Profile Picture\n\t\t\n\t\tself.img_path -> str # Path to the image written\n\n\t\t'
response = requests.get(Student.STUDENTPHOTO_URL, data={}, headers=Student.HEADERS, cookies=self.cookies)
res = response.content
if (response.content == None):
print('Error: ', response.status_code)
return None
else:
self.img_path = (self.regdno + '.jpg')
with open(self.img_path, 'wb+') as image:
image.write(res)
print('File written to {}'.format(self.img_path))
return self.img_path | def getPhoto(self):
' \n\t\tDownloads Student Profile Picture\n\t\t\n\t\tself.img_path -> str # Path to the image written\n\n\t\t'
response = requests.get(Student.STUDENTPHOTO_URL, data={}, headers=Student.HEADERS, cookies=self.cookies)
res = response.content
if (response.content == None):
print('Error: ', response.status_code)
return None
else:
self.img_path = (self.regdno + '.jpg')
with open(self.img_path, 'wb+') as image:
image.write(res)
print('File written to {}'.format(self.img_path))
return self.img_path<|docstring|>Downloads Student Profile Picture
self.img_path -> str # Path to the image written<|endoftext|> |
5bd8f9c9bdad34c31c263dc501081ff5b3a73957d3a424e216d621f1268be444 | def getAttendance(self):
'\n\t\tGets current Attendance \n\n\t\tself.attendance -> dict()\n\n\t\t'
payload = str({'registerationid': 'ITERRETD2001A0000001'})
response = requests.post(Student.ATTENDANCE_URL, data=payload, headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.attendance = response.json()
return self.attendance
else:
print('Error: ', response.status_code)
return None | Gets current Attendance
self.attendance -> dict() | iterapi/iterapi.py | getAttendance | Pawan0411/iterapi | 0 | python | def getAttendance(self):
'\n\t\tGets current Attendance \n\n\t\tself.attendance -> dict()\n\n\t\t'
payload = str({'registerationid': 'ITERRETD2001A0000001'})
response = requests.post(Student.ATTENDANCE_URL, data=payload, headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.attendance = response.json()
return self.attendance
else:
print('Error: ', response.status_code)
return None | def getAttendance(self):
'\n\t\tGets current Attendance \n\n\t\tself.attendance -> dict()\n\n\t\t'
payload = str({'registerationid': 'ITERRETD2001A0000001'})
response = requests.post(Student.ATTENDANCE_URL, data=payload, headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.attendance = response.json()
return self.attendance
else:
print('Error: ', response.status_code)
return None<|docstring|>Gets current Attendance
self.attendance -> dict()<|endoftext|> |
a3ca2ae167d1282085d40b193d14dfcd61c13727fc227c484e1a65ac97a3e061 | def getResult(self):
'\n\t\tGets results\n\n\t\tself.result -> dict()\n\n\t\t'
payload = '{}'
response = requests.post(Student.STUDENTRESULT_URL, data=payload, headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.results = response.json()
return self.results
else:
print('Cannot fetch results.', response.status_code)
return None | Gets results
self.result -> dict() | iterapi/iterapi.py | getResult | Pawan0411/iterapi | 0 | python | def getResult(self):
'\n\t\tGets results\n\n\t\tself.result -> dict()\n\n\t\t'
payload = '{}'
response = requests.post(Student.STUDENTRESULT_URL, data=payload, headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.results = response.json()
return self.results
else:
print('Cannot fetch results.', response.status_code)
return None | def getResult(self):
'\n\t\tGets results\n\n\t\tself.result -> dict()\n\n\t\t'
payload = '{}'
response = requests.post(Student.STUDENTRESULT_URL, data=payload, headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.results = response.json()
return self.results
else:
print('Cannot fetch results.', response.status_code)
return None<|docstring|>Gets results
self.result -> dict()<|endoftext|> |
5527dd31775db8ab52f5048e9f38c4032b3ee589e1568c8bdc9af67b6f94ad30 | def getDetailedResult(self, sem):
'\n\t\tGets result details of a semester\n\n\t\tStored in self.resultDetail[sem] -> dict()\n\n\t\t'
payload = {'styno': str(sem)}
response = requests.post(Student.RESULTDETAIL_URL, data=str(payload), headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.resultDetail[sem] = response.json()
return self.resultDetail[sem]
else:
print('Cannot fetch results.', response.status_code)
return None | Gets result details of a semester
Stored in self.resultDetail[sem] -> dict() | iterapi/iterapi.py | getDetailedResult | Pawan0411/iterapi | 0 | python | def getDetailedResult(self, sem):
'\n\t\tGets result details of a semester\n\n\t\tStored in self.resultDetail[sem] -> dict()\n\n\t\t'
payload = {'styno': str(sem)}
response = requests.post(Student.RESULTDETAIL_URL, data=str(payload), headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.resultDetail[sem] = response.json()
return self.resultDetail[sem]
else:
print('Cannot fetch results.', response.status_code)
return None | def getDetailedResult(self, sem):
'\n\t\tGets result details of a semester\n\n\t\tStored in self.resultDetail[sem] -> dict()\n\n\t\t'
payload = {'styno': str(sem)}
response = requests.post(Student.RESULTDETAIL_URL, data=str(payload), headers=Student.HEADERS, cookies=self.cookies)
if (response.status_code == 200):
self.resultDetail[sem] = response.json()
return self.resultDetail[sem]
else:
print('Cannot fetch results.', response.status_code)
return None<|docstring|>Gets result details of a semester
Stored in self.resultDetail[sem] -> dict()<|endoftext|> |
e15a0a7445f2a44751957d2b9c667ae38dfb011572e02d10e6269297580e9221 | def _checkpointed_forward(self, hidden_states, attention_mask):
'Forward method with activation checkpointing.'
def custom(start, end):
def custom_forward(*inputs):
x_ = inputs[0]
for index in range(start, end):
layer = self._get_layer(index)
x_ = layer(x_, inputs[1])
return x_
return custom_forward
mpu.reset_checkpointed_activations_memory_buffer()
l = 0
while (l < self.num_layers):
hidden_states = mpu.checkpoint(custom(l, (l + self.checkpoint_num_layers)), hidden_states, attention_mask)
l += self.checkpoint_num_layers
return hidden_states | Forward method with activation checkpointing. | megatron/model/transformer.py | _checkpointed_forward | fplk/gpt-neox | 1 | python | def _checkpointed_forward(self, hidden_states, attention_mask):
def custom(start, end):
def custom_forward(*inputs):
x_ = inputs[0]
for index in range(start, end):
layer = self._get_layer(index)
x_ = layer(x_, inputs[1])
return x_
return custom_forward
mpu.reset_checkpointed_activations_memory_buffer()
l = 0
while (l < self.num_layers):
hidden_states = mpu.checkpoint(custom(l, (l + self.checkpoint_num_layers)), hidden_states, attention_mask)
l += self.checkpoint_num_layers
return hidden_states | def _checkpointed_forward(self, hidden_states, attention_mask):
def custom(start, end):
def custom_forward(*inputs):
x_ = inputs[0]
for index in range(start, end):
layer = self._get_layer(index)
x_ = layer(x_, inputs[1])
return x_
return custom_forward
mpu.reset_checkpointed_activations_memory_buffer()
l = 0
while (l < self.num_layers):
hidden_states = mpu.checkpoint(custom(l, (l + self.checkpoint_num_layers)), hidden_states, attention_mask)
l += self.checkpoint_num_layers
return hidden_states<|docstring|>Forward method with activation checkpointing.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.