Dataset columns (name, dtype, min..max string length or class count):

    body_hash               stringlengths    64..64
    body                    stringlengths    23..109k
    docstring               stringlengths    1..57k
    path                    stringlengths    4..198
    name                    stringlengths    1..115
    repository_name         stringlengths    7..111
    repository_stars        float64          0..191k
    lang                    stringclasses    1 value
    body_without_docstring  stringlengths    14..108k
    unified                 stringlengths    45..133k
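A minimal sketch of loading rows with this schema for inspection; the Parquet file name below is hypothetical, since the dump does not say where the data actually lives:

```python
import pandas as pd

# Hypothetical local copy of the rows; adjust the path to wherever the
# dataset is stored.
df = pd.read_parquet("python_functions.parquet")

# `lang` is listed above as a single-class column, so this prints one value.
print(df["lang"].unique())

# `unified` embeds the docstring between <|docstring|> and <|endoftext|>
# markers, so it is the longest of the three text columns.
lengths = df[["body", "body_without_docstring", "unified"]].apply(lambda c: c.str.len())
print(lengths.describe())
```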
body_hash: 6dad94e54ecdebe942218e0c4c84366feb2e5a6872b714ec1b6a5b78c3330914
path: schema_org/schema_org/core.py | name: retrieve_metadata_document | repository_name: DataONEorg/d1_ncei_adapter | repository_stars: 1 | lang: python

```python
async def retrieve_metadata_document(self, metadata_url):
    """
    Retrieve the remote metadata document and make any necessary
    transformations on it.

    Parameters
    ----------
    metadata_url : str
        URL of remote metadata document

    Returns
    -------
    The ElementTree object corresponding to the XML document.
    """
    msg = f'Requesting metadata URL {metadata_url}'
    self.logger.debug(msg)
    content, _ = await self.retrieve_url(metadata_url)

    try:
        doc = lxml.etree.parse(io.BytesIO(content))
    except Exception as e:
        msg = f'Unable to parse the metadata document at {metadata_url}: {e}.'
        raise XMLMetadataParsingError(msg) from e

    self.logger.debug('Got the metadata document')
    return doc
```
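The parsing step is plain lxml; the same pattern in a self-contained form, with an inline byte string standing in for the fetched content:

```python
import io
import lxml.etree

content = b"<metadata><title>demo</title></metadata>"

try:
    doc = lxml.etree.parse(io.BytesIO(content))
except lxml.etree.XMLSyntaxError as e:
    raise RuntimeError(f"Unable to parse the metadata document: {e}")

print(doc.getroot().tag)          # metadata
print(doc.find(".//title").text)  # demo
```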
body_hash: c8677e176c121676c34a87cc210a08bb2a6802012d7321a5a880d34fc71d9dcb
path: schema_org/schema_org/core.py | name: consume_sitemap | repository_name: DataONEorg/d1_ncei_adapter | repository_stars: 1 | lang: python

```python
async def consume_sitemap(self, idx, sitemap_queue):
    """
    In a producer/consumer paradigm, here we are consuming work items
    from the sitemap.

    Parameters
    ----------
    idx : int
        The only purpose for this is to identify the consumer in the logs.
    sitemap_queue : asyncio.Queue
        Holds jobs associated with the sitemap.  Each job includes, among
        other things, a URL and a modification time.
    """
    while True:
        try:
            job = await sitemap_queue.get()
            self.logger.debug(f'sitemap_consumer[{idx}] ==> {job.url}')
            msg = (
                f'last mod = {job.lastmod}: num failures so far = '
                f'{self.failed_count}, queue size = {sitemap_queue.qsize()}'
            )
            self.logger.info(msg)
            await self.process_job(job)
        except asyncio.CancelledError:
            self.logger.debug('CancelledError')
            break
        except SkipError as e:
            job.result = e
            self.job_records.append(copy.copy(job))
            msg = f'Unable to process {job.url}: {e}'
            self.logger.warning(msg)
        except Exception as e:
            job.result = e
            self.job_records.append(copy.copy(job))
            msg = (
                f'Unable to process {job.url}: {e}, '
                f'{job.identifier} failures so far = {job.num_failures}'
            )
            self.logger.error(msg)
            self.failed_count += 1
            if self.failed_count == self.max_num_errors:
                self.logger.warning('Error threshold reached.')
                await self.shutdown()
            if job.num_failures < self.retry:
                if isinstance(e, ERROR_RETRY_CANDIDATES):
                    self.logger.info(f'Throwing {job.url} back on queue')
                    job.num_failures += 1
                    sitemap_queue.put_nowait(job)
        else:
            job.result = None
            self.job_records.append(copy.copy(job))
            msg = f'sitemap_consumer[{idx}]: {SUCCESSFUL_INGEST_MESSAGE}: {job.identifier}'
            self.logger.debug(msg)
            msg = f'{SUCCESSFUL_INGEST_MESSAGE}: {job.identifier}'
            self.logger.info(msg)
        sitemap_queue.task_done()
```
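A stripped-down version of the queue/worker shutdown dance used above, with nothing beyond the standard library; `asyncio.sleep` stands in for `process_job`:

```python
import asyncio

async def consumer(idx, queue):
    # Same shutdown convention as above: cancellation breaks the loop.
    while True:
        try:
            job = await queue.get()
            await asyncio.sleep(0.01)  # stand-in for process_job
            print(f"consumer[{idx}] handled {job}")
        except asyncio.CancelledError:
            break
        queue.task_done()

async def main():
    queue = asyncio.Queue()
    for url in ("a", "b", "c", "d"):
        queue.put_nowait(url)
    tasks = [asyncio.create_task(consumer(i, queue)) for i in range(2)]
    await queue.join()  # wait until every job has been marked done
    for t in tasks:
        t.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)

asyncio.run(main())
```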
body_hash: f5381eb49dde041f6595b44a5a9ba192a448615e803c37424ed3154413adc6ec
path: schema_org/schema_org/core.py | name: process_job | repository_name: DataONEorg/d1_ncei_adapter | repository_stars: 1 | lang: python

```python
async def process_job(self, job):
    """
    Now that we have the record, validate and harvest it.

    Parameters
    ----------
    job : SlenderNodeJob
        Record containing at least the following attributes:  landing page
        URL, last document modification time according to the site map.
    """
    self.logger.debug('process_job: starting')
    series_id, pid, lastmod, doc = await self.retrieve_record(job.url)
    if lastmod is not None:
        job.lastmod = lastmod
    job.identifier = series_id

    self.validate_document(doc)
    await self.harvest_document(series_id, pid, doc, job.lastmod)
    self.logger.debug('process_job: finished')
```
body_hash: befa4d3ef1fed6bb11bbfc55e9cd07f757ceaff0db4718e48ee2ebf0b58de4ff
path: schema_org/schema_org/core.py | name: validate_document | repository_name: DataONEorg/d1_ncei_adapter | repository_stars: 1 | lang: python

```python
def validate_document(self, doc):
    """
    Verify that the format ID we have for the document is correct.

    Parameters
    ----------
    doc : bytes
        serialized version of XML metadata document
    """
    format_id = self.sys_meta_dict['formatId_custom']
    try:
        d1_scimeta.validate.assert_valid(format_id, doc)
    except Exception:
        msg = f'Default validation failed with format ID {format_id}.'
        self.logger.info(msg)
        validator = XMLValidator(logger=self.logger)
        format_id = validator.validate(doc)
        if format_id is None:
            raise XMLValidationError('XML metadata validation failed.')
        else:
            self.sys_meta_dict['formatId_custom'] = format_id
```
body_hash: ae93fd515b2aa782c6dcd65b7b9729378b03296ced85c1564cecd8cac8497099
path: schema_org/schema_org/core.py | name: retrieve_record | repository_name: DataONEorg/d1_ncei_adapter | repository_stars: 1 | lang: python

```python
async def retrieve_record(self, document_url):
    """
    Parameters
    ----------
    document_url : str
        URL for a remote document, could be a landing page, could be an
        XML document

    Returns
    -------
    identifier : str
        Ideally this is a DOI, but here it is a UUID.
    doc : ElementTree
        Metadata document
    """
    raise NotImplementedError('must implement retrieve_record in sub class')
```
body_hash: 07552ae0cadb67719c38d6eb84e0a967e17c8423b7525752f2ee5b7c81105694
path: schema_org/schema_org/core.py | name: process_sitemap | repository_name: DataONEorg/d1_ncei_adapter | repository_stars: 1 | lang: python

```python
async def process_sitemap(self, sitemap_url, last_harvest):
    """
    Determine if the sitemap (or RSS feed or whatever) is an index file
    or whether it is a single document.  If an index file, we need to
    descend recursively into it.

    Parameters
    ----------
    sitemap_url : str
        URL for a sitemap or sitemap index file
    last_harvest : datetime
        According to the MN, this is the last time we harvested any
        document.
    """
    msg = f'process_sitemap: {sitemap_url}, {last_harvest}'
    self.logger.debug(msg)

    doc = await self.get_sitemap_document(sitemap_url)
    if self.is_sitemap_index_file(doc):
        msg = 'process_sitemap: This is a sitemap index file.'
        self.logger.debug(msg)

        path = 'sm:sitemap/sm:loc/text()'
        sitemap_urls = doc.xpath(path, namespaces=SITEMAP_NS)
        for sitemap_url in sitemap_urls:
            await self.process_sitemap(sitemap_url, last_harvest)
    else:
        self.logger.debug('process_sitemap: This is a sitemap leaf.')
        self._sitemaps.append(sitemap_url)
        await self.process_sitemap_leaf(doc, last_harvest)
```
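The index-versus-leaf branch hinges on the sitemap namespace; a self-contained look at the same XPath, with an inline index document standing in for a fetched one:

```python
import io
import lxml.etree

SITEMAP_NS = {"sm": "http://www.sitemaps.org/schemas/sitemap/0.9"}

index_xml = b"""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
  <sitemap><loc>https://example.com/sitemap1.xml</loc></sitemap>
  <sitemap><loc>https://example.com/sitemap2.xml</loc></sitemap>
</sitemapindex>"""

doc = lxml.etree.parse(io.BytesIO(index_xml))

# An index file nests <sitemap> elements; a leaf nests <url> elements.
# Relative XPath on the parsed tree is evaluated against the root element.
urls = doc.xpath("sm:sitemap/sm:loc/text()", namespaces=SITEMAP_NS)
print(urls)  # ['https://example.com/sitemap1.xml', 'https://example.com/sitemap2.xml']
```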
body_hash: 00e57a199f1b05802821b73bc12f1baa8999752039d9e2cba6bcfb99a7309f42
path: schema_org/schema_org/core.py | name: get_sitemap_document | repository_name: DataONEorg/d1_ncei_adapter | repository_stars: 1 | lang: python

```python
async def get_sitemap_document(self, sitemap_url):
    """
    Retrieve a remote sitemap document.

    Parameters
    ----------
    sitemap_url : str
        URL for a sitemap or sitemap index file.
    """
    self.logger.info(f'Requesting sitemap document from {sitemap_url}')
    try:
        content, headers = await self.retrieve_url(sitemap_url)
        self.check_xml_headers(headers)
    except Exception as e:
        msg = f'{SITEMAP_RETRIEVAL_FAILURE_MESSAGE} due to {repr(e)}'
        self.logger.error(msg)
        raise

    try:
        doc = lxml.etree.parse(io.BytesIO(content))
    except lxml.etree.XMLSyntaxError as e:
        # The payload may be gzipped; retry after decompressing.
        msg1 = str(e)
        try:
            doc = lxml.etree.parse(io.BytesIO(gzip.decompress(content)))
        except OSError:
            msg = f'XMLSyntaxError: sitemap document at {sitemap_url}: {msg1}'
            self.logger.error(msg)
            msg = f'Unable to process the sitemap retrieved from {sitemap_url}.'
            raise InvalidSitemapError(msg)

    return doc
```
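The gzip fallback can be exercised without a network; a sketch, with the caveat noted in the comments:

```python
import gzip
import io
import lxml.etree

xml = b'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"/>'
content = gzip.compress(xml)  # e.g. a sitemap served as sitemap.xml.gz

try:
    doc = lxml.etree.parse(io.BytesIO(content))
except lxml.etree.XMLSyntaxError:
    # Fall back to treating the payload as gzipped, as above.  Note that
    # libxml2 builds with zlib support may decompress transparently, in
    # which case the first parse already succeeds.
    doc = lxml.etree.parse(io.BytesIO(gzip.decompress(content)))

print(doc.getroot().tag)  # {http://www.sitemaps.org/schemas/sitemap/0.9}urlset
```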
body_hash: 5084072162c1cd8de089f8fa4e284308dbf5c6f7e373114da4fbe7d7261d2b0d
path: schema_org/schema_org/core.py | name: process_sitemap_leaf | repository_name: DataONEorg/d1_ncei_adapter | repository_stars: 1 | lang: python

```python
async def process_sitemap_leaf(self, doc, last_harvest):
    """
    We are at a sitemap leaf, i.e. the sitemap does not reference other
    sitemaps.  This is where we can retrieve landing pages instead of
    other sitemaps.

    Parameters
    ----------
    doc : ElementTree object
        Describes the sitemap leaf.
    last_harvest : datetime
        According to the MN, this is the last time we harvested any
        document.
    """
    self.logger.debug('process_sitemap_leaf:')

    records = self.extract_records_from_sitemap(doc)
    records = self.post_process_sitemap_records(records, last_harvest)
    self._sitemap_records.extend(records)
    if self.no_harvest:
        return

    sitemap_queue = asyncio.Queue()
    for url, lastmod_time in records:
        job = SlenderNodeJob(url, '', lastmod_time, 0, None)
        sitemap_queue.put_nowait(job)

    tasks = []
    for j in range(self.num_workers):
        msg = f'process_sitemap_leaf: create task for sitemap_consumer[{j}]'
        self.logger.debug(msg)
        task = asyncio.create_task(self.consume_sitemap(j, sitemap_queue))
        tasks.append(task)

    await sitemap_queue.join()
    for task in tasks:
        task.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)
```
body_hash: 2696cd3adf18d48a0469d65fe8f51e49052d1c77b4f7fbc03022beecee810607
path: laughs/services/dadjokes.py | name: get_joke | repository_name: seancallaway/laughs | repository_stars: 0 | lang: python

```python
def get_joke():
    """Return a joke from the icanhazdadjoke.com API.

    Returns None if unable to retrieve a joke.
    """
    headers = {'Accept': 'application/json'}
    page = requests.get('https://icanhazdadjoke.com', headers=headers)
    if page.status_code == 200:
        joke = json.loads(page.content.decode('UTF-8'))
        return joke['joke']
    return None
```
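For reference, icanhazdadjoke.com also serves plain text when asked, which skips the JSON decode step entirely; a minimal sketch:

```python
import requests

resp = requests.get(
    "https://icanhazdadjoke.com",
    headers={"Accept": "text/plain"},
    timeout=10,
)
if resp.status_code == 200:
    print(resp.text)  # the joke itself, no JSON wrapper
```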
body_hash: c77365c637ac1afb1c1d36a0d78a367719ef501ca33b4e95c0607f1a2ad1901a
path: vscode/window.py | name: cursor | repository_name: TTitcombe/vscode-ext | repository_stars: 140 | lang: python

```python
@property
def cursor(self) -> Position:
    """
    The cursor position of the 1st selection.
    """
    return self.selection.active
```
body_hash: 31461040ae3a5d0a1f8de8636dcc77bedae46ad8f9b839b004f80c29e657894d
path: pmareport/predictors.py | name: read_data | repository_name: gautsi/pmareport | repository_stars: 0 | lang: python

```python
def read_data(fp='../data/pmadata.csv'):
    """
    Read clinic data from a csv into a pandas dataframe.

    :param str fp: the file path of the csv file
    """
    return pd.read_csv(fp)
```
body_hash: c97eee076903c4d6163e8e4216bfedf26a7b53a7674efc813df4003d83a135e0
path: pmareport/predictors.py | name: percent_within | repository_name: gautsi/pmareport | repository_stars: 0 | lang: python

```python
def percent_within(y_true, y_pred, thresh=5):
    """
    Calculate the percentage of predictions that are within
    `thresh` of the true value.

    :param array-like y_true: the true values
    :param array-like y_pred: the predicted values
    :param float thresh: the threshold for a close prediction

    :returns:
        the percent of predictions within the threshold of the true value
    :rtype: float
    """
    return np.sum(np.abs(y_true - y_pred) < thresh) / float(len(y_true)) * 100
```
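A quick worked check of the metric, inlining the same arithmetic:

```python
import numpy as np

y_true = np.array([10.0, 20.0, 30.0, 40.0])
y_pred = np.array([12.0, 27.0, 30.5, 10.0])

# |err| = [2, 7, 0.5, 30]; two of the four fall below the default thresh
# of 5, so the score is 50.0.
score = np.sum(np.abs(y_true - y_pred) < 5) / float(len(y_true)) * 100
print(score)  # 50.0
```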
body_hash: ae6e021b660226d2641167800b542dba7fd2a8603ec434115beb2f6483ae0048
path: pmareport/predictors.py | name: make_int | repository_name: gautsi/pmareport | repository_stars: 0 | lang: python

```python
def make_int(self, col):
    """
    Encode categorical variables of type other than int
    as ints for input into the decision tree.

    :param str col: the name of the column with categorical values
    """
    categories = list(set(self.df[col]))
    int_func = lambda x: categories.index(x)
    self.df[col + 'i'] = self.df[col].apply(int_func)
    self.feat_cols.remove(col)
    self.feat_cols.append(col + 'i')
    self.int_funcs[col] = int_func
```
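One caveat worth knowing about the approach above: `list(set(...))` yields an arbitrary category order, so the integer codes can differ between runs. pandas' categorical codes are a deterministic equivalent:

```python
import pandas as pd

df = pd.DataFrame({"clinic": ["north", "south", "north", "east"]})

# Codes follow sorted category order (east=0, north=1, south=2), the same
# way every run.
df["clinici"] = df["clinic"].astype("category").cat.codes
print(df)
```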
body_hash: ce8ce04ef68fb32d601b01790c27f7e156aef21c4d92db45cd259cd57f570dd4
path: pmareport/predictors.py | name: train_test | repository_name: gautsi/pmareport | repository_stars: 0 | lang: python

```python
def train_test(self, test_size=0.1):
    """
    Split the data into train and test sets.

    :param float test_size: the percentage of rows to leave out as test
    """
    # cross_validation is the legacy scikit-learn module; its modern
    # replacement is sklearn.model_selection.
    self.train, self.test = cross_validation.train_test_split(
        self.df, test_size=test_size)
    self.Xtrain = self.train[self.feat_cols]
    self.ytrain = self.train[self.response_col]
    self.Xtest = self.test[self.feat_cols]
    self.ytest = self.test[self.response_col]
```
body_hash: 0fbbfd19a8970a98e3fe68cb51d7d1aa9689fe06be8e697be6a1964fc5a3b37b
path: pmareport/predictors.py | name: make_model | repository_name: gautsi/pmareport | repository_stars: 0 | lang: python

```python
def make_model(self, max_depth=3):
    """
    Make the model, a decision tree with maximum depth `max_depth`.

    :param max_depth: the maximum depth of the decision tree
    """
    self.model = tree.DecisionTreeRegressor(max_depth=max_depth)
```
body_hash: 66f8e09f51c531f271885d89acb3f1ec1177e4f41f135da80f85027d52cadef6
path: pmareport/predictors.py | name: cv_evalution | repository_name: gautsi/pmareport | repository_stars: 0 | lang: python

```python
def cv_evalution(self, n_folds=10, thresh=5):
    """
    Evaluate the model on a cross validation split
    of the training data with `n_folds` number of folds.
    The metric is the percent of predictions within `thresh`
    of the true value.

    :param int n_folds: the number of folds for the cross validation
    :param float thresh:
        the threshold for considering a prediction close to the true value

    :returns: the average of metric values over the folds
    :rtype: float
    """
    cv = cross_validation.KFold(len(self.train), n_folds=n_folds)
    score_list = []
    for train, test in cv:
        cvXtrain = self.Xtrain.iloc[train]
        cvXtest = self.Xtrain.iloc[test]
        cvytrain = self.ytrain.iloc[train]
        cvytest = self.ytrain.iloc[test]
        self.model.fit(cvXtrain, cvytrain)
        pred = self.model.predict(cvXtest)
        score = percent_within(y_true=cvytest, y_pred=pred, thresh=thresh)
        score_list.append(score)
    return np.mean(score_list)
```
body_hash: 5540ebd126194e0bbfb4942a1665eb5f64865cc9889e88ec9c0b25972762f90d
path: pmareport/predictors.py | name: fit | repository_name: gautsi/pmareport | repository_stars: 0 | lang: python

```python
def fit(self, thresh=5):
    """
    Fit the model on the training set and evaluate it
    on the test set. The metric is the percent of
    predictions within `thresh` of the true value.

    :param float thresh:
        the threshold for considering a prediction close to the true value

    :returns: the score of the model on the test set
    :rtype: float
    """
    self.model.fit(self.Xtrain, self.ytrain)
    predictions = self.model.predict(self.Xtest)
    score = percent_within(y_true=self.ytest, y_pred=predictions, thresh=thresh)
    return score
```
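The five methods above form one small decision-tree workflow on the long-removed sklearn `cross_validation` module. A self-contained sketch of the same flow on synthetic data with the modern `model_selection` API (a sketch, not the repository's code):

```python
import numpy as np
from sklearn.model_selection import KFold, train_test_split
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = X @ np.array([5.0, -2.0, 1.0]) + rng.normal(scale=2.0, size=200)

Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.1, random_state=0)
model = DecisionTreeRegressor(max_depth=3)

# Cross-validated percent_within, mirroring cv_evalution above.
scores = []
for train_idx, test_idx in KFold(n_splits=10).split(Xtrain):
    model.fit(Xtrain[train_idx], ytrain[train_idx])
    pred = model.predict(Xtrain[test_idx])
    scores.append(np.mean(np.abs(ytrain[test_idx] - pred) < 5) * 100)
print(np.mean(scores))

# Final fit and score on the held-out split, mirroring fit above.
model.fit(Xtrain, ytrain)
print(np.mean(np.abs(ytest - model.predict(Xtest)) < 5) * 100)
```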
body_hash: 74c6162a0aba99ad0b13e6285809f0974a787a60af4ea31c6076cabc91231c6b
path: api/geocodr/mapping.py | name: load_collections | repository_name: axza/geocodr | repository_stars: 3 | lang: python

```python
def load_collections(fname):
    """
    Open a geocodr mapping file and return all geocodr.search.Collection
    subclasses.
    """
    collections = []
    with open(fname, 'r') as f:
        code = compile(f.read(), fname, 'exec')
        ns = {}
        exec(code, ns)

    src_proj = None
    for v in ns.values():
        if (inspect.isclass(v) and issubclass(v, Collection)
                and v != Collection and v.name):
            coll = v()
            if src_proj:
                assert coll.src_proj == src_proj, \
                    'all Collections need the same src_proj'
            else:
                src_proj = coll.src_proj
            collections.append(coll)
    return collections
```
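The dynamic-load trick above is plain `compile`/`exec` into a fresh namespace; a self-contained sketch with stand-in classes, since the real `Collection` lives in geocodr.search:

```python
import inspect

# Stand-in for the contents of a mapping file; the class names and the
# src_proj value are invented for illustration.
source = '''
class Collection:
    name = None

class Roads(Collection):
    name = "roads"
    src_proj = "EPSG:25833"

class Parcels(Collection):
    name = "parcels"
    src_proj = "EPSG:25833"
'''

ns = {}
exec(compile(source, "<mapping>", "exec"), ns)

Base = ns["Collection"]
found = [v for v in ns.values()
         if inspect.isclass(v) and issubclass(v, Base) and v is not Base and v.name]
print([c.name for c in found])  # ['roads', 'parcels']
```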
body_hash: fd8bc83e4dc3507f3117ef33adf48571cedd2bd391348c4209c1070e72ec6cc8
path: webminer/interface_adapters/rest_adapters/request_objects.py | name: from_dict | repository_name: liadmagen/Keep-Current-Crawler | repository_stars: 38 | lang: python

```python
@classmethod
def from_dict(cls, adict):
    """Convert dictionary to Arxiv document request object

    Args:
        adict (dict): a python dictionary

    Returns:
        an ArxivDocumentListRequestObject, or an InvalidRequestObject
        if the filters are malformed
    """
    invalid_req = req.InvalidRequestObject()

    if 'filters' in adict and not isinstance(adict['filters'], collections.abc.Mapping):
        invalid_req.add_error('filters', 'Is not iterable')
    if invalid_req.has_errors():
        return invalid_req

    return ArxivDocumentListRequestObject(filters=adict.get('filters', None))
```
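The filters check rests on the Mapping ABC (spelled `collections.abc.Mapping` on current Python, since the bare `collections.Mapping` alias was removed in 3.10); its behavior in two lines:

```python
from collections.abc import Mapping

print(isinstance({"category": "ai"}, Mapping))  # True: dict passes the check
print(isinstance(["ai"], Mapping))              # False: a list triggers the error above
```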
body_hash: c160410c16a50d7a9b086330c9537e4120b8eaa162a7b1509373cd562f39cdc6
path: tests/unit/test_orderedWinEventLimiter.py | name: test_nonSpecialCaseEvents | repository_name: lukaszgo1/nvda | repository_stars: 1,592 | lang: python

```python
def test_nonSpecialCaseEvents(self):
    """Test that the list of events without special cases matches expectations"""
    self.assertEqual(39, len(nonSpecialCaseEvents))
```
body_hash: ac4f2ae1edfb66ed3793d4a57915e9a3449589676dd332bd62a76edb65a74e90
path: tests/unit/test_orderedWinEventLimiter.py | name: test_threadLimit_singleObject | repository_name: lukaszgo1/nvda | repository_stars: 1,592 | lang: python

```python
def test_threadLimit_singleObject(self):
    """Test that only the latest events are kept when the thread limit is exceeded"""
    source = (2, 2, 2)
    limiter = OrderedWinEventLimiter(maxFocusItems=4)
    for n in range(500):
        eventId = nonSpecialCaseEvents[n % len(nonSpecialCaseEvents)]
        limiter.addEvent(eventId, *source, threadID=0)
    events = limiter.flushEvents()
    expectedEventCount = orderedWinEventLimiter.MAX_WINEVENTS_PER_THREAD
    self.assertEqual(expectedEventCount, len(events))
```
body_hash: 94e71ad6762dd7bd5a7bb1b8b6a8a2dd518a5f8141d22cca0decd0ae9a71f2cc
path: tests/unit/test_orderedWinEventLimiter.py | name: test_threadLimit_noCanary | repository_name: lukaszgo1/nvda | repository_stars: 1,592 | lang: python

```python
def test_threadLimit_noCanary(self):
    """Test that only the latest events are kept when the thread limit is exceeded"""
    limiter = OrderedWinEventLimiter(maxFocusItems=4)
    for n in range(500):
        eventId = nonSpecialCaseEvents[n % len(nonSpecialCaseEvents)]
        source = (2, 2, n)
        limiter.addEvent(eventId, *source, threadID=0)
    events = limiter.flushEvents()
    errors = []
    expectedEventCount = orderedWinEventLimiter.MAX_WINEVENTS_PER_THREAD
    softAssert(errors, self.assertEqual, expectedEventCount, len(events))
    self.assertListEqual([], errors)
```
body_hash: 07d268f361cc6a0778c3c4692d1715b80f33aeb1d646c8b99964221ff4189d0b
path: tests/unit/test_orderedWinEventLimiter.py | name: test_threadLimit_withCanaryAtStart | repository_name: lukaszgo1/nvda | repository_stars: 1,592 | lang: python

```python
def test_threadLimit_withCanaryAtStart(self):
    """Test that only the latest events are kept when the thread limit is exceeded"""
    limiter = OrderedWinEventLimiter(maxFocusItems=4)
    canaryObject = (1, 1, 1)
    eventStartCanary = (winUser.EVENT_OBJECT_VALUECHANGE, *canaryObject)
    limiter.addEvent(*eventStartCanary, threadID=0)
    for n in range(500):
        eventId = nonSpecialCaseEvents[n % len(nonSpecialCaseEvents)]
        source = (2, 2, n)
        limiter.addEvent(eventId, *source, threadID=0)
    events = limiter.flushEvents()
    errors = []
    expectedEventCount = orderedWinEventLimiter.MAX_WINEVENTS_PER_THREAD
    softAssert(errors, self.assertEqual, expectedEventCount, len(events))
    softAssert(errors, self.assertNotIn, eventStartCanary, events)
    self.assertListEqual([], errors)
```
body_hash: 81da0583d3566d88bf31df756f8a53ae45cab150b0e2367f3458ddee65555696
path: tests/unit/test_orderedWinEventLimiter.py | name: test_threadLimit_canaryStartAndEnd | repository_name: lukaszgo1/nvda | repository_stars: 1,592 | lang: python

```python
def test_threadLimit_canaryStartAndEnd(self):
    """Test that only the latest events are kept when the thread limit is exceeded"""
    limiter = OrderedWinEventLimiter(maxFocusItems=4)
    canaryObject = (1, 1, 1)
    eventStartCanary = (winUser.EVENT_OBJECT_VALUECHANGE, *canaryObject)
    limiter.addEvent(*eventStartCanary, threadID=0)
    for n in range(500):
        eventId = nonSpecialCaseEvents[n % len(nonSpecialCaseEvents)]
        source = (2, 2, n)
        limiter.addEvent(eventId, *source, threadID=0)
    eventEndCanary = (winUser.EVENT_OBJECT_NAMECHANGE, *canaryObject)
    limiter.addEvent(*eventEndCanary, threadID=0)
    events = limiter.flushEvents()
    errors = []
    expectedEventCount = orderedWinEventLimiter.MAX_WINEVENTS_PER_THREAD
    softAssert(errors, self.assertEqual, expectedEventCount, len(events))
    softAssert(errors, self.assertIn, eventEndCanary, events)
    softAssert(errors, self.assertNotIn, eventStartCanary, events)
    self.assertListEqual([], errors)
```
body_hash: be02cd8566e02c71dabc3ad50149ac68be95ba0df63486a55205f583723dade0
path: tests/unit/test_orderedWinEventLimiter.py | name: test_alwaysAllowedObjects | repository_name: lukaszgo1/nvda | repository_stars: 1,592 | lang: python

```python
def test_alwaysAllowedObjects(self):
    """Matches test_threadLimit_canaryStartAndEnd, but allows events from the first object"""
    limiter = OrderedWinEventLimiter(maxFocusItems=4)
    canaryObject = (1, 1, 1)
    eventStartCanary = (winUser.EVENT_OBJECT_VALUECHANGE, *canaryObject)
    limiter.addEvent(*eventStartCanary, threadID=0)
    for n in range(orderedWinEventLimiter.MAX_WINEVENTS_PER_THREAD):
        eventId = nonSpecialCaseEvents[n % len(nonSpecialCaseEvents)]
        source = (2, 2, n)
        limiter.addEvent(eventId, *source, threadID=0)
    eventEndCanary = (winUser.EVENT_OBJECT_NAMECHANGE, *canaryObject)
    limiter.addEvent(*eventEndCanary, threadID=0)
    events = limiter.flushEvents(alwaysAllowedObjects=[canaryObject])
    self.assertEqual(11, len(events))
    self.assertIn(eventStartCanary, events)
    self.assertEqual(eventStartCanary, events[0])
    self.assertIn(eventEndCanary, events)
```
def test_alwaysAllowedObjects(self): '\n\t\t' limiter = OrderedWinEventLimiter(maxFocusItems=4) canaryObject = (1, 1, 1) eventStartCanary = (winUser.EVENT_OBJECT_VALUECHANGE, *canaryObject) limiter.addEvent(*eventStartCanary, threadID=0) for n in range(orderedWinEventLimiter.MAX_WINEVENTS_PER_THREAD): eventId = nonSpecialCaseEvents[(n % len(nonSpecialCaseEvents))] source = (2, 2, n) limiter.addEvent(eventId, *source, threadID=0) eventEndCanary = (winUser.EVENT_OBJECT_NAMECHANGE, *canaryObject) limiter.addEvent(*eventEndCanary, threadID=0) events = limiter.flushEvents(alwaysAllowedObjects=[canaryObject]) self.assertEqual(11, len(events)) self.assertIn(eventStartCanary, events) self.assertEqual(eventStartCanary, events[0]) self.assertIn(eventEndCanary, events)
def test_alwaysAllowedObjects(self): '\n\t\t' limiter = OrderedWinEventLimiter(maxFocusItems=4) canaryObject = (1, 1, 1) eventStartCanary = (winUser.EVENT_OBJECT_VALUECHANGE, *canaryObject) limiter.addEvent(*eventStartCanary, threadID=0) for n in range(orderedWinEventLimiter.MAX_WINEVENTS_PER_THREAD): eventId = nonSpecialCaseEvents[(n % len(nonSpecialCaseEvents))] source = (2, 2, n) limiter.addEvent(eventId, *source, threadID=0) eventEndCanary = (winUser.EVENT_OBJECT_NAMECHANGE, *canaryObject) limiter.addEvent(*eventEndCanary, threadID=0) events = limiter.flushEvents(alwaysAllowedObjects=[canaryObject]) self.assertEqual(11, len(events)) self.assertIn(eventStartCanary, events) self.assertEqual(eventStartCanary, events[0]) self.assertIn(eventEndCanary, events)<|docstring|>Matches test_threadLimit_canaryStartAndEnd, but allows events from the first object<|endoftext|>
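A minimal usage sketch of the limiter API exercised by the two tests above. The constructor, addEvent, and flushEvents calls mirror the tests; the import path and the concrete event-source triple are assumptions for illustration, not taken from these records.

import winUser
from orderedWinEventLimiter import OrderedWinEventLimiter  # assumed import path

limiter = OrderedWinEventLimiter(maxFocusItems=4)
# addEvent takes an event id, a (window, objectId, childId) source triple,
# and the id of the thread that emitted the event.
limiter.addEvent(winUser.EVENT_OBJECT_NAMECHANGE, 2, 2, 0, threadID=0)
# flushEvents caps each thread's backlog at MAX_WINEVENTS_PER_THREAD and can
# exempt specific objects via alwaysAllowedObjects, as the second test shows.
events = limiter.flushEvents(alwaysAllowedObjects=[(2, 2, 0)])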
ed1b8ec65f68ee3eaff51901d79275bd7b04fe38dbb8d3daaf8a67eba9c8703a
@staticmethod @CachedMethods.register def query_oxo(uid): '\n This takes a curie id and sends that id to EMBL-EBI OXO to convert to cui\n ' url_str = ('https://www.ebi.ac.uk/spot/oxo/api/mappings?fromId=' + str(uid)) requests = CacheControlHelper() try: res = requests.get(url_str, headers={'accept': 'application/json'}, timeout=120) except requests.exceptions.Timeout: print(('HTTP timeout in SemMedInterface.py; URL: ' + url_str), file=sys.stderr) time.sleep(1) return None except requests.exceptions.ConnectionError: print(('HTTP connection error in SemMedInterface.py; URL: ' + url_str), file=sys.stderr) time.sleep(1) return None except sqlite3.OperationalError: print(('Error reading sqlite cache; URL: ' + url_str), file=sys.stderr) return None status_code = res.status_code if (status_code != 200): print(((('HTTP response status code: ' + str(status_code)) + ' for URL:\n') + url_str), file=sys.stderr) res = None return res
This takes a curie id and sends that id to EMBL-EBI OXO to convert to cui
code/reasoningtool/kg-construction/NormGoogleDistance.py
query_oxo
rtx-travis-tester/RTX
31
python
@staticmethod @CachedMethods.register def query_oxo(uid): '\n \n ' url_str = ('https://www.ebi.ac.uk/spot/oxo/api/mappings?fromId=' + str(uid)) requests = CacheControlHelper() try: res = requests.get(url_str, headers={'accept': 'application/json'}, timeout=120) except requests.exceptions.Timeout: print(('HTTP timeout in SemMedInterface.py; URL: ' + url_str), file=sys.stderr) time.sleep(1) return None except requests.exceptions.ConnectionError: print(('HTTP connection error in SemMedInterface.py; URL: ' + url_str), file=sys.stderr) time.sleep(1) return None except sqlite3.OperationalError: print(('Error reading sqlite cache; URL: ' + url_str), file=sys.stderr) return None status_code = res.status_code if (status_code != 200): print(((('HTTP response status code: ' + str(status_code)) + ' for URL:\n') + url_str), file=sys.stderr) res = None return res
@staticmethod @CachedMethods.register def query_oxo(uid): '\n \n ' url_str = ('https://www.ebi.ac.uk/spot/oxo/api/mappings?fromId=' + str(uid)) requests = CacheControlHelper() try: res = requests.get(url_str, headers={'accept': 'application/json'}, timeout=120) except requests.exceptions.Timeout: print(('HTTP timeout in SemMedInterface.py; URL: ' + url_str), file=sys.stderr) time.sleep(1) return None except requests.exceptions.ConnectionError: print(('HTTP connection error in SemMedInterface.py; URL: ' + url_str), file=sys.stderr) time.sleep(1) return None except sqlite3.OperationalError: print(('Error reading sqlite cache; URL: ' + url_str), file=sys.stderr) return None status_code = res.status_code if (status_code != 200): print(((('HTTP response status code: ' + str(status_code)) + ' for URL:\n') + url_str), file=sys.stderr) res = None return res<|docstring|>This takes a curie id and sends that id to EMBL-EBI OXO to convert to cui<|endoftext|>
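For orientation, the EMBL-EBI OXO request issued by query_oxo above can be sketched with plain requests; this drops the repository's CacheControlHelper wrapper (and its caching), and the example curie is illustrative only.

import requests

uid = 'DOID:8398'  # illustrative curie
url = 'https://www.ebi.ac.uk/spot/oxo/api/mappings?fromId=' + uid
res = requests.get(url, headers={'accept': 'application/json'}, timeout=120)
if res.status_code == 200:
    mappings = res.json()  # JSON payload with the cross-ontology mappings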
ee239bfd5168ffe7f80bf17e7beae6c6c63c301dd0cfba2b78930ab93ae2403f
@staticmethod @CachedMethods.register def get_mesh_term_for_all(curie_id, description): '\n Takes a curie ID, detects the ontology from the curie id, and then finds the mesh term\n Params:\n curie_id - A string containing the curie id of the node. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description - A string containing the English name for the node\n current functionality (+ means has it, - means does not have it)\n "Reactome" +\n "GO" - found gene conversion but no biological process conversion\n "UniProt" +\n "HP" - +\n "UBERON" +\n "CL" - not supposed to be here?\n "NCBIGene" +\n "DOID" +\n "OMIM" +\n "ChEMBL" +\n ' if (type(description) != str): description = str(description) curie_list = curie_id.split(':') names = None if QueryNCBIeUtils.is_mesh_term(description): return [(description + '[MeSH Terms]')] names = NormGoogleDistance.get_mesh_from_oxo(curie_id) if (names is None): if curie_list[0].lower().startswith('react'): res = QueryNCBIeUtils.get_reactome_names(curie_list[1]) if (res is not None): names = res.split('|') elif (curie_list[0] == 'GO'): pass elif curie_list[0].startswith('UniProt'): res = QueryNCBIeUtils.get_uniprot_names(curie_list[1]) if (res is not None): names = res.split('|') elif (curie_list[0] == 'HP'): names = QueryNCBIeUtils.get_mesh_terms_for_hp_id(curie_id) elif (curie_list[0] == 'UBERON'): if curie_id.endswith('PHENOTYPE'): curie_id = curie_id[:(- 9)] mesh_id = QueryEBIOLS.get_mesh_id_for_uberon_id(curie_id) names = [] for entry in mesh_id: if (len(entry.split('.')) > 1): uids = QueryNCBIeUtils.get_mesh_uids_for_mesh_tree(entry.split(':')[1]) for uid in uids: try: uid_num = (int(uid.split(':')[1][1:]) + 68000000) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) except IndexError: uid_num = int(uid) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) else: try: uid = entry.split(':')[1] uid_num = (int(uid[1:]) + 68000000) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) except IndexError: uid_num = int(entry) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) if (len(names) == 0): names = None else: names[0] = (names[0] + '[MeSH Terms]') elif (curie_list[0] == 'NCBIGene'): gene_id = curie_id.split(':')[1] names = QueryNCBIeUtils.get_pubmed_from_ncbi_gene(gene_id) elif (curie_list[0] == 'DOID'): mesh_id = QueryDisont.query_disont_to_mesh_id(curie_id) names = [] for uid in mesh_id: uid_num = (int(uid[1:]) + 68000000) name = QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) if (name is not None): names += name if (len(names) == 0): names = None else: names[0] = (names[0] + '[MeSH Terms]') elif (curie_list[0] == 'OMIM'): names = QueryNCBIeUtils.get_mesh_terms_for_omim_id(curie_list[1]) elif (curie_list[0] == 'ChEMBL'): chembl_id = curie_id.replace(':', '').upper() mesh_id = QueryMyChem.get_mesh_id(chembl_id) if (mesh_id is not None): mesh_id = (int(mesh_id[1:]) + 68000000) names = QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(mesh_id) if (names is not None): if (type(names) == list): for name in names: if name.endswith('[MeSH Terms]'): return [name] return names return [description.replace(';', '|')]
Takes a curie ID, detects the ontology from the curie id, and then finds the mesh term Params: curie_id - A string containing the curie id of the node. Formatted <source abbreviation>:<number> e.g. DOID:8398 description - A string containing the English name for the node current functionality (+ means has it, - means does not have it) "Reactome" + "GO" - found gene conversion but no biological process conversion "UniProt" + "HP" - + "UBERON" + "CL" - not supposed to be here? "NCBIGene" + "DOID" + "OMIM" + "ChEMBL" +
code/reasoningtool/kg-construction/NormGoogleDistance.py
get_mesh_term_for_all
rtx-travis-tester/RTX
31
python
@staticmethod @CachedMethods.register def get_mesh_term_for_all(curie_id, description): '\n Takes a curie ID, detects the ontology from the curie id, and then finds the mesh term\n Params:\n curie_id - A string containing the curie id of the node. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description - A string containing the English name for the node\n current functionality (+ means has it, - means does not have it)\n "Reactome" +\n "GO" - found gene conversion but no biological process conversion\n "UniProt" +\n "HP" - +\n "UBERON" +\n "CL" - not supposed to be here?\n "NCBIGene" +\n "DOID" +\n "OMIM" +\n "ChEMBL" +\n ' if (type(description) != str): description = str(description) curie_list = curie_id.split(':') names = None if QueryNCBIeUtils.is_mesh_term(description): return [(description + '[MeSH Terms]')] names = NormGoogleDistance.get_mesh_from_oxo(curie_id) if (names is None): if curie_list[0].lower().startswith('react'): res = QueryNCBIeUtils.get_reactome_names(curie_list[1]) if (res is not None): names = res.split('|') elif (curie_list[0] == 'GO'): pass elif curie_list[0].startswith('UniProt'): res = QueryNCBIeUtils.get_uniprot_names(curie_list[1]) if (res is not None): names = res.split('|') elif (curie_list[0] == 'HP'): names = QueryNCBIeUtils.get_mesh_terms_for_hp_id(curie_id) elif (curie_list[0] == 'UBERON'): if curie_id.endswith('PHENOTYPE'): curie_id = curie_id[:(- 9)] mesh_id = QueryEBIOLS.get_mesh_id_for_uberon_id(curie_id) names = [] for entry in mesh_id: if (len(entry.split('.')) > 1): uids = QueryNCBIeUtils.get_mesh_uids_for_mesh_tree(entry.split(':')[1]) for uid in uids: try: uid_num = (int(uid.split(':')[1][1:]) + 68000000) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) except IndexError: uid_num = int(uid) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) else: try: uid = entry.split(':')[1] uid_num = (int(uid[1:]) + 68000000) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) except IndexError: uid_num = int(entry) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) if (len(names) == 0): names = None else: names[0] = (names[0] + '[MeSH Terms]') elif (curie_list[0] == 'NCBIGene'): gene_id = curie_id.split(':')[1] names = QueryNCBIeUtils.get_pubmed_from_ncbi_gene(gene_id) elif (curie_list[0] == 'DOID'): mesh_id = QueryDisont.query_disont_to_mesh_id(curie_id) names = [] for uid in mesh_id: uid_num = (int(uid[1:]) + 68000000) name = QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) if (name is not None): names += name if (len(names) == 0): names = None else: names[0] = (names[0] + '[MeSH Terms]') elif (curie_list[0] == 'OMIM'): names = QueryNCBIeUtils.get_mesh_terms_for_omim_id(curie_list[1]) elif (curie_list[0] == 'ChEMBL'): chembl_id = curie_id.replace(':', '').upper() mesh_id = QueryMyChem.get_mesh_id(chembl_id) if (mesh_id is not None): mesh_id = (int(mesh_id[1:]) + 68000000) names = QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(mesh_id) if (names is not None): if (type(names) == list): for name in names: if name.endswith('[MeSH Terms]'): return [name] return names return [description.replace(';', '|')]
@staticmethod @CachedMethods.register def get_mesh_term_for_all(curie_id, description): '\n Takes a curie ID, detects the ontology from the curie id, and then finds the mesh term\n Params:\n curie_id - A string containing the curie id of the node. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description - A string containing the English name for the node\n current functionality (+ means has it, - means does not have it)\n "Reactome" +\n "GO" - found gene conversion but no biological process conversion\n "UniProt" +\n "HP" - +\n "UBERON" +\n "CL" - not supposed to be here?\n "NCBIGene" +\n "DOID" +\n "OMIM" +\n "ChEMBL" +\n ' if (type(description) != str): description = str(description) curie_list = curie_id.split(':') names = None if QueryNCBIeUtils.is_mesh_term(description): return [(description + '[MeSH Terms]')] names = NormGoogleDistance.get_mesh_from_oxo(curie_id) if (names is None): if curie_list[0].lower().startswith('react'): res = QueryNCBIeUtils.get_reactome_names(curie_list[1]) if (res is not None): names = res.split('|') elif (curie_list[0] == 'GO'): pass elif curie_list[0].startswith('UniProt'): res = QueryNCBIeUtils.get_uniprot_names(curie_list[1]) if (res is not None): names = res.split('|') elif (curie_list[0] == 'HP'): names = QueryNCBIeUtils.get_mesh_terms_for_hp_id(curie_id) elif (curie_list[0] == 'UBERON'): if curie_id.endswith('PHENOTYPE'): curie_id = curie_id[:(- 9)] mesh_id = QueryEBIOLS.get_mesh_id_for_uberon_id(curie_id) names = [] for entry in mesh_id: if (len(entry.split('.')) > 1): uids = QueryNCBIeUtils.get_mesh_uids_for_mesh_tree(entry.split(':')[1]) for uid in uids: try: uid_num = (int(uid.split(':')[1][1:]) + 68000000) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) except IndexError: uid_num = int(uid) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) else: try: uid = entry.split(':')[1] uid_num = (int(uid[1:]) + 68000000) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) except IndexError: uid_num = int(entry) names += QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) if (len(names) == 0): names = None else: names[0] = (names[0] + '[MeSH Terms]') elif (curie_list[0] == 'NCBIGene'): gene_id = curie_id.split(':')[1] names = QueryNCBIeUtils.get_pubmed_from_ncbi_gene(gene_id) elif (curie_list[0] == 'DOID'): mesh_id = QueryDisont.query_disont_to_mesh_id(curie_id) names = [] for uid in mesh_id: uid_num = (int(uid[1:]) + 68000000) name = QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(uid_num) if (name is not None): names += name if (len(names) == 0): names = None else: names[0] = (names[0] + '[MeSH Terms]') elif (curie_list[0] == 'OMIM'): names = QueryNCBIeUtils.get_mesh_terms_for_omim_id(curie_list[1]) elif (curie_list[0] == 'ChEMBL'): chembl_id = curie_id.replace(':', '').upper() mesh_id = QueryMyChem.get_mesh_id(chembl_id) if (mesh_id is not None): mesh_id = (int(mesh_id[1:]) + 68000000) names = QueryNCBIeUtils.get_mesh_terms_for_mesh_uid(mesh_id) if (names is not None): if (type(names) == list): for name in names: if name.endswith('[MeSH Terms]'): return [name] return names return [description.replace(';', '|')]<|docstring|>Takes a curie ID, detects the ontology from the curie id, and then finds the mesh term Params: curie_id - A string containing the curie id of the node. Formatted <source abbreviation>:<number> e.g. DOID:8398 description - A string containing the English name for the node current functionality (+ means has it, - means does not have it) "Reactome" + "GO" - found gene conversion but no biological process conversion "UniProt" + "HP" - + "UBERON" + "CL" - not supposed to be here? "NCBIGene" + "DOID" + "OMIM" + "ChEMBL" +<|endoftext|>
f2fee9fefa93787b84d525ce03335d195d612552bbaac29783ddcf1445262ff0
@staticmethod def get_ngd_for_all(curie_id_list, description_list): '\n Takes a list of curie ids and descriptions then calculates the normalized google distance for the set of nodes.\n Params:\n curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description_list - a list of strings containing the English names for the nodes\n ' assert (len(curie_id_list) == len(description_list)) terms = ([None] * len(curie_id_list)) for a in range(len(description_list)): terms[a] = NormGoogleDistance.get_mesh_term_for_all(curie_id_list[a], description_list[a]) if (type(terms[a]) != list): terms[a] = [terms[a]] if (len(terms[a]) == 0): terms[a] = [description_list[a]] if (len(terms[a]) > 30): terms[a] = terms[a][:30] terms_combined = ([''] * len(terms)) mesh_flags = ([True] * len(terms)) for a in range(len(terms)): if (len(terms[a]) > 1): if (not terms[a][0].endswith('[uid]')): for b in range(len(terms[a])): if (QueryNCBIeUtils.is_mesh_term(terms[a][b]) and (not terms[a][b].endswith('[MeSH Terms]'))): terms[a][b] += '[MeSH Terms]' terms_combined[a] = '|'.join(terms[a]) mesh_flags[a] = False else: terms_combined[a] = terms[a][0] if terms[a][0].endswith('[MeSH Terms]'): terms_combined[a] = terms[a][0][:(- 12)] elif (not QueryNCBIeUtils.is_mesh_term(terms[a][0])): mesh_flags[a] = False ngd = QueryNCBIeUtils.multi_normalized_google_distance(terms_combined, mesh_flags) return ngd
Takes a list of curie ids and descriptions then calculates the normalized google distance for the set of nodes. Params: curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398 description_list - a list of strings containing the English names for the nodes
code/reasoningtool/kg-construction/NormGoogleDistance.py
get_ngd_for_all
rtx-travis-tester/RTX
31
python
@staticmethod def get_ngd_for_all(curie_id_list, description_list): '\n Takes a list of curie ids and descriptions then calculates the normalized google distance for the set of nodes.\n Params:\n curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description_list - a list of strings containing the English names for the nodes\n ' assert (len(curie_id_list) == len(description_list)) terms = ([None] * len(curie_id_list)) for a in range(len(description_list)): terms[a] = NormGoogleDistance.get_mesh_term_for_all(curie_id_list[a], description_list[a]) if (type(terms[a]) != list): terms[a] = [terms[a]] if (len(terms[a]) == 0): terms[a] = [description_list[a]] if (len(terms[a]) > 30): terms[a] = terms[a][:30] terms_combined = ([''] * len(terms)) mesh_flags = ([True] * len(terms)) for a in range(len(terms)): if (len(terms[a]) > 1): if (not terms[a][0].endswith('[uid]')): for b in range(len(terms[a])): if (QueryNCBIeUtils.is_mesh_term(terms[a][b]) and (not terms[a][b].endswith('[MeSH Terms]'))): terms[a][b] += '[MeSH Terms]' terms_combined[a] = '|'.join(terms[a]) mesh_flags[a] = False else: terms_combined[a] = terms[a][0] if terms[a][0].endswith('[MeSH Terms]'): terms_combined[a] = terms[a][0][:(- 12)] elif (not QueryNCBIeUtils.is_mesh_term(terms[a][0])): mesh_flags[a] = False ngd = QueryNCBIeUtils.multi_normalized_google_distance(terms_combined, mesh_flags) return ngd
@staticmethod def get_ngd_for_all(curie_id_list, description_list): '\n Takes a list of curie ids and descriptions then calculates the normalized google distance for the set of nodes.\n Params:\n curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description_list - a list of strings containing the English names for the nodes\n ' assert (len(curie_id_list) == len(description_list)) terms = ([None] * len(curie_id_list)) for a in range(len(description_list)): terms[a] = NormGoogleDistance.get_mesh_term_for_all(curie_id_list[a], description_list[a]) if (type(terms[a]) != list): terms[a] = [terms[a]] if (len(terms[a]) == 0): terms[a] = [description_list[a]] if (len(terms[a]) > 30): terms[a] = terms[a][:30] terms_combined = ([''] * len(terms)) mesh_flags = ([True] * len(terms)) for a in range(len(terms)): if (len(terms[a]) > 1): if (not terms[a][0].endswith('[uid]')): for b in range(len(terms[a])): if (QueryNCBIeUtils.is_mesh_term(terms[a][b]) and (not terms[a][b].endswith('[MeSH Terms]'))): terms[a][b] += '[MeSH Terms]' terms_combined[a] = '|'.join(terms[a]) mesh_flags[a] = False else: terms_combined[a] = terms[a][0] if terms[a][0].endswith('[MeSH Terms]'): terms_combined[a] = terms[a][0][:(- 12)] elif (not QueryNCBIeUtils.is_mesh_term(terms[a][0])): mesh_flags[a] = False ngd = QueryNCBIeUtils.multi_normalized_google_distance(terms_combined, mesh_flags) return ngd<|docstring|>Takes a list of curie ids and descriptions then calculates the normalized google distance for the set of nodes. Params: curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398 description_list - a list of strings containing the English names for the nodes<|endoftext|>
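For reference, the normalized Google distance that multi_normalized_google_distance presumably computes from PubMed hit counts is the standard Cilibrasi–Vitányi measure. For terms x and y with hit counts f(x) and f(y), joint count f(x, y), and corpus size N:

\mathrm{NGD}(x, y) = \frac{\max\{\log f(x), \log f(y)\} - \log f(x, y)}{\log N - \min\{\log f(x), \log f(y)\}}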
eb60ae9f703b06a21c82bc2bccf3b299e2921a31deb092e39e36fb136697dd4c
@staticmethod def get_pmids_for_all(curie_id_list, description_list): '\n Takes a list of curie ids and descriptions then retrieves the PubMed IDs for the set of nodes.\n Params:\n curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description_list - a list of strings containing the English names for the nodes\n ' assert (len(curie_id_list) == len(description_list)) terms = ([None] * len(curie_id_list)) for a in range(len(description_list)): terms[a] = NormGoogleDistance.get_mesh_term_for_all(curie_id_list[a], description_list[a]) if (type(terms[a]) != list): terms[a] = [terms[a]] if (len(terms[a]) == 0): terms[a] = [description_list[a]] if (len(terms[a]) > 30): terms[a] = terms[a][:30] terms_combined = ([''] * len(terms)) mesh_flags = ([True] * len(terms)) for a in range(len(terms)): if (len(terms[a]) > 1): if (not terms[a][0].endswith('[uid]')): for b in range(len(terms[a])): if (QueryNCBIeUtils.is_mesh_term(terms[a][b]) and (not terms[a][b].endswith('[MeSH Terms]'))): terms[a][b] += '[MeSH Terms]' terms_combined[a] = '|'.join(terms[a]) mesh_flags[a] = False else: terms_combined[a] = terms[a][0] if terms[a][0].endswith('[MeSH Terms]'): terms_combined[a] = terms[a][0][:(- 12)] elif (not QueryNCBIeUtils.is_mesh_term(terms[a][0])): mesh_flags[a] = False pmids = QueryNCBIeUtils.multi_normalized_pmids(terms_combined, mesh_flags) pmids_with_prefix = [] for lst in pmids: pmids_with_prefix.append([f'PMID:{x}' for x in lst]) return pmids_with_prefix
Takes a list of curie ids and descriptions then retrieves the PubMed IDs for the set of nodes. Params: curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398 description_list - a list of strings containing the English names for the nodes
code/reasoningtool/kg-construction/NormGoogleDistance.py
get_pmids_for_all
rtx-travis-tester/RTX
31
python
@staticmethod def get_pmids_for_all(curie_id_list, description_list): '\n Takes a list of curie ids and descriptions then retrieves the PubMed IDs for the set of nodes.\n Params:\n curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description_list - a list of strings containing the English names for the nodes\n ' assert (len(curie_id_list) == len(description_list)) terms = ([None] * len(curie_id_list)) for a in range(len(description_list)): terms[a] = NormGoogleDistance.get_mesh_term_for_all(curie_id_list[a], description_list[a]) if (type(terms[a]) != list): terms[a] = [terms[a]] if (len(terms[a]) == 0): terms[a] = [description_list[a]] if (len(terms[a]) > 30): terms[a] = terms[a][:30] terms_combined = ([''] * len(terms)) mesh_flags = ([True] * len(terms)) for a in range(len(terms)): if (len(terms[a]) > 1): if (not terms[a][0].endswith('[uid]')): for b in range(len(terms[a])): if (QueryNCBIeUtils.is_mesh_term(terms[a][b]) and (not terms[a][b].endswith('[MeSH Terms]'))): terms[a][b] += '[MeSH Terms]' terms_combined[a] = '|'.join(terms[a]) mesh_flags[a] = False else: terms_combined[a] = terms[a][0] if terms[a][0].endswith('[MeSH Terms]'): terms_combined[a] = terms[a][0][:(- 12)] elif (not QueryNCBIeUtils.is_mesh_term(terms[a][0])): mesh_flags[a] = False pmids = QueryNCBIeUtils.multi_normalized_pmids(terms_combined, mesh_flags) pmids_with_prefix = [] for lst in pmids: pmids_with_prefix.append([f'PMID:{x}' for x in lst]) return pmids_with_prefix
@staticmethod def get_pmids_for_all(curie_id_list, description_list): '\n Takes a list of curie ids and descriptions then retrieves the PubMed IDs for the set of nodes.\n Params:\n curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398\n description_list - a list of strings containing the English names for the nodes\n ' assert (len(curie_id_list) == len(description_list)) terms = ([None] * len(curie_id_list)) for a in range(len(description_list)): terms[a] = NormGoogleDistance.get_mesh_term_for_all(curie_id_list[a], description_list[a]) if (type(terms[a]) != list): terms[a] = [terms[a]] if (len(terms[a]) == 0): terms[a] = [description_list[a]] if (len(terms[a]) > 30): terms[a] = terms[a][:30] terms_combined = ([''] * len(terms)) mesh_flags = ([True] * len(terms)) for a in range(len(terms)): if (len(terms[a]) > 1): if (not terms[a][0].endswith('[uid]')): for b in range(len(terms[a])): if (QueryNCBIeUtils.is_mesh_term(terms[a][b]) and (not terms[a][b].endswith('[MeSH Terms]'))): terms[a][b] += '[MeSH Terms]' terms_combined[a] = '|'.join(terms[a]) mesh_flags[a] = False else: terms_combined[a] = terms[a][0] if terms[a][0].endswith('[MeSH Terms]'): terms_combined[a] = terms[a][0][:(- 12)] elif (not QueryNCBIeUtils.is_mesh_term(terms[a][0])): mesh_flags[a] = False pmids = QueryNCBIeUtils.multi_normalized_pmids(terms_combined, mesh_flags) pmids_with_prefix = [] for lst in pmids: pmids_with_prefix.append([f'PMID:{x}' for x in lst]) return pmids_with_prefix<|docstring|>Takes a list of curie ids and descriptions then retrieves the PubMed IDs for the set of nodes. Params: curie_id_list - a list of strings containing the curie ids of the nodes. Formatted <source abbreviation>:<number> e.g. DOID:8398 description_list - a list of strings containing the English names for the nodes<|endoftext|>
593e8dfba974a9490a9e2a43a8a28fd271b135f1676af31aecc6e4c9ed4f7acf
@property def AuxiliaryId(self): '\n Returns\n -------\n - number: This describes the identifier for auxiliary connections.\n ' return self._get_attribute(self._SDM_ATT_MAP['AuxiliaryId'])
Returns ------- - number: This describes the identifier for auxiliary connections.
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
AuxiliaryId
slieberth/ixnetwork_restpy
0
python
@property def AuxiliaryId(self): '\n Returns\n -------\n - number: This describes the identifier for auxiliary connections.\n ' return self._get_attribute(self._SDM_ATT_MAP['AuxiliaryId'])
@property def AuxiliaryId(self): '\n Returns\n -------\n - number: This describes the identifier for auxiliary connections.\n ' return self._get_attribute(self._SDM_ATT_MAP['AuxiliaryId'])<|docstring|>Returns ------- - number: This describes the identifier for auxiliary connections.<|endoftext|>
653b6ca5d0bbbe4b36891dcba1b9d3e856d17fa4a65338dc3b37be29b6ea3914
@property def ConnectionType(self): '\n Returns\n -------\n - str(tcp | tls | udp): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection)\n ' return self._get_attribute(self._SDM_ATT_MAP['ConnectionType'])
Returns ------- - str(tcp | tls | udp): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection)
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
ConnectionType
slieberth/ixnetwork_restpy
0
python
@property def ConnectionType(self): '\n Returns\n -------\n - str(tcp | tls | udp): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection)\n ' return self._get_attribute(self._SDM_ATT_MAP['ConnectionType'])
@property def ConnectionType(self): '\n Returns\n -------\n - str(tcp | tls | udp): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection)\n ' return self._get_attribute(self._SDM_ATT_MAP['ConnectionType'])<|docstring|>Returns ------- - str(tcp | tls | udp): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection)<|endoftext|>
a0bb7f85516cc1a8c943c8f999a16397dbb9a9a907b3b37d0e0b0e4a70fd9e1b
@property def DataPathId(self): '\n Returns\n -------\n - str: Indicates the datapath ID of the OpenFlow controller.\n ' return self._get_attribute(self._SDM_ATT_MAP['DataPathId'])
Returns ------- - str: Indicates the datapath ID of the OpenFlow controller.
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
DataPathId
slieberth/ixnetwork_restpy
0
python
@property def DataPathId(self): '\n Returns\n -------\n - str: Indicates the datapath ID of the OpenFlow controller.\n ' return self._get_attribute(self._SDM_ATT_MAP['DataPathId'])
@property def DataPathId(self): '\n Returns\n -------\n - str: Indicates the datapath ID of the OpenFlow controller.\n ' return self._get_attribute(self._SDM_ATT_MAP['DataPathId'])<|docstring|>Returns ------- - str: Indicates the datapath ID of the OpenFlow controller.<|endoftext|>
4ca878a78ece21e2219ac3fd7d69c4ab0289f2f42ec6242cc02463290c9f6309
@property def DataPathIdAsHex(self): '\n Returns\n -------\n - str: Indicates the datapath ID of the OpenFlow controller in hexadecimal format.\n ' return self._get_attribute(self._SDM_ATT_MAP['DataPathIdAsHex'])
Returns ------- - str: Indicates the datapath ID of the OpenFlow controller in hexadecimal format.
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
DataPathIdAsHex
slieberth/ixnetwork_restpy
0
python
@property def DataPathIdAsHex(self): '\n Returns\n -------\n - str: Indicates the datapath ID of the OpenFlow controller in hexadecimal format.\n ' return self._get_attribute(self._SDM_ATT_MAP['DataPathIdAsHex'])
@property def DataPathIdAsHex(self): '\n Returns\n -------\n - str: Indicates the datapath ID of the OpenFlow controller in hexadecimal format.\n ' return self._get_attribute(self._SDM_ATT_MAP['DataPathIdAsHex'])<|docstring|>Returns ------- - str: Indicates the datapath ID of the OpenFlow controller in hexadecimal format.<|endoftext|>
6cd11f9ee88bf47aa238f548b5c3cd3812495976af486ee07b85d4232bfea4c0
@property def LocalIp(self): '\n Returns\n -------\n - str: Signifies the local IP address of the selected interface.\n ' return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
Returns ------- - str: Signifies the local IP address of the selected interface.
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
LocalIp
slieberth/ixnetwork_restpy
0
python
@property def LocalIp(self): '\n Returns\n -------\n - str: Signifies the local IP address of the selected interface.\n ' return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
@property def LocalIp(self): '\n Returns\n -------\n - str: Signifies the local IP address of the selected interface.\n ' return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])<|docstring|>Returns ------- - str: Signifies the local IP address of the selected interface.<|endoftext|>
bff02b0d687b0cde35e787c71c2a352e1f18d2012dbd83864ea1150355fe321e
@property def LocalPort(self): '\n Returns\n -------\n - number: This describes the local port number identifier.\n ' return self._get_attribute(self._SDM_ATT_MAP['LocalPort'])
Returns ------- - number: This describes the local port number identifier.
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
LocalPort
slieberth/ixnetwork_restpy
0
python
@property def LocalPort(self): '\n Returns\n -------\n - number: This describes the local port number identifier.\n ' return self._get_attribute(self._SDM_ATT_MAP['LocalPort'])
@property def LocalPort(self): '\n Returns\n -------\n - number: This describes the local port number identifier.\n ' return self._get_attribute(self._SDM_ATT_MAP['LocalPort'])<|docstring|>Returns ------- - number: This describes the local port number identifier.<|endoftext|>
8c9637b33d205fb380d1c0a7d2725b2cc05b209d5eebce9efaccaa51cd39e8f2
@property def RemoteIp(self): '\n Returns\n -------\n - str: This describes the IP address of the remote end of the OF Channel.\n ' return self._get_attribute(self._SDM_ATT_MAP['RemoteIp'])
Returns ------- - str: This describes the IP address of the remote end of the OF Channel.
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
RemoteIp
slieberth/ixnetwork_restpy
0
python
@property def RemoteIp(self): '\n Returns\n -------\n - str: This describes the IP address of the remote end of the OF Channel.\n ' return self._get_attribute(self._SDM_ATT_MAP['RemoteIp'])
@property def RemoteIp(self): '\n Returns\n -------\n - str: This describes the IP address of the remote end of the OF Channel.\n ' return self._get_attribute(self._SDM_ATT_MAP['RemoteIp'])<|docstring|>Returns ------- - str: This describes the IP address of the remote end of the OF Channel.<|endoftext|>
3b769b54811d4822ecff3c5baec745fb2935e36aff46e15a010b28f549622b33
@property def RemotePort(self): '\n Returns\n -------\n - number: This describes the remote port number identifier.\n ' return self._get_attribute(self._SDM_ATT_MAP['RemotePort'])
Returns ------- - number: This describes the remote port number identifier.
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
RemotePort
slieberth/ixnetwork_restpy
0
python
@property def RemotePort(self): '\n Returns\n -------\n - number: This describes the remote port number identifier.\n ' return self._get_attribute(self._SDM_ATT_MAP['RemotePort'])
@property def RemotePort(self): '\n Returns\n -------\n - number: This describes the remote port number identifier.\n ' return self._get_attribute(self._SDM_ATT_MAP['RemotePort'])<|docstring|>Returns ------- - number: This describes the remote port number identifier.<|endoftext|>
1ca37cf7f36582ea26ace7be7f19abd80ef8d290cda849e91f9bee601b1be5e9
def find(self, AuxiliaryId=None, ConnectionType=None, DataPathId=None, DataPathIdAsHex=None, LocalIp=None, LocalPort=None, RemoteIp=None, RemotePort=None): 'Finds and retrieves controllerAuxiliaryConnectionLearnedInfo resources from the server.\n\n All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve controllerAuxiliaryConnectionLearnedInfo resources from the server.\n To retrieve an exact match ensure the parameter value starts with ^ and ends with $\n By default the find method takes no parameters and will retrieve all controllerAuxiliaryConnectionLearnedInfo resources from the server.\n\n Args\n ----\n - AuxiliaryId (number): This describes the identifier for auxiliary connections.\n - ConnectionType (str(tcp | tls | udp)): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection)\n - DataPathId (str): Indicates the datapath ID of the OpenFlow controller.\n - DataPathIdAsHex (str): Indicates the datapath ID of the OpenFlow controller in hexadecimal format.\n - LocalIp (str): Signifies the local IP address of the selected interface.\n - LocalPort (number): This describes the local port number identifier.\n - RemoteIp (str): This describes the IP address of the remote end of the OF Channel.\n - RemotePort (number): This describes the remote port number identifier.\n\n Returns\n -------\n - self: This instance with matching controllerAuxiliaryConnectionLearnedInfo resources retrieved from the server available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n ' return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
Finds and retrieves controllerAuxiliaryConnectionLearnedInfo resources from the server. All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve controllerAuxiliaryConnectionLearnedInfo resources from the server. To retrieve an exact match ensure the parameter value starts with ^ and ends with $ By default the find method takes no parameters and will retrieve all controllerAuxiliaryConnectionLearnedInfo resources from the server. Args ---- - AuxiliaryId (number): This describes the identifier for auxiliary connections. - ConnectionType (str(tcp | tls | udp)): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection) - DataPathId (str): Indicates the datapath ID of the OpenFlow controller. - DataPathIdAsHex (str): Indicates the datapath ID of the OpenFlow controller in hexadecimal format. - LocalIp (str): Signifies the local IP address of the selected interface. - LocalPort (number): This describes the local port number identifier. - RemoteIp (str): This describes the IP address of the remote end of the OF Channel. - RemotePort (number): This describes the remote port number identifier. Returns ------- - self: This instance with matching controllerAuxiliaryConnectionLearnedInfo resources retrieved from the server available through an iterator or index Raises ------ - ServerError: The server has encountered an uncategorized error condition
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
find
slieberth/ixnetwork_restpy
0
python
def find(self, AuxiliaryId=None, ConnectionType=None, DataPathId=None, DataPathIdAsHex=None, LocalIp=None, LocalPort=None, RemoteIp=None, RemotePort=None): 'Finds and retrieves controllerAuxiliaryConnectionLearnedInfo resources from the server.\n\n All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve controllerAuxiliaryConnectionLearnedInfo resources from the server.\n To retrieve an exact match ensure the parameter value starts with ^ and ends with $\n By default the find method takes no parameters and will retrieve all controllerAuxiliaryConnectionLearnedInfo resources from the server.\n\n Args\n ----\n - AuxiliaryId (number): This describes the identifier for auxiliary connections.\n - ConnectionType (str(tcp | tls | udp)): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection)\n - DataPathId (str): Indicates the datapath ID of the OpenFlow controller.\n - DataPathIdAsHex (str): Indicates the datapath ID of the OpenFlow controller in hexadecimal format.\n - LocalIp (str): Signifies the local IP address of the selected interface.\n - LocalPort (number): This describes the local port number identifier.\n - RemoteIp (str): This describes the IP address of the remote end of the OF Channel.\n - RemotePort (number): This describes the remote port number identifier.\n\n Returns\n -------\n - self: This instance with matching controllerAuxiliaryConnectionLearnedInfo resources retrieved from the server available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n ' return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, AuxiliaryId=None, ConnectionType=None, DataPathId=None, DataPathIdAsHex=None, LocalIp=None, LocalPort=None, RemoteIp=None, RemotePort=None): 'Finds and retrieves controllerAuxiliaryConnectionLearnedInfo resources from the server.\n\n All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve controllerAuxiliaryConnectionLearnedInfo resources from the server.\n To retrieve an exact match ensure the parameter value starts with ^ and ends with $\n By default the find method takes no parameters and will retrieve all controllerAuxiliaryConnectionLearnedInfo resources from the server.\n\n Args\n ----\n - AuxiliaryId (number): This describes the identifier for auxiliary connections.\n - ConnectionType (str(tcp | tls | udp)): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection)\n - DataPathId (str): Indicates the datapath ID of the OpenFlow controller.\n - DataPathIdAsHex (str): Indicates the datapath ID of the OpenFlow controller in hexadecimal format.\n - LocalIp (str): Signifies the local IP address of the selected interface.\n - LocalPort (number): This describes the local port number identifier.\n - RemoteIp (str): This describes the IP address of the remote end of the OF Channel.\n - RemotePort (number): This describes the remote port number identifier.\n\n Returns\n -------\n - self: This instance with matching controllerAuxiliaryConnectionLearnedInfo resources retrieved from the server available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n ' return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))<|docstring|>Finds and retrieves controllerAuxiliaryConnectionLearnedInfo resources from the server. All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve controllerAuxiliaryConnectionLearnedInfo resources from the server. To retrieve an exact match ensure the parameter value starts with ^ and ends with $ By default the find method takes no parameters and will retrieve all controllerAuxiliaryConnectionLearnedInfo resources from the server. Args ---- - AuxiliaryId (number): This describes the identifier for auxiliary connections. - ConnectionType (str(tcp | tls | udp)): Specifies how this controllerPort is connected to another controller (internal/external) or host or there is no connection (noConnection) - DataPathId (str): Indicates the datapath ID of the OpenFlow controller. - DataPathIdAsHex (str): Indicates the datapath ID of the OpenFlow controller in hexadecimal format. - LocalIp (str): Signifies the local IP address of the selected interface. - LocalPort (number): This describes the local port number identifier. - RemoteIp (str): This describes the IP address of the remote end of the OF Channel. - RemotePort (number): This describes the remote port number identifier. Returns ------- - self: This instance with matching controllerAuxiliaryConnectionLearnedInfo resources retrieved from the server available through an iterator or index Raises ------ - ServerError: The server has encountered an uncategorized error condition<|endoftext|>
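To illustrate the regex semantics described in the find docstring above, a hedged sketch: learned_info stands for an already-retrieved ControllerAuxiliaryConnectionLearnedInfo container (how it is reached through the session tree is outside this record), and the filter values are illustrative.

# Values are evaluated server-side as regex; ^...$ forces an exact match.
matches = learned_info.find(ConnectionType='^tcp$', RemoteIp=r'10\.0\.')
for info in matches:
    print(info.AuxiliaryId, info.LocalIp, info.LocalPort, info.RemotePort)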
e819d9cbbc624f4ec8438a71cc9c0866c97e72e2e82990e0bd0e5418132a39b4
def read(self, href): 'Retrieves a single instance of controllerAuxiliaryConnectionLearnedInfo data from the server.\n\n Args\n ----\n - href (str): An href to the instance to be retrieved\n\n Returns\n -------\n - self: This instance with the controllerAuxiliaryConnectionLearnedInfo resources from the server available through an iterator or index\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n ' return self._read(href)
Retrieves a single instance of controllerAuxiliaryConnectionLearnedInfo data from the server. Args ---- - href (str): An href to the instance to be retrieved Returns ------- - self: This instance with the controllerAuxiliaryConnectionLearnedInfo resources from the server available through an iterator or index Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/controllerauxiliaryconnectionlearnedinfo_1c2f8f11bff25ef8bd7fd65ee1072e9b.py
read
slieberth/ixnetwork_restpy
0
python
def read(self, href): 'Retrieves a single instance of controllerAuxiliaryConnectionLearnedInfo data from the server.\n\n Args\n ----\n - href (str): An href to the instance to be retrieved\n\n Returns\n -------\n - self: This instance with the controllerAuxiliaryConnectionLearnedInfo resources from the server available through an iterator or index\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n ' return self._read(href)
def read(self, href): 'Retrieves a single instance of controllerAuxiliaryConnectionLearnedInfo data from the server.\n\n Args\n ----\n - href (str): An href to the instance to be retrieved\n\n Returns\n -------\n - self: This instance with the controllerAuxiliaryConnectionLearnedInfo resources from the server available through an iterator or index\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n ' return self._read(href)<|docstring|>Retrieves a single instance of controllerAuxiliaryConnectionLearnedInfo data from the server. Args ---- - href (str): An href to the instance to be retrieved Returns ------- - self: This instance with the controllerAuxiliaryConnectionLearnedInfo resources from the server available through an iterator or index Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition<|endoftext|>
a81b413ddd41cec247151aefceaaef37ef175c23bb2ada961d531974330ed91f
def to_string(self): ' Return object as string ' url = self.scheme if self.user: if self.password: url += ('://%s@%s' % (self.user, self.password)) else: url += '://' url += self.netloc if self.port: url += (':%d' % self.port) else: url += ':80' url += ('%s' % self.path) return url
Return object as string
apps/orgapp/app/models.py
to_string
dorneanu/crudappify
1
python
def to_string(self): ' ' url = self.scheme if self.user: if self.password: url += ('://%s@%s' % (self.user, self.password)) else: url += '://' url += self.netloc if self.port: url += (':%d' % self.port) else: url += ':80' url += ('%s' % self.path) return url
def to_string(self): ' ' url = self.scheme if self.user: if self.password: url += ('://%s@%s' % (self.user, self.password)) else: url += '://' url += self.netloc if self.port: url += (':%d' % self.port) else: url += ':80' url += ('%s' % self.path) return url<|docstring|>Return object as string<|endoftext|>
e00feb417a93d5f095f02d25e9aeecaed93ecebdf6314abbba89dfb04e451806
def load_data(database_filepath): "\n Load data from the specified database\n\n Args:\n database_filepath: string. A relative path to the database file\n\n Returns:\n X: Array of features data which is data in the 'message' column\n y: Array of labels data which is the 36 categories in the dataset\n category_names: List of category names corresponding to columns of y\n " engine = create_engine('sqlite:///{}'.format(database_filepath)) df = pd.read_sql_table('InsertTableName', engine) X = np.array(df['message']) cat_values = df.drop(['id', 'message', 'original', 'genre'], axis=1) if pd.__version__.startswith('0.24'): Y = cat_values.to_numpy() else: Y = cat_values.values category_names = cat_values.columns.tolist() return (X, Y, category_names)
Load data from the specified database Args: database_filepath: string. A relative path to the database file Returns: X: Array of features data which is data in the 'message' column y: Array of labels data which is the 36 categories in the dataset category_names: List of category names corresponding to columns of y
models/train_classifier.py
load_data
nongnoochr/diaster-response-app
0
python
def load_data(database_filepath): "\n Load data from the specified database\n\n Args:\n database_filepath: string. A relative path to the database file\n\n Returns:\n X: Array of features data which is data in the 'message' column\n y: Array of labels data which is the 36 categories in the dataset\n category_names: List of category names corresponding to columns of y\n " engine = create_engine('sqlite:///{}'.format(database_filepath)) df = pd.read_sql_table('InsertTableName', engine) X = np.array(df['message']) cat_values = df.drop(['id', 'message', 'original', 'genre'], axis=1) if pd.__version__.startswith('0.24'): Y = cat_values.to_numpy() else: Y = cat_values.values category_names = cat_values.columns.tolist() return (X, Y, category_names)
def load_data(database_filepath): "\n Load data from the specified database\n\n Args:\n database_filepath: string. A relative path to the database file\n\n Returns:\n X: Array of features data which is data in the 'message' column\n y: Array of labels data which is the 36 categories in the dataset\n category_names: List of category names corresponding to columns of y\n " engine = create_engine('sqlite:///{}'.format(database_filepath)) df = pd.read_sql_table('InsertTableName', engine) X = np.array(df['message']) cat_values = df.drop(['id', 'message', 'original', 'genre'], axis=1) if pd.__version__.startswith('0.24'): Y = cat_values.to_numpy() else: Y = cat_values.values category_names = cat_values.columns.tolist() return (X, Y, category_names)<|docstring|>Load data from the specified database Args: database_filepath: string. A relative path to the database file Returns: X: Array of features data which is data in the 'message' column y: Array of labels data which is the 36 categories in the dataset category_names: List of category names corresponding to columns of y<|endoftext|>
e65cf0669b7cf9111387975ed8163250ee8bd49dc217fa9286aba57046477890
def get_wordnet_pos(tag): ' \n Get a TreeBank tag from the specified WordNet part of speech name\n\n Args:\n tag: string. WordNet part of speech name.\n\n Returns:\n A corresponding TreeBank tag\n ' treebank_tag = '' if tag.startswith('J'): treebank_tag = wordnet.ADJ elif tag.startswith('V'): treebank_tag = wordnet.VERB elif tag.startswith('N'): treebank_tag = wordnet.NOUN elif tag.startswith('R'): treebank_tag = wordnet.ADV else: treebank_tag = wordnet.NOUN return treebank_tag
Get a TreeBank tag from the specified WordNet part of speech name Args: tag: string. WordNet part of speech name. Returns: A corresponding TreeBank tag
models/train_classifier.py
get_wordnet_pos
nongnoochr/diaster-response-app
0
python
def get_wordnet_pos(tag): ' \n Get a TreeBank tag from the specified WordNet part of speech name\n\n Args:\n tag: string. WordNet part of speech name.\n\n Returns:\n A corresponding TreeBank tag\n ' treebank_tag = '' if tag.startswith('J'): treebank_tag = wordnet.ADJ elif tag.startswith('V'): treebank_tag = wordnet.VERB elif tag.startswith('N'): treebank_tag = wordnet.NOUN elif tag.startswith('R'): treebank_tag = wordnet.ADV else: treebank_tag = wordnet.NOUN return treebank_tag
def get_wordnet_pos(tag): ' \n Get a TreeBank tag from the specified WordNet part of speech name\n\n Args:\n tag: string. WordNet part of speech name.\n\n Returns:\n A corresponding TreeBank tag\n ' treebank_tag = '' if tag.startswith('J'): treebank_tag = wordnet.ADJ elif tag.startswith('V'): treebank_tag = wordnet.VERB elif tag.startswith('N'): treebank_tag = wordnet.NOUN elif tag.startswith('R'): treebank_tag = wordnet.ADV else: treebank_tag = wordnet.NOUN return treebank_tag<|docstring|>Get a TreeBank tag from the specified WordNet part of speech name Args: tag: string. WordNet part of speech name. Returns: A corresponding TreeBank tag<|endoftext|>
ec66bcc257954739476efb560111d068b628ca001aff95556c4f8f8e328a4c36
def tokenize(text): '\n Perform a tokenization process on the input text\n\n Args:\n text: string. A message to be tokenized\n\n Returns:\n clean_tokens\n ' text = text.lower() text = re.sub('[^a-zA-Z0-9]', ' ', text) words = word_tokenize(text) words = [w for w in words if (w not in stopwords.words('english'))] pv_tags = pos_tag(words) lemmatizer = WordNetLemmatizer() clean_tokens = [] for cur_tag in pv_tags: cur_text = cur_tag[0] w_tag = get_wordnet_pos(cur_tag[1]) clean_tok = lemmatizer.lemmatize(cur_text, w_tag) clean_tokens.append(clean_tok) return clean_tokens
Perform a tokenization process on the input text Args: text: string. A message to be tokenized Returns: clean_tokens
models/train_classifier.py
tokenize
nongnoochr/diaster-response-app
0
python
def tokenize(text): '\n Perform a tokenization process on the input text\n\n Args:\n text: string. A message to be tokenized\n\n Returns:\n clean_tokens\n ' text = text.lower() text = re.sub('[^a-zA-Z0-9]', ' ', text) words = word_tokenize(text) words = [w for w in words if (w not in stopwords.words('english'))] pv_tags = pos_tag(words) lemmatizer = WordNetLemmatizer() clean_tokens = [] for cur_tag in pv_tags: cur_text = cur_tag[0] w_tag = get_wordnet_pos(cur_tag[1]) clean_tok = lemmatizer.lemmatize(cur_text, w_tag) clean_tokens.append(clean_tok) return clean_tokens
def tokenize(text): '\n Perform a tokenization process on the input text\n\n Args:\n text: string. A message to be tokenized\n\n Returns:\n clean_tokens\n ' text = text.lower() text = re.sub('[^a-zA-Z0-9]', ' ', text) words = word_tokenize(text) words = [w for w in words if (w not in stopwords.words('english'))] pv_tags = pos_tag(words) lemmatizer = WordNetLemmatizer() clean_tokens = [] for cur_tag in pv_tags: cur_text = cur_tag[0] w_tag = get_wordnet_pos(cur_tag[1]) clean_tok = lemmatizer.lemmatize(cur_text, w_tag) clean_tokens.append(clean_tok) return clean_tokens<|docstring|>Perform a tokenization process on the input text Args: text: string. A message to be tokenized Returns: clean_tokens<|endoftext|>
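A minimal sketch of calling tokenize above, assuming the NLTK data it depends on has been downloaded; exact lemmas vary with the tagger model, so the commented output is indicative only.

import nltk
for pkg in ('punkt', 'stopwords', 'wordnet', 'averaged_perceptron_tagger'):
    nltk.download(pkg, quiet=True)

print(tokenize('We are trapped and urgently need water, food and medical supplies!'))
# Lowercases, strips punctuation, drops stopwords, then applies POS-aware
# WordNet lemmatization -- e.g. 'trapped' -> 'trap', 'supplies' -> 'supply'.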
d9bb12308b321eb0c8cab317ec110ecc12b80045d779047f5a14d11dd98eeb28
def build_model(): '\n Create a GridSearchCV object of a pipeline where the MultiOutputClassifier \n is used along with the RandomForestClassifier as an estimator\n\n Returns:\n model: GridSearchCV object.\n ' pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(RandomForestClassifier(random_state=1, n_jobs=(- 1))))]) parameters = {'clf__estimator__min_samples_leaf': [2, 5], 'clf__estimator__n_estimators': [10, 30]} model = GridSearchCV(pipeline, param_grid=parameters, cv=5) return model
Create a GridSearchCV object of a pipeline where the MultiOutputClassifier is used along with the RandomForestClassifier as an estimator Returns: model: GridSearchCV object.
models/train_classifier.py
build_model
nongnoochr/diaster-response-app
0
python
def build_model(): '\n Create a GridSearchCV object of a pipeline where the MultiOutputClassifier \n is used along with the RandomForestClassifier as an estimator\n\n Returns:\n model: GridSearchCV object.\n ' pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(RandomForestClassifier(random_state=1, n_jobs=(- 1))))]) parameters = {'clf__estimator__min_samples_leaf': [2, 5], 'clf__estimator__n_estimators': [10, 30]} model = GridSearchCV(pipeline, param_grid=parameters, cv=5) return model
def build_model(): '\n Create a GridSearchCV object of a pipeline where the MultiOutputClassifier \n is used along with the RandomForestClassifier as an estimator\n\n Returns:\n model: GridSearchCV object.\n ' pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(RandomForestClassifier(random_state=1, n_jobs=(- 1))))]) parameters = {'clf__estimator__min_samples_leaf': [2, 5], 'clf__estimator__n_estimators': [10, 30]} model = GridSearchCV(pipeline, param_grid=parameters, cv=5) return model<|docstring|>Create a GridSearchCV object of a pipeline where the MultiOutputClassifier is used along with the RandomForestClassifier as an estimator Returns: model: GridSearchCV object.<|endoftext|>
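Tying load_data, build_model, and evaluate_model together, a hedged sketch of the training flow; the database path and the train/test split parameters are assumptions, not taken from these records.

from sklearn.model_selection import train_test_split

X, Y, category_names = load_data('data/DisasterResponse.db')  # assumed path
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42)  # assumed split

model = build_model()        # GridSearchCV over the pipeline above
model.fit(X_train, Y_train)  # runs the 5-fold grid search
evaluate_model(model, X_test, Y_test, category_names)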
05fde6d985ed9c85dad3277805aa4d25260bf7132654c40dad0aaf6bcdbbf653
def evaluate_model(model, X_test, Y_test, category_names): "\n Run the predict method of the specified model with a given input data and\n print out the best parameter found by the model (GridSearchCV object) and also\n report the f1 score, precision and recall for each output category of the dataset\n\n Args:\n model: A trained GridSearchCV object\n X_test: Array of feature's test data\n Y_test: Array of label's test data\n category_names: List of category names corresponding to each column in Y_test\n " print('{} : Start model.predict'.format(datetime.datetime.now())) Y_pred = model.predict(X_test) print('{} : Finish model.predict'.format(datetime.datetime.now())) print('Best parameters:\n{}'.format(model.best_params_)) for (index, col_name) in enumerate(category_names): print('Column#{} - {}'.format(index, col_name)) print(classification_report(Y_test[:, index], Y_pred[:, index]))
Run the predict method of the specified model with a given input data and print out the best parameter found by the model (GridSearchCV object) and also report the f1 score, precision and recall for each output category of the dataset Args: model: A trained GridSearchCV object X_test: Array of feature's test data Y_test: Array of label's test data category_names: List of category names corresponding to each column in Y_test
models/train_classifier.py
evaluate_model
nongnoochr/diaster-response-app
0
python
def evaluate_model(model, X_test, Y_test, category_names): "\n Run the predict method of the specified model with a given input data and\n print out the best parameter found by the model (GridSearchCV object) and also\n report the f1 score, precision and recall for each output category of the dataset\n\n Args:\n model: A trained GridSearchCV object\n X_test: Array of feature's test data\n Y_test: Array of label's test data\n category_names: List of category names corresponding to each column in Y_test\n " print('{} : Start model.predict'.format(datetime.datetime.now())) Y_pred = model.predict(X_test) print('{} : Finish model.predict'.format(datetime.datetime.now())) print('Best parameters:\n{}'.format(model.best_params_)) for (index, col_name) in enumerate(category_names): print('Column#{} - {}'.format(index, col_name)) print(classification_report(Y_test[:, index], Y_pred[:, index]))
def evaluate_model(model, X_test, Y_test, category_names): "\n Run the predict method of the specified model with a given input data and\n print out the best parameter found by the model (GridSearchCV object) and also\n report the f1 score, precision and recall for each output category of the dataset\n\n Args:\n model: A trained GridSearchCV object\n X_test: Array of feature's test data\n Y_test: Array of label's test data\n category_names: List of category names corresponding to each column in Y_test\n " print('{} : Start model.predict'.format(datetime.datetime.now())) Y_pred = model.predict(X_test) print('{} : Finish model.predict'.format(datetime.datetime.now())) print('Best parameters:\n{}'.format(model.best_params_)) for (index, col_name) in enumerate(category_names): print('Column#{} - {}'.format(index, col_name)) print(classification_report(Y_test[:, index], Y_pred[:, index]))<|docstring|>Run the predict method of the specified model with a given input data and print out the best parameter found by the model (GridSearchCV object) and also report the f1 score, precision and recall for each output category of the dataset Args: model: A trained GridSearchCV object X_test: Array of feature's test data Y_test: Array of label's test data category_names: List of category names corresponding to each column in Y_test<|endoftext|>
44145b7d17f229bcbcf966e1175040bafade619ecdd4d3d03ddeb8d20c79074b
def save_model(model, model_filepath): '\n Save a model to a pickle file at the specified file path\n\n Args:\n model: A model object\n model_filepath: A relative path of the output file path\n ' with open(model_filepath, 'wb') as file: pickle.dump(model, file)
Save a model to a pickle file at the specified file path Args: model: A model object model_filepath: A relative path of the output file path
models/train_classifier.py
save_model
nongnoochr/diaster-response-app
0
python
def save_model(model, model_filepath): '\n Save a model to a pickle file at the specified file path\n\n Args:\n model: A model object\n model_filepath: A relative path of the output file path\n ' with open(model_filepath, 'wb') as file: pickle.dump(model, file)
def save_model(model, model_filepath): '\n Save a model to a pickle file at the specified file path\n\n Args:\n model: A model object\n model_filepath: A relative path of the output file path\n ' with open(model_filepath, 'wb') as file: pickle.dump(model, file)<|docstring|>Save a model to a pickle file at the specified file path Args: model: A model object model_filepath: A relative path of the output file path<|endoftext|>
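A model written by save_model can later be restored with pickle.load; a minimal sketch, where 'classifier.pkl' is a placeholder for whatever model_filepath was used:

    import pickle

    with open('classifier.pkl', 'rb') as f:
        model = pickle.load(f)  # the trained GridSearchCV object, ready for model.predict(...)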
701edf4c27ca473c3dbe243ce87bb069255462a81a9f6238e78d5b00b5e59b85
def run_update(ydl): '\n Update the program file with the latest version from the repository\n Returns whether the program should terminate\n ' JSON_URL = 'https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest' def report_error(msg, expected=False): ydl.report_error(msg, tb=('' if expected else None)) def report_unable(action, expected=False): report_error(f'Unable to {action}', expected) def report_permission_error(file): report_unable(f'write to {file}; Try running as administrator', True) def report_network_error(action, delim=';'): report_unable(f'{action}{delim} Visit https://github.com/yt-dlp/yt-dlp/releases/latest', True) def calc_sha256sum(path): h = hashlib.sha256() b = bytearray((128 * 1024)) mv = memoryview(b) with open(os.path.realpath(path), 'rb', buffering=0) as f: for n in iter((lambda : f.readinto(mv)), 0): h.update(mv[:n]) return h.hexdigest() try: version_info = ydl._opener.open(JSON_URL).read().decode('utf-8') version_info = json.loads(version_info) except Exception: return report_network_error('obtain version info', delim='; Please try again later or') def version_tuple(version_str): return tuple(map(int, version_str.split('.'))) version_id = version_info['tag_name'] ydl.to_screen(f'Latest version: {version_id}, Current version: {__version__}') if (version_tuple(__version__) >= version_tuple(version_id)): ydl.to_screen(f'yt-dlp is up to date ({__version__})') return err = is_non_updateable() if err: return report_error(err, True) filename = compat_realpath((sys.executable if hasattr(sys, 'frozen') else sys.argv[0])) ydl.to_screen(f'Current Build Hash {calc_sha256sum(filename)}') ydl.to_screen(f'Updating to version {version_id} ...') version_labels = {'zip_3': '', 'win_exe_64': '.exe', 'py2exe_64': '_min.exe', 'win_exe_32': '_x86.exe', 'mac_exe_64': '_macos'} def get_bin_info(bin_or_exe, version): label = version_labels[('%s_%s' % (bin_or_exe, version))] return next((i for i in version_info['assets'] if (i['name'] == ('yt-dlp%s' % label))), {}) def get_sha256sum(bin_or_exe, version): filename = ('yt-dlp%s' % version_labels[('%s_%s' % (bin_or_exe, version))]) urlh = next((i for i in version_info['assets'] if (i['name'] in 'SHA2-256SUMS')), {}).get('browser_download_url') if (not urlh): return None hash_data = ydl._opener.open(urlh).read().decode('utf-8') return dict((ln.split()[::(- 1)] for ln in hash_data.splitlines())).get(filename) if (not os.access(filename, os.W_OK)): return report_permission_error(filename) variant = detect_variant() if (variant in ('win_exe', 'py2exe')): directory = os.path.dirname(filename) if (not os.access(directory, os.W_OK)): return report_permission_error(directory) try: if os.path.exists((filename + '.old')): os.remove((filename + '.old')) except (IOError, OSError): return report_unable('remove the old version') try: arch = platform.architecture()[0][:2] url = get_bin_info(variant, arch).get('browser_download_url') if (not url): return report_network_error('fetch updates') urlh = ydl._opener.open(url) newcontent = urlh.read() urlh.close() except (IOError, OSError): return report_network_error('download latest version') try: with open((filename + '.new'), 'wb') as outf: outf.write(newcontent) except (IOError, OSError): return report_permission_error(f'{filename}.new') expected_sum = get_sha256sum(variant, arch) if (not expected_sum): ydl.report_warning('no hash information found for the release') elif (calc_sha256sum((filename + '.new')) != expected_sum): report_network_error('verify the new executable') try: os.remove((filename + '.new')) except OSError: return report_unable('remove corrupt download') try: os.rename(filename, (filename + '.old')) except (IOError, OSError): return report_unable('move current version') try: os.rename((filename + '.new'), filename) except (IOError, OSError): report_unable('overwrite current version') os.rename((filename + '.old'), filename) return try: Popen(('ping 127.0.0.1 -n 5 -w 1000 & del /F "%s.old"' % filename), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) ydl.to_screen(('Updated yt-dlp to version %s' % version_id)) return True except OSError: report_unable('delete the old version') elif (variant in ('zip', 'mac_exe')): pack_type = ('3' if (variant == 'zip') else '64') try: url = get_bin_info(variant, pack_type).get('browser_download_url') if (not url): return report_network_error('fetch updates') urlh = ydl._opener.open(url) newcontent = urlh.read() urlh.close() except (IOError, OSError): return report_network_error('download the latest version') expected_sum = get_sha256sum(variant, pack_type) if (not expected_sum): ydl.report_warning('no hash information found for the release') elif (hashlib.sha256(newcontent).hexdigest() != expected_sum): return report_network_error('verify the new package') try: with open(filename, 'wb') as outf: outf.write(newcontent) except (IOError, OSError): return report_unable('overwrite current version') ydl.to_screen(('Updated yt-dlp to version %s; Restart yt-dlp to use the new version' % version_id)) return assert False, f'Unhandled variant: {variant}'
Update the program file with the latest version from the repository Returns whether the program should terminate
yt_dlp/update.py
run_update
wlritchi/yt-dlp
64
python
def run_update(ydl): '\n Update the program file with the latest version from the repository\n Returns whether the program should terminate\n ' JSON_URL = 'https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest' def report_error(msg, expected=False): ydl.report_error(msg, tb=('' if expected else None)) def report_unable(action, expected=False): report_error(f'Unable to {action}', expected) def report_permission_error(file): report_unable(f'write to {file}; Try running as administrator', True) def report_network_error(action, delim=';'): report_unable(f'{action}{delim} Visit https://github.com/yt-dlp/yt-dlp/releases/latest', True) def calc_sha256sum(path): h = hashlib.sha256() b = bytearray((128 * 1024)) mv = memoryview(b) with open(os.path.realpath(path), 'rb', buffering=0) as f: for n in iter((lambda : f.readinto(mv)), 0): h.update(mv[:n]) return h.hexdigest() try: version_info = ydl._opener.open(JSON_URL).read().decode('utf-8') version_info = json.loads(version_info) except Exception: return report_network_error('obtain version info', delim='; Please try again later or') def version_tuple(version_str): return tuple(map(int, version_str.split('.'))) version_id = version_info['tag_name'] ydl.to_screen(f'Latest version: {version_id}, Current version: {__version__}') if (version_tuple(__version__) >= version_tuple(version_id)): ydl.to_screen(f'yt-dlp is up to date ({__version__})') return err = is_non_updateable() if err: return report_error(err, True) filename = compat_realpath((sys.executable if hasattr(sys, 'frozen') else sys.argv[0])) ydl.to_screen(f'Current Build Hash {calc_sha256sum(filename)}') ydl.to_screen(f'Updating to version {version_id} ...') version_labels = {'zip_3': '', 'win_exe_64': '.exe', 'py2exe_64': '_min.exe', 'win_exe_32': '_x86.exe', 'mac_exe_64': '_macos'} def get_bin_info(bin_or_exe, version): label = version_labels[('%s_%s' % (bin_or_exe, version))] return next((i for i in version_info['assets'] if (i['name'] == ('yt-dlp%s' % label))), {}) def get_sha256sum(bin_or_exe, version): filename = ('yt-dlp%s' % version_labels[('%s_%s' % (bin_or_exe, version))]) urlh = next((i for i in version_info['assets'] if (i['name'] in 'SHA2-256SUMS')), {}).get('browser_download_url') if (not urlh): return None hash_data = ydl._opener.open(urlh).read().decode('utf-8') return dict((ln.split()[::(- 1)] for ln in hash_data.splitlines())).get(filename) if (not os.access(filename, os.W_OK)): return report_permission_error(filename) variant = detect_variant() if (variant in ('win_exe', 'py2exe')): directory = os.path.dirname(filename) if (not os.access(directory, os.W_OK)): return report_permission_error(directory) try: if os.path.exists((filename + '.old')): os.remove((filename + '.old')) except (IOError, OSError): return report_unable('remove the old version') try: arch = platform.architecture()[0][:2] url = get_bin_info(variant, arch).get('browser_download_url') if (not url): return report_network_error('fetch updates') urlh = ydl._opener.open(url) newcontent = urlh.read() urlh.close() except (IOError, OSError): return report_network_error('download latest version') try: with open((filename + '.new'), 'wb') as outf: outf.write(newcontent) except (IOError, OSError): return report_permission_error(f'{filename}.new') expected_sum = get_sha256sum(variant, arch) if (not expected_sum): ydl.report_warning('no hash information found for the release') elif (calc_sha256sum((filename + '.new')) != expected_sum): report_network_error('verify the new executable') try: os.remove((filename + '.new')) except OSError: return report_unable('remove corrupt download') try: os.rename(filename, (filename + '.old')) except (IOError, OSError): return report_unable('move current version') try: os.rename((filename + '.new'), filename) except (IOError, OSError): report_unable('overwrite current version') os.rename((filename + '.old'), filename) return try: Popen(('ping 127.0.0.1 -n 5 -w 1000 & del /F "%s.old"' % filename), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) ydl.to_screen(('Updated yt-dlp to version %s' % version_id)) return True except OSError: report_unable('delete the old version') elif (variant in ('zip', 'mac_exe')): pack_type = ('3' if (variant == 'zip') else '64') try: url = get_bin_info(variant, pack_type).get('browser_download_url') if (not url): return report_network_error('fetch updates') urlh = ydl._opener.open(url) newcontent = urlh.read() urlh.close() except (IOError, OSError): return report_network_error('download the latest version') expected_sum = get_sha256sum(variant, pack_type) if (not expected_sum): ydl.report_warning('no hash information found for the release') elif (hashlib.sha256(newcontent).hexdigest() != expected_sum): return report_network_error('verify the new package') try: with open(filename, 'wb') as outf: outf.write(newcontent) except (IOError, OSError): return report_unable('overwrite current version') ydl.to_screen(('Updated yt-dlp to version %s; Restart yt-dlp to use the new version' % version_id)) return assert False, f'Unhandled variant: {variant}'
def run_update(ydl): '\n Update the program file with the latest version from the repository\n Returns whether the program should terminate\n ' JSON_URL = 'https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest' def report_error(msg, expected=False): ydl.report_error(msg, tb=('' if expected else None)) def report_unable(action, expected=False): report_error(f'Unable to {action}', expected) def report_permission_error(file): report_unable(f'write to {file}; Try running as administrator', True) def report_network_error(action, delim=';'): report_unable(f'{action}{delim} Visit https://github.com/yt-dlp/yt-dlp/releases/latest', True) def calc_sha256sum(path): h = hashlib.sha256() b = bytearray((128 * 1024)) mv = memoryview(b) with open(os.path.realpath(path), 'rb', buffering=0) as f: for n in iter((lambda : f.readinto(mv)), 0): h.update(mv[:n]) return h.hexdigest() try: version_info = ydl._opener.open(JSON_URL).read().decode('utf-8') version_info = json.loads(version_info) except Exception: return report_network_error('obtain version info', delim='; Please try again later or') def version_tuple(version_str): return tuple(map(int, version_str.split('.'))) version_id = version_info['tag_name'] ydl.to_screen(f'Latest version: {version_id}, Current version: {__version__}') if (version_tuple(__version__) >= version_tuple(version_id)): ydl.to_screen(f'yt-dlp is up to date ({__version__})') return err = is_non_updateable() if err: return report_error(err, True) filename = compat_realpath((sys.executable if hasattr(sys, 'frozen') else sys.argv[0])) ydl.to_screen(f'Current Build Hash {calc_sha256sum(filename)}') ydl.to_screen(f'Updating to version {version_id} ...') version_labels = {'zip_3': '', 'win_exe_64': '.exe', 'py2exe_64': '_min.exe', 'win_exe_32': '_x86.exe', 'mac_exe_64': '_macos'} def get_bin_info(bin_or_exe, version): label = version_labels[('%s_%s' % (bin_or_exe, version))] return next((i for i in version_info['assets'] if (i['name'] == ('yt-dlp%s' % label))), {}) def get_sha256sum(bin_or_exe, version): filename = ('yt-dlp%s' % version_labels[('%s_%s' % (bin_or_exe, version))]) urlh = next((i for i in version_info['assets'] if (i['name'] in 'SHA2-256SUMS')), {}).get('browser_download_url') if (not urlh): return None hash_data = ydl._opener.open(urlh).read().decode('utf-8') return dict((ln.split()[::(- 1)] for ln in hash_data.splitlines())).get(filename) if (not os.access(filename, os.W_OK)): return report_permission_error(filename) variant = detect_variant() if (variant in ('win_exe', 'py2exe')): directory = os.path.dirname(filename) if (not os.access(directory, os.W_OK)): return report_permission_error(directory) try: if os.path.exists((filename + '.old')): os.remove((filename + '.old')) except (IOError, OSError): return report_unable('remove the old version') try: arch = platform.architecture()[0][:2] url = get_bin_info(variant, arch).get('browser_download_url') if (not url): return report_network_error('fetch updates') urlh = ydl._opener.open(url) newcontent = urlh.read() urlh.close() except (IOError, OSError): return report_network_error('download latest version') try: with open((filename + '.new'), 'wb') as outf: outf.write(newcontent) except (IOError, OSError): return report_permission_error(f'{filename}.new') expected_sum = get_sha256sum(variant, arch) if (not expected_sum): ydl.report_warning('no hash information found for the release') elif (calc_sha256sum((filename + '.new')) != expected_sum): report_network_error('verify the new executable') try: os.remove((filename + '.new')) except OSError: return report_unable('remove corrupt download') try: os.rename(filename, (filename + '.old')) except (IOError, OSError): return report_unable('move current version') try: os.rename((filename + '.new'), filename) except (IOError, OSError): report_unable('overwrite current version') os.rename((filename + '.old'), filename) return try: Popen(('ping 127.0.0.1 -n 5 -w 1000 & del /F "%s.old"' % filename), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) ydl.to_screen(('Updated yt-dlp to version %s' % version_id)) return True except OSError: report_unable('delete the old version') elif (variant in ('zip', 'mac_exe')): pack_type = ('3' if (variant == 'zip') else '64') try: url = get_bin_info(variant, pack_type).get('browser_download_url') if (not url): return report_network_error('fetch updates') urlh = ydl._opener.open(url) newcontent = urlh.read() urlh.close() except (IOError, OSError): return report_network_error('download the latest version') expected_sum = get_sha256sum(variant, pack_type) if (not expected_sum): ydl.report_warning('no hash information found for the release') elif (hashlib.sha256(newcontent).hexdigest() != expected_sum): return report_network_error('verify the new package') try: with open(filename, 'wb') as outf: outf.write(newcontent) except (IOError, OSError): return report_unable('overwrite current version') ydl.to_screen(('Updated yt-dlp to version %s; Restart yt-dlp to use the new version' % version_id)) return assert False, f'Unhandled variant: {variant}'<|docstring|>Update the program file with the latest version from the repository Returns whether the program should terminate<|endoftext|>
c846159964fca60e8619abcad7e150d6ffa91e4e6c86eb1b89290165c28ae2e3
def escape_tags(value, valid_tags): '\n Strips text from the given html string, leaving only tags.\n This functionality requires BeautifulSoup, nothing will be\n done otherwise.\n\n This isn\'t perfect. Someone could put javascript in here:\n <a onClick="alert(\'hi\');">test</a>\n\n So if you use valid_tags, you still need to trust your data entry.\n Or we could try:\n - only escape the non matching bits\n - use BeautifulSoup to understand the elements, escape everything\n else and remove potentially harmful attributes (onClick).\n - Remove this feature entirely. Half-escaping things securely is\n very difficult, developers should not be lured into a false\n sense of security.\n ' value = conditional_escape(value) if valid_tags: tag_re = re.compile(('&lt;(\\s*/?\\s*(%s))(.*?\\s*)&gt;' % '|'.join((re.escape(tag) for tag in valid_tags)))) value = tag_re.sub(_replace_quot, value) value = value.replace('&lt;!--', '<!--').replace('--&gt;', '-->') return mark_safe(value)
Strips text from the given html string, leaving only tags. This functionality requires BeautifulSoup, nothing will be done otherwise. This isn't perfect. Someone could put javascript in here: <a onClick="alert('hi');">test</a> So if you use valid_tags, you still need to trust your data entry. Or we could try: - only escape the non matching bits - use BeautifulSoup to understand the elements, escape everything else and remove potentially harmful attributes (onClick). - Remove this feature entirely. Half-escaping things securely is very difficult, developers should not be lured into a false sense of security.
djangoseo/utils.py
escape_tags
adilshehzad786/django-seo2
66
python
def escape_tags(value, valid_tags): '\n Strips text from the given html string, leaving only tags.\n This functionality requires BeautifulSoup, nothing will be\n done otherwise.\n\n This isn\'t perfect. Someone could put javascript in here:\n <a onClick="alert(\'hi\');">test</a>\n\n So if you use valid_tags, you still need to trust your data entry.\n Or we could try:\n - only escape the non matching bits\n - use BeautifulSoup to understand the elements, escape everything\n else and remove potentially harmful attributes (onClick).\n - Remove this feature entirely. Half-escaping things securely is\n very difficult, developers should not be lured into a false\n sense of security.\n ' value = conditional_escape(value) if valid_tags: tag_re = re.compile(('&lt;(\\s*/?\\s*(%s))(.*?\\s*)&gt;' % '|'.join((re.escape(tag) for tag in valid_tags)))) value = tag_re.sub(_replace_quot, value) value = value.replace('&lt;!--', '<!--').replace('--&gt;', '-->') return mark_safe(value)
def escape_tags(value, valid_tags): '\n Strips text from the given html string, leaving only tags.\n This functionality requires BeautifulSoup, nothing will be\n done otherwise.\n\n This isn\'t perfect. Someone could put javascript in here:\n <a onClick="alert(\'hi\');">test</a>\n\n So if you use valid_tags, you still need to trust your data entry.\n Or we could try:\n - only escape the non matching bits\n - use BeautifulSoup to understand the elements, escape everything\n else and remove potentially harmful attributes (onClick).\n - Remove this feature entirely. Half-escaping things securely is\n very difficult, developers should not be lured into a false\n sense of security.\n ' value = conditional_escape(value) if valid_tags: tag_re = re.compile(('&lt;(\\s*/?\\s*(%s))(.*?\\s*)&gt;' % '|'.join((re.escape(tag) for tag in valid_tags)))) value = tag_re.sub(_replace_quot, value) value = value.replace('&lt;!--', '<!--').replace('--&gt;', '-->') return mark_safe(value)<|docstring|>Strips text from the given html string, leaving only tags. This functionality requires BeautifulSoup, nothing will be done otherwise. This isn't perfect. Someone could put javascript in here: <a onClick="alert('hi');">test</a> So if you use valid_tags, you still need to trust your data entry. Or we could try: - only escape the non matching bits - use BeautifulSoup to understand the elements, escape everything else and remove potentially harmful attributes (onClick). - Remove this feature entirely. Half-escaping things securely is very difficult, developers should not be lured into a false sense of security.<|endoftext|>
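Illustrative behaviour of escape_tags, assuming _replace_quot (defined elsewhere in djangoseo/utils.py) simply rebuilds the angle brackets of whitelisted tags:

    >>> escape_tags('<b>bold</b> <script>alert(1)</script>', ['b'])
    '<b>bold</b> &lt;script&gt;alert(1)&lt;/script&gt;'

Only tags named in valid_tags are unescaped; everything else stays HTML-escaped, and HTML comments are restored by the explicit replace calls.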
7fc5ede4b1a5d1260f251f9c98bb114fe0de04ce620481ded00334fc7c9f4e3b
def _get_seo_content_types(seo_models): 'Returns a list of content types from the models defined in settings.' try: return [ContentType.objects.get_for_model(m).id for m in seo_models] except Exception: return []
Returns a list of content types from the models defined in settings.
djangoseo/utils.py
_get_seo_content_types
adilshehzad786/django-seo2
66
python
def _get_seo_content_types(seo_models): try: return [ContentType.objects.get_for_model(m).id for m in seo_models] except Exception: return []
def _get_seo_content_types(seo_models): try: return [ContentType.objects.get_for_model(m).id for m in seo_models] except Exception: return []<|docstring|>Returns a list of content types from the models defined in settings.<|endoftext|>
dad0551a029b244d0d5ae3f898c0dcdf06aaeb679b96dbf761a4d01fcd0f5272
def __init__(self, Lower=(- 10.0), Upper=10.0): 'Initialize the Quintic benchmark.\n\n Args:\n Lower (Optional[float]): Lower bound of problem.\n Upper (Optional[float]): Upper bound of problem.\n\n See Also:\n :func:`NiaPy.benchmarks.Benchmark.__init__`\n ' Benchmark.__init__(self, Lower, Upper)
Initialize the Quintic benchmark. Args: Lower (Optional[float]): Lower bound of problem. Upper (Optional[float]): Upper bound of problem. See Also: :func:`NiaPy.benchmarks.Benchmark.__init__`
NiaPy/benchmarks/quintic.py
__init__
lucijabrezocnik/NiaPy
0
python
def __init__(self, Lower=(- 10.0), Upper=10.0): 'Initialize the Quintic benchmark.\n\n Args:\n Lower (Optional[float]): Lower bound of problem.\n Upper (Optional[float]): Upper bound of problem.\n\n See Also:\n :func:`NiaPy.benchmarks.Benchmark.__init__`\n ' Benchmark.__init__(self, Lower, Upper)
def __init__(self, Lower=(- 10.0), Upper=10.0): 'Initialize the Quintic benchmark.\n\n Args:\n Lower (Optional[float]): Lower bound of problem.\n Upper (Optional[float]): Upper bound of problem.\n\n See Also:\n :func:`NiaPy.benchmarks.Benchmark.__init__`\n ' Benchmark.__init__(self, Lower, Upper)<|docstring|>Initialize the Quintic benchmark. Args: Lower (Optional[float]): Lower bound of problem. Upper (Optional[float]): Upper bound of problem. See Also: :func:`NiaPy.benchmarks.Benchmark.__init__`<|endoftext|>
7c125cdd7e7a9d612f575ba81ebb9e273042e1c9ac72b9717ce7dd148c02bf1c
@staticmethod def latex_code(): 'Return the latex code of the problem.\n\n Returns:\n str: Latex code\n ' return '$f(\\mathbf{x}) = \\sum_{i=1}^D \\left| x_i^5 - 3x_i^4 +\n 4x_i^3 + 2x_i^2 - 10x_i - 4\\right|$'
Return the latex code of the problem. Returns: str: Latex code
NiaPy/benchmarks/quintic.py
latex_code
lucijabrezocnik/NiaPy
0
python
@staticmethod def latex_code(): 'Return the latex code of the problem.\n\n Returns:\n str: Latex code\n ' return '$f(\\mathbf{x}) = \\sum_{i=1}^D \\left| x_i^5 - 3x_i^4 +\n 4x_i^3 + 2x_i^2 - 10x_i - 4\\right|$'
@staticmethod def latex_code(): 'Return the latex code of the problem.\n\n Returns:\n str: Latex code\n ' return '$f(\\mathbf{x}) = \\sum_{i=1}^D \\left| x_i^5 - 3x_i^4 +\n 4x_i^3 + 2x_i^2 - 10x_i - 4\\right|$'<|docstring|>Return the latex code of the problem. Returns: str: Latex code<|endoftext|>
396e8e5145b1a11c22095d219a15dc0ed9d00098b17739d493fcb801fa96602c
def function(self): 'Return benchmark evaluation function.\n\n Returns:\n Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function\n ' def evaluate(D, sol): 'Fitness function.\n\n Args:\n D (int): Dimensionality of the problem\n sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n Returns:\n float: Fitness value for the solution.\n ' val = 0.0 for i in range(D): val += abs((((((math.pow(sol[i], 5) - (3.0 * math.pow(sol[i], 4))) + (4.0 * math.pow(sol[i], 3))) + (2.0 * math.pow(sol[i], 2))) - (10.0 * sol[i])) - 4)) return val return evaluate
Return benchmark evaluation function. Returns: Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function
NiaPy/benchmarks/quintic.py
function
lucijabrezocnik/NiaPy
0
python
def function(self): 'Return benchmark evaluation function.\n\n Returns:\n Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function\n ' def evaluate(D, sol): 'Fitness function.\n\n Args:\n D (int): Dimensionality of the problem\n sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n Returns:\n float: Fitness value for the solution.\n ' val = 0.0 for i in range(D): val += abs((((((math.pow(sol[i], 5) - (3.0 * math.pow(sol[i], 4))) + (4.0 * math.pow(sol[i], 3))) + (2.0 * math.pow(sol[i], 2))) - (10.0 * sol[i])) - 4)) return val return evaluate
def function(self): 'Return benchmark evaluation function.\n\n Returns:\n Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function\n ' def evaluate(D, sol): 'Fitness function.\n\n Args:\n D (int): Dimensionality of the problem\n sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n Returns:\n float: Fitness value for the solution.\n ' val = 0.0 for i in range(D): val += abs((((((math.pow(sol[i], 5) - (3.0 * math.pow(sol[i], 4))) + (4.0 * math.pow(sol[i], 3))) + (2.0 * math.pow(sol[i], 2))) - (10.0 * sol[i])) - 4)) return val return evaluate<|docstring|>Return benchmark evaluation function. Returns: Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function<|endoftext|>
4fa9f34f37ba61fb5f19b88f45cd5e3693dc2c3ba4441bb72ed36f7a7174a804
def evaluate(D, sol): 'Fitness function.\n\n Args:\n D (int): Dimensionality of the problem\n sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n Returns:\n float: Fitness value for the solution.\n ' val = 0.0 for i in range(D): val += abs((((((math.pow(sol[i], 5) - (3.0 * math.pow(sol[i], 4))) + (4.0 * math.pow(sol[i], 3))) + (2.0 * math.pow(sol[i], 2))) - (10.0 * sol[i])) - 4)) return val
Fitness function. Args: D (int): Dimensionality of the problem sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check. Returns: float: Fitness value for the solution.
NiaPy/benchmarks/quintic.py
evaluate
lucijabrezocnik/NiaPy
0
python
def evaluate(D, sol): 'Fitness function.\n\n Args:\n D (int): Dimensionality of the problem\n sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n Returns:\n float: Fitness value for the solution.\n ' val = 0.0 for i in range(D): val += abs((((((math.pow(sol[i], 5) - (3.0 * math.pow(sol[i], 4))) + (4.0 * math.pow(sol[i], 3))) + (2.0 * math.pow(sol[i], 2))) - (10.0 * sol[i])) - 4)) return val
def evaluate(D, sol): 'Fitness function.\n\n Args:\n D (int): Dimensionality of the problem\n sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n Returns:\n float: Fitness value for the solution.\n ' val = 0.0 for i in range(D): val += abs((((((math.pow(sol[i], 5) - (3.0 * math.pow(sol[i], 4))) + (4.0 * math.pow(sol[i], 3))) + (2.0 * math.pow(sol[i], 2))) - (10.0 * sol[i])) - 4)) return val<|docstring|>Fitness function. Args: D (int): Dimensionality of the problem sol (Union[int, float, List[int, float], numpy.ndarray]): Solution to check. Returns: float: Fitness value for the solution.<|endoftext|>
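A quick sanity check of the Quintic evaluator, worked out by hand from the formula (import path assumed from the NiaPy/benchmarks/quintic.py layout above):

    from NiaPy.benchmarks import Quintic  # assumed re-export; the module itself is NiaPy.benchmarks.quintic

    f = Quintic().function()
    print(f(2, [0.0, 0.0]))  # each term is |0 - 0 + 0 + 0 - 0 - 4| = 4, so this prints 8.0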
c9332bd7166bc5a8b2a3e636499e6cbc8e9084177b3b0df8fa6e2c6eb64fe2af
def freeParameters(self): '\n Roadrunner models do not have a concept of "free" or "fixed"\n parameters (maybe it should?). Either way, we add a cheeky method\n to the tellurium interface to roadrunner to return the names\n of the parameters we want to fit\n ' return ['k1', 'k2', 'k3']
Roadrunner models do not have a concept of "free" or "fixed" parameters (maybe it should?). Either way, we add a cheeky method to the tellurium interface to roadrunner to return the names of the parameters we want to fit
sresFromMoonfit/test/RoadrunnerProblemWithRL.py
freeParameters
CiaranWelsh/SRES
1
python
def freeParameters(self): '\n Roadrunner models do not have a concept of "free" or "fixed"\n parameters (maybe it should?). Either way, we add a cheeky method\n to the tellurium interface to roadrunner to return the names\n of the parameters we want to fit\n ' return ['k1', 'k2', 'k3']
def freeParameters(self): '\n Roadrunner models do not have a concept of "free" or "fixed"\n parameters (maybe it should?). Either way, we add a cheeky method\n to the tellurium interface to roadrunner to return the names\n of the parameters we want to fit\n ' return ['k1', 'k2', 'k3']<|docstring|>Roadrunner models do not have a concept of "free" or "fixed" parameters (maybe it should?). Either way, we add a cheeky method to the tellurium interface to roadrunner to return the names of the parameters we want to fit<|endoftext|>
51c6a0667dd4eaa1878d61acd1f5affd589bdd1f8ba90740669cfc01da4cec69
@SRES.COST_FUNCTION_CALLBACK def cost_fun(parameters, fitness, constraints): "\n Brief\n -----\n Compute difference between experimental dataset and model simulation with candidate parameters.\n This cost function is user defined and used as input to the main SRES algorithm. The input\n to this function is always [parameters, fitness and constraints]. You do not need to worry\n about generating candidate parameters as they are generated by the underlying algorithm. You do\n however have to worry about updating the fitness value, which you do like this:\n\n fitness.contents.value = calculated_cost\n\n Where calculated_cost is a float computed by your function. Note, that even though\n we haven't used the constraints argument in this cost function, we still need to pass it in\n as an input parameter.\n\n Details\n -------\n The underlying SRES C code requires as input a function pointer to a cost function\n that has the following signature:\n\n typedef void(*ESfcnFG)(double *, double *, double *);\n\n We can create a cost function in Python to pass to C by using the\n :py:class:`SRES.COST_FUNCTION_CALLBACK` decorator. Since the C end is\n expecting a function with three double pointer types, we must have\n as arguments to our cost function, three arguments.\n\n When coding the cost function, you need to remember that the types of\n parameter, fitness and constraints are ctypes pointers to double\n arrays in the case of the parameter and constraints argument and\n a pointer to a double in the case of fitness. To do computation\n with these types you need the value that the pointer points to, not\n the pointer. To get these, you use:\n >>> parameters.contents[0]\n In the case of pointer to a double array or\n >>> fitness.contents.value\n in the case of a pointer to a double.\n\n Args\n ----\n parameters: A list of candidate parameters with the same size as the\n dimensionality of your defined optimization problem.\n fitness: This is the value that you must compute and assign.\n\n " (x, y, sel) = get_data(**dict(zip(r.freeParameters(), parameters.contents))) cost = np.sum(np.sum(((y - y_data) ** 2))) fitness.contents.value = cost
Brief ----- Compute difference between experimental dataset and model simulation with candidate parameters. This cost function is user defined and used as input to the main SRES algorithm. The input to this function is always [parameters, fitness and constraints]. You do not need to worry about generating candidate parameters as they are generated by the underlying algorithm. You do however have to worry about updating the fitness value, which you do like this: fitness.contents.value = calculated_cost Where calculated_cost is a float computed by your function. Note, that even though we haven't used the constraints argument in this cost function, we still need to pass it in as an input parameter. Details ------- The underlying SRES C code requires as input a function pointer to a cost function that has the following signature: typedef void(*ESfcnFG)(double *, double *, double *); We can create a cost function in Python to pass to C by using the :py:class:`SRES.COST_FUNCTION_CALLBACK` decorator. Since the C end is expecting a function with three double pointer types, we must have as arguments to our cost function, three arguments. When coding the cost function, you need to remember that the types of parameter, fitness and constraints are ctypes pointers to double arrays in the case of the parameter and constraints argument and a pointer to a double in the case of fitness. To do computation with these types you need the value that the pointer points to, not the pointer. To get these, you use: >>> parameters.contents[0] In the case of pointer to a double array or >>> fitness.contents.value in the case of a pointer to a double. Args ---- parameters: A list of candidate parameters with the same size as the dimensionality of your defined optimization problem. fitness: This is the value that you must compute and assign.
sresFromMoonfit/test/RoadrunnerProblemWithRL.py
cost_fun
CiaranWelsh/SRES
1
python
@SRES.COST_FUNCTION_CALLBACK def cost_fun(parameters, fitness, constraints): "\n Brief\n -----\n Compute difference between experimental dataset and model simulation with candidate parameters.\n This cost function is user defined and used as input to the main SRES algorithm. The input\n to this function is always [parameters, fitness and constraints]. You do not need to worry\n about generating candidate parameters as they are generated by the underlying algorithm. You do\n however have to worry about updating the fitness value, which you do like this:\n\n fitness.contents.value = calculated_cost\n\n Where calculated_cost is a float computed by your function. Note, that even though\n we haven't used the constraints argument in this cost function, we still need to pass it in\n as an input parameter.\n\n Details\n -------\n The underlying SRES C code requires as input a function pointer to a cost function\n that has the following signature:\n\n typedef void(*ESfcnFG)(double *, double *, double *);\n\n We can create a cost function in Python to pass to C by using the\n :py:class:`SRES.COST_FUNCTION_CALLBACK` decorator. Since the C end is\n expecting a function with three double pointer types, we must have\n as arguments to our cost function, three arguments.\n\n When coding the cost function, you need to remember that the types of\n parameter, fitness and constraints are ctypes pointers to double\n arrays in the case of the parameter and constraints argument and\n a pointer to a double in the case of fitness. To do computation\n with these types you need the value that the pointer points to, not\n the pointer. To get these, you use:\n >>> parameters.contents[0]\n In the case of pointer to a double array or\n >>> fitness.contents.value\n in the case of a pointer to a double.\n\n Args\n ----\n parameters: A list of candidate parameters with the same size as the\n dimensionality of your defined optimization problem.\n fitness: This is the value that you must compute and assign.\n\n " (x, y, sel) = get_data(**dict(zip(r.freeParameters(), parameters.contents))) cost = np.sum(np.sum(((y - y_data) ** 2))) fitness.contents.value = cost
@SRES.COST_FUNCTION_CALLBACK def cost_fun(parameters, fitness, constraints): "\n Brief\n -----\n Compute difference between experimental dataset and model simulation with candidate parameters.\n This cost function is user defined and used as input to the main SRES algorithm. The input\n to this function is always [parameters, fitness and constraints]. You do not need to worry\n about generating candidate parameters as they are generated by the underlying algorithm. You do\n however have to worry about updating the fitness value, which you do like this:\n\n fitness.contents.value = calculated_cost\n\n Where calculated_cost is a float computed by your function. Note, that even though\n we haven't used the constraints argument in this cost function, we still need to pass it in\n as an input parameter.\n\n Details\n -------\n The underlying SRES C code requires as input a function pointer to a cost function\n that has the following signature:\n\n typedef void(*ESfcnFG)(double *, double *, double *);\n\n We can create a cost function in Python to pass to C by using the\n :py:class:`SRES.COST_FUNCTION_CALLBACK` decorator. Since the C end is\n expecting a function with three double pointer types, we must have\n as arguments to our cost function, three arguments.\n\n When coding the cost function, you need to remember that the types of\n parameter, fitness and constraints are ctypes pointers to double\n arrays in the case of the parameter and constraints argument and\n a pointer to a double in the case of fitness. To do computation\n with these types you need the value that the pointer points to, not\n the pointer. To get these, you use:\n >>> parameters.contents[0]\n In the case of pointer to a double array or\n >>> fitness.contents.value\n in the case of a pointer to a double.\n\n Args\n ----\n parameters: A list of candidate parameters with the same size as the\n dimensionality of your defined optimization problem.\n fitness: This is the value that you must compute and assign.\n\n " (x, y, sel) = get_data(**dict(zip(r.freeParameters(), parameters.contents))) cost = np.sum(np.sum(((y - y_data) ** 2))) fitness.contents.value = cost<|docstring|>Brief ----- Compute difference between experimental dataset and model simulation with candidate parameters. This cost function is user defined and used as input to the main SRES algorithm. The input to this function is always [parameters, fitness and constraints]. You do not need to worry about generating candidate parameters as they are generated by the underlying algorithm. You do however have to worry about updating the fitness value, which you do like this: fitness.contents.value = calculated_cost Where calculated_cost is a float computed by your function. Note, that even though we haven't used the constraints argument in this cost function, we still need to pass it in as an input parameter. Details ------- The underlying SRES C code requires as input a function pointer to a cost function that has the following signature: typedef void(*ESfcnFG)(double *, double *, double *); We can create a cost function in Python to pass to C by using the :py:class:`SRES.COST_FUNCTION_CALLBACK` decorator. Since the C end is expecting a function with three double pointer types, we must have as arguments to our cost function, three arguments. When coding the cost function, you need to remember that the types of parameter, fitness and constraints are ctypes pointers to double arrays in the case of the parameter and constraints argument and a pointer to a double in the case of fitness. To do computation with these types you need the value that the pointer points to, not the pointer. To get these, you use: >>> parameters.contents[0] In the case of pointer to a double array or >>> fitness.contents.value in the case of a pointer to a double. Args ---- parameters: A list of candidate parameters with the same size as the dimensionality of your defined optimization problem. fitness: This is the value that you must compute and assign.<|endoftext|>
b7c3225891a10d859420a5e44a5cd9500d838e0d06ef9c8aed7e46ceb4d02e6d
def step(self, action: dict): "Run one timestep of the environment's dynamics. When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n\n Accepts an action and returns a tuple (observation, reward, done, info).\n\n Args:\n action (object): an action provided by the agent\n\n Returns:\n observation (object): agent's observation of the current environment\n reward (float) : amount of reward returned after previous action\n done (bool): whether the episode has ended, in which case further step() calls will return undefined results\n info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)\n " sres = SRES(cost_function=cost_fun, ngen=100, lb=([0.01] * 3), ub=([10] * 3), parent_popsize=action['parent_popsize'], child_popsize=action['child_popsize'], retry=100) best_val = sres.fit(False) best_params = sres.getBestParameters() observation = best_params reward = np.log10((1 / best_val)) if (self.current_episode == self.episide_length): self.done = True else: self.current_episode += 1 return (observation, reward, self.done, '')
Run one timestep of the environment's dynamics. When end of episode is reached, you are responsible for calling `reset()` to reset this environment's state. Accepts an action and returns a tuple (observation, reward, done, info). Args: action (object): an action provided by the agent Returns: observation (object): agent's observation of the current environment reward (float) : amount of reward returned after previous action done (bool): whether the episode has ended, in which case further step() calls will return undefined results info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
sresFromMoonfit/test/RoadrunnerProblemWithRL.py
step
CiaranWelsh/SRES
1
python
def step(self, action: dict): "Run one timestep of the environment's dynamics. When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n\n Accepts an action and returns a tuple (observation, reward, done, info).\n\n Args:\n action (object): an action provided by the agent\n\n Returns:\n observation (object): agent's observation of the current environment\n reward (float) : amount of reward returned after previous action\n done (bool): whether the episode has ended, in which case further step() calls will return undefined results\n info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)\n " sres = SRES(cost_function=cost_fun, ngen=100, lb=([0.01] * 3), ub=([10] * 3), parent_popsize=action['parent_popsize'], child_popsize=action['child_popsize'], retry=100) best_val = sres.fit(False) best_params = sres.getBestParameters() observation = best_params reward = np.log10((1 / best_val)) if (self.current_episode == self.episide_length): self.done = True else: self.current_episode += 1 return (observation, reward, self.done, '')
def step(self, action: dict): "Run one timestep of the environment's dynamics. When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n\n Accepts an action and returns a tuple (observation, reward, done, info).\n\n Args:\n action (object): an action provided by the agent\n\n Returns:\n observation (object): agent's observation of the current environment\n reward (float) : amount of reward returned after previous action\n done (bool): whether the episode has ended, in which case further step() calls will return undefined results\n info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)\n " sres = SRES(cost_function=cost_fun, ngen=100, lb=([0.01] * 3), ub=([10] * 3), parent_popsize=action['parent_popsize'], child_popsize=action['child_popsize'], retry=100) best_val = sres.fit(False) best_params = sres.getBestParameters() observation = best_params reward = np.log10((1 / best_val)) if (self.current_episode == self.episide_length): self.done = True else: self.current_episode += 1 return (observation, reward, self.done, '')<|docstring|>Run one timestep of the environment's dynamics. When end of episode is reached, you are responsible for calling `reset()` to reset this environment's state. Accepts an action and returns a tuple (observation, reward, done, info). Args: action (object): an action provided by the agent Returns: observation (object): agent's observation of the current environment reward (float) : amount of reward returned after previous action done (bool): whether the episode has ended, in which case further step() calls will return undefined results info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)<|endoftext|>
82d9b483e59ace549efd5cf7d17bcee277fc0ee33d47f105b79ece3d38bb0ab9
def reset(self): "Resets the environment to an initial state and returns an initial\n observation.\n\n Note that this function should not reset the environment's random\n number generator(s); random variables in the environment's state should\n be sampled independently between multiple calls to `reset()`. In other\n words, each call of `reset()` should yield an environment suitable for\n a new episode, independent of previous episodes.\n\n Returns:\n observation (object): the initial observation.\n " self.done = False self.current_episode = 0 return self
Resets the environment to an initial state and returns an initial observation. Note that this function should not reset the environment's random number generator(s); random variables in the environment's state should be sampled independently between multiple calls to `reset()`. In other words, each call of `reset()` should yield an environment suitable for a new episode, independent of previous episodes. Returns: observation (object): the initial observation.
sresFromMoonfit/test/RoadrunnerProblemWithRL.py
reset
CiaranWelsh/SRES
1
python
def reset(self): "Resets the environment to an initial state and returns an initial\n observation.\n\n Note that this function should not reset the environment's random\n number generator(s); random variables in the environment's state should\n be sampled independently between multiple calls to `reset()`. In other\n words, each call of `reset()` should yield an environment suitable for\n a new episode, independent of previous episodes.\n\n Returns:\n observation (object): the initial observation.\n " self.done = False self.current_episode = 0 return self
def reset(self): "Resets the environment to an initial state and returns an initial\n observation.\n\n Note that this function should not reset the environment's random\n number generator(s); random variables in the environment's state should\n be sampled independently between multiple calls to `reset()`. In other\n words, each call of `reset()` should yield an environment suitable for\n a new episode, independent of previous episodes.\n\n Returns:\n observation (object): the initial observation.\n " self.done = False self.current_episode = 0 return self<|docstring|>Resets the environment to an initial state and returns an initial observation. Note that this function should not reset the environment's random number generator(s); random variables in the environment's state should be sampled independently between multiple calls to `reset()`. In other words, each call of `reset()` should yield an environment suitable for a new episode, independent of previous episodes. Returns: observation (object): the initial observation.<|endoftext|>
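Taken together, step and reset follow the OpenAI Gym interface, so an agent would drive the environment roughly like this (env stands for an instance of the enclosing environment class, and the population sizes are illustrative):

    env.reset()
    done = False
    while not done:
        action = {'parent_popsize': 15, 'child_popsize': 100}  # the two keys step() reads
        observation, reward, done, info = env.step(action)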
e70a2f98e8f7f555b7433ed1ffc9d2d472b2dcfd284e3b21df63429d0885b182
def _query_scihub(opener, query): '\n Get the data from the scihub catalogue\n and write it to a GeoPandas GeoDataFrame\n ' columns = ['identifier', 'polarisationmode', 'orbitdirection', 'acquisitiondate', 'relativeorbitnumber', 'orbitnumber', 'producttype', 'slicenumber', 'size', 'beginposition', 'endposition', 'lastrelativeorbitnumber', 'lastorbitnumber', 'uuid', 'platformidentifier', 'missiondatatakeid', 'swathidentifier', 'ingestiondate', 'sensoroperationalmode', 'geometry'] crs = 'epsg:4326' geo_df = gpd.GeoDataFrame(columns=columns, crs=crs) (index, rows, next_page) = (0, 99, 1) while next_page: url = (query + f'&rows={rows}&start={index}') try: req = opener.open(url) except URLError as error: if hasattr(error, 'reason'): logger.info(f'{CONNECTION_ERROR}{error.reason}') sys.exit() elif hasattr(error, 'code'): logger.info(f'{CONNECTION_ERROR_2}{error.code}') sys.exit() else: response = req.read().decode('utf-8') dom = xml.dom.minidom.parseString(response) acq_list = _read_xml(dom) gdf = gpd.GeoDataFrame(acq_list, columns=columns, crs=crs) geo_df = geo_df.append(gdf) next_page = scihub.next_page(dom) index += rows return geo_df
Get the data from the scihub catalogue and write it to a GeoPandas GeoDataFrame
ost/s1/search.py
_query_scihub
d-chambers/OpenSarToolkit
131
python
def _query_scihub(opener, query): '\n Get the data from the scihub catalogue\n and write it to a GeoPandas GeoDataFrame\n ' columns = ['identifier', 'polarisationmode', 'orbitdirection', 'acquisitiondate', 'relativeorbitnumber', 'orbitnumber', 'producttype', 'slicenumber', 'size', 'beginposition', 'endposition', 'lastrelativeorbitnumber', 'lastorbitnumber', 'uuid', 'platformidentifier', 'missiondatatakeid', 'swathidentifier', 'ingestiondate', 'sensoroperationalmode', 'geometry'] crs = 'epsg:4326' geo_df = gpd.GeoDataFrame(columns=columns, crs=crs) (index, rows, next_page) = (0, 99, 1) while next_page: url = (query + f'&rows={rows}&start={index}') try: req = opener.open(url) except URLError as error: if hasattr(error, 'reason'): logger.info(f'{CONNECTION_ERROR}{error.reason}') sys.exit() elif hasattr(error, 'code'): logger.info(f'{CONNECTION_ERROR_2}{error.code}') sys.exit() else: response = req.read().decode('utf-8') dom = xml.dom.minidom.parseString(response) acq_list = _read_xml(dom) gdf = gpd.GeoDataFrame(acq_list, columns=columns, crs=crs) geo_df = geo_df.append(gdf) next_page = scihub.next_page(dom) index += rows return geo_df
def _query_scihub(opener, query): '\n Get the data from the scihub catalogue\n and write it to a GeoPandas GeoDataFrame\n ' columns = ['identifier', 'polarisationmode', 'orbitdirection', 'acquisitiondate', 'relativeorbitnumber', 'orbitnumber', 'producttype', 'slicenumber', 'size', 'beginposition', 'endposition', 'lastrelativeorbitnumber', 'lastorbitnumber', 'uuid', 'platformidentifier', 'missiondatatakeid', 'swathidentifier', 'ingestiondate', 'sensoroperationalmode', 'geometry'] crs = 'epsg:4326' geo_df = gpd.GeoDataFrame(columns=columns, crs=crs) (index, rows, next_page) = (0, 99, 1) while next_page: url = (query + f'&rows={rows}&start={index}') try: req = opener.open(url) except URLError as error: if hasattr(error, 'reason'): logger.info(f'{CONNECTION_ERROR}{error.reason}') sys.exit() elif hasattr(error, 'code'): logger.info(f'{CONNECTION_ERROR_2}{error.code}') sys.exit() else: response = req.read().decode('utf-8') dom = xml.dom.minidom.parseString(response) acq_list = _read_xml(dom) gdf = gpd.GeoDataFrame(acq_list, columns=columns, crs=crs) geo_df = geo_df.append(gdf) next_page = scihub.next_page(dom) index += rows return geo_df<|docstring|>Get the data from the scihub catalogue and write it to a GeoPandas GeoDataFrame<|endoftext|>
05076f54d2b9c0e34e2119b37eca3d025a6cda4a05fa1c945047bedb99850889
def check_availability(inventory_gdf, download_dir, data_mount): 'This function checks if the data is already downloaded or\n available through a mount point on DIAS cloud\n\n :param inventory_gdf:\n :param download_dir:\n :param data_mount:\n :return:\n ' from ost import Sentinel1Scene inventory_gdf['download_path'] = inventory_gdf.identifier.apply((lambda row: str(Sentinel1Scene(row).get_path(download_dir, data_mount)))) return inventory_gdf
This function checks if the data is already downloaded or available through a mount point on DIAS cloud :param inventory_gdf: :param download_dir: :param data_mount: :return:
ost/s1/search.py
check_availability
d-chambers/OpenSarToolkit
131
python
def check_availability(inventory_gdf, download_dir, data_mount): 'This function checks if the data is already downloaded or\n available through a mount point on DIAS cloud\n\n :param inventory_gdf:\n :param download_dir:\n :param data_mount:\n :return:\n ' from ost import Sentinel1Scene inventory_gdf['download_path'] = inventory_gdf.identifier.apply((lambda row: str(Sentinel1Scene(row).get_path(download_dir, data_mount)))) return inventory_gdf
def check_availability(inventory_gdf, download_dir, data_mount): 'This function checks if the data is already downloaded or\n available through a mount point on DIAS cloud\n\n :param inventory_gdf:\n :param download_dir:\n :param data_mount:\n :return:\n ' from ost import Sentinel1Scene inventory_gdf['download_path'] = inventory_gdf.identifier.apply((lambda row: str(Sentinel1Scene(row).get_path(download_dir, data_mount)))) return inventory_gdf<|docstring|>This function checks if the data is already downloaded or available through a mount point on DIAS cloud :param inventory_gdf: :param download_dir: :param data_mount: :return:<|endoftext|>
35cfd1c6b70df3752d3aefd7e6a66c2710d42010bddf66a5d4ff49dfdcc62236
def scihub_catalogue(query_string, output, append=False, uname=None, pword=None, base_url='https://apihub.copernicus.eu/apihub'): 'This is the main search function on scihub\n\n :param query_string:\n :param output:\n :param append:\n :param uname:\n :param pword:\n :return:\n ' output = str(output) hub = f'{base_url}/search?q=' opener = scihub.connect(uname, pword, base_url) query = f'{hub}{query_string}' gdf = _query_scihub(opener, query) if (output[(- 4):] == '.shp'): logger.info(f'Writing inventory data to shape file: {output}') _to_shapefile(gdf, output, append) elif (output[(- 5):] == '.gpkg'): logger.info(f'Writing inventory data to geopackage file: {output}') _to_geopackage(gdf, output, append) else: logger.info(f'Writing inventory data to PostGIS table: {output}') db_connect = pgHandler() _to_postgis(gdf, db_connect, output)
This is the main search function on scihub :param query_string: :param output: :param append: :param uname: :param pword: :return:
ost/s1/search.py
scihub_catalogue
d-chambers/OpenSarToolkit
131
python
def scihub_catalogue(query_string, output, append=False, uname=None, pword=None, base_url='https://apihub.copernicus.eu/apihub'): 'This is the main search function on scihub.\n\n :param query_string: fully formatted scihub OpenSearch query string\n :param output: output file (.shp or .gpkg) or name of a PostGIS table\n :param append: if True, append to an existing output file\n :param uname: scihub username\n :param pword: scihub password\n :param base_url: scihub API endpoint\n :return:\n ' output = str(output) hub = f'{base_url}/search?q=' opener = scihub.connect(uname, pword, base_url) query = f'{hub}{query_string}' gdf = _query_scihub(opener, query) if (output[(- 4):] == '.shp'): logger.info(f'Writing inventory data to shape file: {output}') _to_shapefile(gdf, output, append) elif (output[(- 5):] == '.gpkg'): logger.info(f'Writing inventory data to geopackage file: {output}') _to_geopackage(gdf, output, append) else: logger.info(f'Writing inventory data to PostGIS table: {output}') db_connect = pgHandler() _to_postgis(gdf, db_connect, output)
def scihub_catalogue(query_string, output, append=False, uname=None, pword=None, base_url='https://apihub.copernicus.eu/apihub'): 'This is the main search function on scihub.\n\n :param query_string: fully formatted scihub OpenSearch query string\n :param output: output file (.shp or .gpkg) or name of a PostGIS table\n :param append: if True, append to an existing output file\n :param uname: scihub username\n :param pword: scihub password\n :param base_url: scihub API endpoint\n :return:\n ' output = str(output) hub = f'{base_url}/search?q=' opener = scihub.connect(uname, pword, base_url) query = f'{hub}{query_string}' gdf = _query_scihub(opener, query) if (output[(- 4):] == '.shp'): logger.info(f'Writing inventory data to shape file: {output}') _to_shapefile(gdf, output, append) elif (output[(- 5):] == '.gpkg'): logger.info(f'Writing inventory data to geopackage file: {output}') _to_geopackage(gdf, output, append) else: logger.info(f'Writing inventory data to PostGIS table: {output}') db_connect = pgHandler() _to_postgis(gdf, db_connect, output)<|docstring|>This is the main search function on scihub. :param query_string: fully formatted scihub OpenSearch query string :param output: output file (.shp or .gpkg) or name of a PostGIS table :param append: if True, append to an existing output file :param uname: scihub username :param pword: scihub password :param base_url: scihub API endpoint :return:<|endoftext|>
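A usage sketch for scihub_catalogue under the same assumptions; the query string is an illustrative scihub OpenSearch expression and the credentials are placeholders. Note that output routing is driven purely by the extension of the output argument: .shp writes a shapefile, .gpkg a geopackage, and anything else is treated as a PostGIS table name.

from ost.s1.search import scihub_catalogue

query = ('producttype:GRD AND platformname:Sentinel-1 '
         'AND beginposition:[2021-01-01T00:00:00.000Z TO 2021-01-31T23:59:59.999Z]')
scihub_catalogue(query, '/data/inventory.gpkg', append=False,
                 uname='user', pword='secret')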
938497d7fe580abe80493a41f86db303f965e2f7856cbcf9aa1f49f5202c3e0c
def __get_acceptable(self): 'A list of symbols that the configuration would accept in its current state.' return self.configuration.acceptableSymbols()
A list of symbols that the configuration would accept in its current state.
pyxb/utils/fac.py
__get_acceptable
maciekwawro/pyxb
123
python
def __get_acceptable(self): return self.configuration.acceptableSymbols()
def __get_acceptable(self): return self.configuration.acceptableSymbols()<|docstring|>A list of symbols that the configuration would accept in its current state.<|endoftext|>
c09a4cd6e05eafaaec184ccce93dbdd93f66870143caa53749371583d5406554
def __init__(self, symbol, is_initial, final_update=None, is_unordered_catenation=False): 'Create a FAC state.\n\n @param symbol: The symbol associated with the state.\n Normally initialized from the L{Symbol.metadata} value. The\n state may be entered if, among other conditions, the L{match}\n routine accepts the proposed input as being consistent with\n this value.\n\n @param is_initial: C{True} iff this state may serve as the\n first state of the automaton.\n\n @param final_update: C{None} if this state is not an\n accepting state of the automaton; otherwise a set of\n L{UpdateInstruction} values that must be satisfied by the\n counter values in a configuration as a further restriction of\n acceptance.\n\n @param is_unordered_catenation: C{True} if this state has\n subautomata that must be matched to execute the unordered\n catenation of an L{All} node; C{False} if this is a regular\n symbol.' self.__symbol = symbol self.__isInitial = (not (not is_initial)) self.__finalUpdate = final_update self.__isUnorderedCatenation = is_unordered_catenation
Create a FAC state. @param symbol: The symbol associated with the state. Normally initialized from the L{Symbol.metadata} value. The state may be entered if, among other conditions, the L{match} routine accepts the proposed input as being consistent with this value. @param is_initial: C{True} iff this state may serve as the first state of the automaton. @param final_update: C{None} if this state is not an accepting state of the automaton; otherwise a set of L{UpdateInstruction} values that must be satisfied by the counter values in a configuration as a further restriction of acceptance. @param is_unordered_catenation: C{True} if this state has subautomata that must be matched to execute the unordered catenation of an L{All} node; C{False} if this is a regular symbol.
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, symbol, is_initial, final_update=None, is_unordered_catenation=False): 'Create a FAC state.\n\n @param symbol: The symbol associated with the state.\n Normally initialized from the L{Symbol.metadata} value. The\n state may be entered if, among other conditions, the L{match}\n routine accepts the proposed input as being consistent with\n this value.\n\n @param is_initial: C{True} iff this state may serve as the\n first state of the automaton.\n\n @param final_update: C{None} if this state is not an\n accepting state of the automaton; otherwise a set of\n L{UpdateInstruction} values that must be satisfied by the\n counter values in a configuration as a further restriction of\n acceptance.\n\n @param is_unordered_catenation: C{True} if this state has\n subautomata that must be matched to execute the unordered\n catenation of an L{All} node; C{False} if this is a regular\n symbol.' self.__symbol = symbol self.__isInitial = (not (not is_initial)) self.__finalUpdate = final_update self.__isUnorderedCatenation = is_unordered_catenation
def __init__(self, symbol, is_initial, final_update=None, is_unordered_catenation=False): 'Create a FAC state.\n\n @param symbol: The symbol associated with the state.\n Normally initialized from the L{Symbol.metadata} value. The\n state may be entered if, among other conditions, the L{match}\n routine accepts the proposed input as being consistent with\n this value.\n\n @param is_initial: C{True} iff this state may serve as the\n first state of the automaton.\n\n @param final_update: C{None} if this state is not an\n accepting state of the automaton; otherwise a set of\n L{UpdateInstruction} values that must be satisfied by the\n counter values in a configuration as a further restriction of\n acceptance.\n\n @param is_unordered_catenation: C{True} if this state has\n subautomata that must be matched to execute the unordered\n catenation of an L{All} node; C{False} if this is a regular\n symbol.' self.__symbol = symbol self.__isInitial = (not (not is_initial)) self.__finalUpdate = final_update self.__isUnorderedCatenation = is_unordered_catenation<|docstring|>Create a FAC state. @param symbol: The symbol associated with the state. Normally initialized from the L{Symbol.metadata} value. The state may be entered if, among other conditions, the L{match} routine accepts the proposed input as being consistent with this value. @param is_initial: C{True} iff this state may serve as the first state of the automaton. @param final_update: C{None} if this state is not an accepting state of the automaton; otherwise a set of L{UpdateInstruction} values that must be satisfied by the counter values in a configuration as a further restriction of acceptance. @param is_unordered_catenation: C{True} if this state has subautomata that must be matched to execute the unordered catenation of an L{All} node; C{False} if this is a regular symbol.<|endoftext|>
2b16cbe72363e624c097738680fa0b8a5adb6289dac56f2da630cf6aae50faa3
def __get_automaton(self): 'Link to the L{Automaton} to which the state belongs.' return self.__automaton
Link to the L{Automaton} to which the state belongs.
pyxb/utils/fac.py
__get_automaton
maciekwawro/pyxb
123
python
def __get_automaton(self): return self.__automaton
def __get_automaton(self): return self.__automaton<|docstring|>Link to the L{Automaton} to which the state belongs.<|endoftext|>
37662e4fa8dbeebd992a198650733dcf4ddbaa42aaff3ae1026de2d074e73868
def _set_automaton(self, automaton): 'Method invoked during automaton construction to set state owner.' assert (self.__automaton is None) self.__automaton = automaton return self
Method invoked during automaton construction to set state owner.
pyxb/utils/fac.py
_set_automaton
maciekwawro/pyxb
123
python
def _set_automaton(self, automaton): assert (self.__automaton is None) self.__automaton = automaton return self
def _set_automaton(self, automaton): assert (self.__automaton is None) self.__automaton = automaton return self<|docstring|>Method invoked during automaton construction to set state owner.<|endoftext|>
fa309d42871994a6bdd612d59c196522b16e90389b77f3d9ea17a57c522047a3
def __get_symbol(self): 'Application-specific metadata identifying the symbol.\n\n See also L{match}.' return self.__symbol
Application-specific metadata identifying the symbol. See also L{match}.
pyxb/utils/fac.py
__get_symbol
maciekwawro/pyxb
123
python
def __get_symbol(self): 'Application-specific metadata identifying the symbol.\n\n See also L{match}.' return self.__symbol
def __get_symbol(self): 'Application-specific metadata identifying the symbol.\n\n See also L{match}.' return self.__symbol<|docstring|>Application-specific metadata identifying the symbol. See also L{match}.<|endoftext|>
d02b5e75dee64ac894d3971c03bc4d70f837c44ab27b2650792be81187f558a8
def __get_isUnorderedCatenation(self): 'Indicate whether the state has subautomata for unordered\n catenation.\n\n To reduce state explosion due to non-determinism, such a state\n executes internal transitions in subautomata until all terms\n have matched or a failure is discovered.' return self.__isUnorderedCatenation
Indicate whether the state has subautomata for unordered catenation. To reduce state explosion due to non-determinism, such a state executes internal transitions in subautomata until all terms have matched or a failure is discovered.
pyxb/utils/fac.py
__get_isUnorderedCatenation
maciekwawro/pyxb
123
python
def __get_isUnorderedCatenation(self): 'Indicate whether the state has subautomata for unordered\n catenation.\n\n To reduce state explosion due to non-determinism, such a state\n executes internal transitions in subautomata until all terms\n have matched or a failure is discovered.' return self.__isUnorderedCatenation
def __get_isUnorderedCatenation(self): 'Indicate whether the state has subautomata for unordered\n catenation.\n\n To reduce state explosion due to non-determinism, such a state\n executes internal transitions in subautomata until all terms\n have matched or a failure is discovered.' return self.__isUnorderedCatenation<|docstring|>Indicate whether the state has subautomata for unordered catenation. To reduce state explosion due to non-determinism, such a state executes internal transitions in subautomata until all terms have matched or a failure is discovered.<|endoftext|>
e76b5190169a256051a961683b72231ace6fda558dacb13a0632ce551cae1183
def __get_subAutomata(self): 'A sequence of sub-automata supporting internal state transitions.\n\n This will return C{None} unless L{isUnorderedCatenation} is C{True}.' return self.__subAutomata
A sequence of sub-automata supporting internal state transitions. This will return C{None} unless L{isUnorderedCatenation} is C{True}.
pyxb/utils/fac.py
__get_subAutomata
maciekwawro/pyxb
123
python
def __get_subAutomata(self): 'A sequence of sub-automata supporting internal state transitions.\n\n This will return C{None} unless L{isUnorderedCatenation} is C{True}.' return self.__subAutomata
def __get_subAutomata(self): 'A sequence of sub-automata supporting internal state transitions.\n\n This will return C{None} unless L{isUnorderedCatenation} is C{True}.' return self.__subAutomata<|docstring|>A sequence of sub-automata supporting internal state transitions. This will return C{None} unless L{isUnorderedCatenation} is C{True}.<|endoftext|>
99c052bd935b4f8cf212477da4633da288b574a0bb39c6e38b1897c988e4b44a
def __get_isInitial(self): 'C{True} iff this state may be the first state the automaton enters.' return self.__isInitial
C{True} iff this state may be the first state the automaton enters.
pyxb/utils/fac.py
__get_isInitial
maciekwawro/pyxb
123
python
def __get_isInitial(self): return self.__isInitial
def __get_isInitial(self): return self.__isInitial<|docstring|>C{True} iff this state may be the first state the automaton enters.<|endoftext|>
24b2a6100dc2713d34a637ff4339d8b2a79596baa223ca829282ba5132c9f3ff
def __get_automatonEntryTransitions(self): 'Return the set of initial transitions allowing entry to the automata through this state.\n\n These are structurally-permitted transitions only, and must be\n filtered based on the symbol that might trigger the\n transition. The results are not filtered based on counter\n value, since this value is used to determine how the\n containing automaton might be entered. Consequently the\n return value is the empty set unless this is an initial state.\n\n The returned set is closed under entry to sub-automata,\n i.e. it is guaranteed that each transition includes a\n consuming state even if it requires a multi-element chain of\n transitions into subautomata to reach one.' if (self.__automatonEntryTransitions is None): transitions = [] if self.__isInitial: xit = Transition(self, set()) if (self.__subAutomata is None): transitions.append(xit) else: for sa in self.__subAutomata: for saxit in sa.initialTransitions: transitions.append(xit.chainTo(saxit.makeEnterAutomatonTransition())) self.__automatonEntryTransitions = transitions return self.__automatonEntryTransitions
Return the set of initial transitions allowing entry to the automata through this state. These are structurally-permitted transitions only, and must be filtered based on the symbol that might trigger the transition. The results are not filtered based on counter value, since this value is used to determine how the containing automaton might be entered. Consequently the return value is the empty set unless this is an initial state. The returned set is closed under entry to sub-automata, i.e. it is guaranteed that each transition includes a consuming state even if it requires a multi-element chain of transitions into subautomata to reach one.
pyxb/utils/fac.py
__get_automatonEntryTransitions
maciekwawro/pyxb
123
python
def __get_automatonEntryTransitions(self): 'Return the set of initial transitions allowing entry to the automata through this state.\n\n These are structurally-permitted transitions only, and must be\n filtered based on the symbol that might trigger the\n transition. The results are not filtered based on counter\n value, since this value is used to determine how the\n containing automaton might be entered. Consequently the\n return value is the empty set unless this is an initial state.\n\n The returned set is closed under entry to sub-automata,\n i.e. it is guaranteed that each transition includes a\n consuming state even if it requires a multi-element chain of\n transitions into subautomata to reach one.' if (self.__automatonEntryTransitions is None): transitions = [] if self.__isInitial: xit = Transition(self, set()) if (self.__subAutomata is None): transitions.append(xit) else: for sa in self.__subAutomata: for saxit in sa.initialTransitions: transitions.append(xit.chainTo(saxit.makeEnterAutomatonTransition())) self.__automatonEntryTransitions = transitions return self.__automatonEntryTransitions
def __get_automatonEntryTransitions(self): 'Return the set of initial transitions allowing entry to the automata through this state.\n\n These are structurally-permitted transitions only, and must be\n filtered based on the symbol that might trigger the\n transition. The results are not filtered based on counter\n value, since this value is used to determine how the\n containing automaton might be entered. Consequently the\n return value is the empty set unless this is an initial state.\n\n The returned set is closed under entry to sub-automata,\n i.e. it is guaranteed that each transition includes a\n consuming state even if it requires a multi-element chain of\n transitions into subautomata to reach one.' if (self.__automatonEntryTransitions is None): transitions = [] if self.__isInitial: xit = Transition(self, set()) if (self.__subAutomata is None): transitions.append(xit) else: for sa in self.__subAutomata: for saxit in sa.initialTransitions: transitions.append(xit.chainTo(saxit.makeEnterAutomatonTransition())) self.__automatonEntryTransitions = transitions return self.__automatonEntryTransitions<|docstring|>Return the set of initial transitions allowing entry to the automata through this state. These are structurally-permitted transitions only, and must be filtered based on the symbol that might trigger the transition. The results are not filtered based on counter value, since this value is used to determine how the containing automaton might be entered. Consequently the return value is the empty set unless this is an initial state. The returned set is closed under entry to sub-automata, i.e. it is guaranteed that each transition includes a consuming state even if it requires a multi-element chain of transitions into subautomata to reach one.<|endoftext|>
42e28a5355768fce8a342df453258586ac68d0fc330a0728a4b9035a9ae7df33
def __get_finalUpdate(self): 'Return the update instructions that must be satisfied for this to be a final state.' return self.__finalUpdate
Return the update instructions that must be satisfied for this to be a final state.
pyxb/utils/fac.py
__get_finalUpdate
maciekwawro/pyxb
123
python
def __get_finalUpdate(self): return self.__finalUpdate
def __get_finalUpdate(self): return self.__finalUpdate<|docstring|>Return the update instructions that must be satisfied for this to be a final state.<|endoftext|>
155c5a656446b9b51c19666b941d47bbc1ce485a310e4ddf60a0e304891c4f08
def subAutomataInitialTransitions(self, sub_automata=None): 'Return the set of candidate transitions to enter a sub-automaton of this state.\n\n @param sub_automata: A subset of the sub-automata of this\n state which should contribute to the result. If C{None}, all\n sub-automata are used.\n\n @return: A pair C{(nullable, transitions)} where C{nullable}\n is C{True} iff every sub-automaton is in an accepting\n state on entry, and C{transitions} is a list of\n L{Transition} instances describing how to reach some state in\n a sub-automaton via a consumed symbol.\n ' assert (self.__subAutomata is not None) is_nullable = True transitions = [] if (sub_automata is None): sub_automata = self.__subAutomata for sa in sub_automata: if (not sa.nullable): is_nullable = False transitions.extend(sa.initialTransitions) return (is_nullable, transitions)
Return the set of candidate transitions to enter a sub-automaton of this state. @param sub_automata: A subset of the sub-automata of this state which should contribute to the result. If C{None}, all sub-automata are used. @return: A pair C{(nullable, transitions)} where C{nullable} is C{True} iff every sub-automaton is in an accepting state on entry, and C{transitions} is a list of L{Transition} instances describing how to reach some state in a sub-automaton via a consumed symbol.
pyxb/utils/fac.py
subAutomataInitialTransitions
maciekwawro/pyxb
123
python
def subAutomataInitialTransitions(self, sub_automata=None): 'Return the set of candidate transitions to enter a sub-automaton of this state.\n\n @param sub_automata: A subset of the sub-automata of this\n state which should contribute to the result. If C{None}, all\n sub-automata are used.\n\n @return: A pair C{(nullable, transitions)} where C{nullable}\n is C{True} iff every sub-automaton is in an accepting\n state on entry, and C{transitions} is a list of\n L{Transition} instances describing how to reach some state in\n a sub-automaton via a consumed symbol.\n ' assert (self.__subAutomata is not None) is_nullable = True transitions = [] if (sub_automata is None): sub_automata = self.__subAutomata for sa in sub_automata: if (not sa.nullable): is_nullable = False transitions.extend(sa.initialTransitions) return (is_nullable, transitions)
def subAutomataInitialTransitions(self, sub_automata=None): 'Return the set of candidate transitions to enter a sub-automaton of this state.\n\n @param sub_automata: A subset of the sub-automata of this\n state which should contribute to the result. If C{None}, all\n sub-automata are used.\n\n @return: A pair C{(nullable, transitions)} where C{nullable}\n is C{True} iff every sub-automaton is in an accepting\n state on entry, and C{transitions} is a list of\n L{Transition} instances describing how to reach some state in\n a sub-automaton via a consumed symbol.\n ' assert (self.__subAutomata is not None) is_nullable = True transitions = [] if (sub_automata is None): sub_automata = self.__subAutomata for sa in sub_automata: if (not sa.nullable): is_nullable = False transitions.extend(sa.initialTransitions) return (is_nullable, transitions)<|docstring|>Return the set of candidate transitions to enter a sub-automaton of this state. @param sub_automata: A subset of the sub-automata of this state which should contribute to the result. If C{None}, all sub-automata are used. @return: A pair C{(nullable, transitions)} where C{nullable} is C{True} iff every sub-automaton is in an accepting state on entry, and C{transitions} is a list of L{Transition} instances describing how to reach some state in a sub-automaton via a consumed symbol.<|endoftext|>
f1828f55a517ecd95e52cd8ee0661b976f361e1fc2d3dea3392fd8f502405e7a
def isAccepting(self, counter_values): 'C{True} iff this state is an accepting state for the automaton.\n\n @param counter_values: Counter values that further validate\n whether the requirements of the automaton have been met.\n\n @return: C{True} if this is an accepting state and the\n counter values relevant at it are satisfied.' if (self.__finalUpdate is None): return False return UpdateInstruction.Satisfies(counter_values, self.__finalUpdate)
C{True} iff this state is an accepting state for the automaton. @param counter_values: Counter values that further validate whether the requirements of the automaton have been met. @return: C{True} if this is an accepting state and the counter values relevant at it are satisfied.
pyxb/utils/fac.py
isAccepting
maciekwawro/pyxb
123
python
def isAccepting(self, counter_values): 'C{True} iff this state is an accepting state for the automaton.\n\n @param counter_values: Counter values that further validate\n whether the requirements of the automaton have been met.\n\n @return: C{True} if this is an accepting state and the\n counter values relevant at it are satisfied.' if (self.__finalUpdate is None): return False return UpdateInstruction.Satisfies(counter_values, self.__finalUpdate)
def isAccepting(self, counter_values): 'C{True} iff this state is an accepting state for the automaton.\n\n @param counter_values: Counter values that further validate\n whether the requirements of the automaton have been met.\n\n @return: C{True} if this is an accepting state and the\n counter values relevant at it are satisfied.' if (self.__finalUpdate is None): return False return UpdateInstruction.Satisfies(counter_values, self.__finalUpdate)<|docstring|>C{True} iff this state is an accepting state for the automaton. @param counter_values: Counter values that further validate whether the requirements of the automaton have been met. @return: C{True} if this is an accepting state and the counter values relevant at it are satisfied.<|endoftext|>
4cd4cb51ed7a04d068beae4bd1e4b46c5e346b8eff2b3148e32d9476d9685f8c
def __get_transitionSet(self): 'Definitions of viable transitions from this state.\n\n The transition set of a state is a set of L{Transition} nodes\n identifying a state reachable in a single step from this\n state, and a set of counter updates that must apply if the\n transition is taken.\n\n These transitions may not in themselves consume a symbol. For\n example, if the destination state represents a match of an\n L{unordered catenation of terms<All>}, then secondary\n processing must be done to traverse into the automata for\n those terms and identify transitions that include a symbol\n consumption.\n\n @note: Although conceptually the viable transitions are a set,\n this implementation maintains them in a list so that order is\n preserved when automata processing becomes non-deterministic.\n PyXB is careful to build the transition list so that the\n states are attempted in the order in which they appear in the\n schema that define the automata.\n ' return self.__transitionSet
Definitions of viable transitions from this state. The transition set of a state is a set of L{Transition} nodes identifying a state reachable in a single step from this state, and a set of counter updates that must apply if the transition is taken. These transitions may not in themselves consume a symbol. For example, if the destination state represents a match of an L{unordered catenation of terms<All>}, then secondary processing must be done to traverse into the automata for those terms and identify transitions that include a symbol consumption. @note: Although conceptually the viable transitions are a set, this implementation maintains them in a list so that order is preserved when automata processing becomes non-deterministic. PyXB is careful to build the transition list so that the states are attempted in the order in which they appear in the schema that define the automata.
pyxb/utils/fac.py
__get_transitionSet
maciekwawro/pyxb
123
python
def __get_transitionSet(self): 'Definitions of viable transitions from this state.\n\n The transition set of a state is a set of L{Transition} nodes\n identifying a state reachable in a single step from this\n state, and a set of counter updates that must apply if the\n transition is taken.\n\n These transitions may not in themselves consume a symbol. For\n example, if the destination state represents a match of an\n L{unordered catenation of terms<All>}, then secondary\n processing must be done to traverse into the automata for\n those terms and identify transitions that include a symbol\n consumption.\n\n @note: Although conceptually the viable transitions are a set,\n this implementation maintains them in a list so that order is\n preserved when automata processing becomes non-deterministic.\n PyXB is careful to build the transition list so that the\n states are attempted in the order in which they appear in the\n schema that define the automata.\n ' return self.__transitionSet
def __get_transitionSet(self): 'Definitions of viable transitions from this state.\n\n The transition set of a state is a set of L{Transition} nodes\n identifying a state reachable in a single step from this\n state, and a set of counter updates that must apply if the\n transition is taken.\n\n These transitions may not in themselves consume a symbol. For\n example, if the destination state represents a match of an\n L{unordered catenation of terms<All>}, then secondary\n processing must be done to traverse into the automata for\n those terms and identify transitions that include a symbol\n consumption.\n\n @note: Although conceptually the viable transitions are a set,\n this implementation maintains them in a list so that order is\n preserved when automata processing becomes non-deterministic.\n PyXB is careful to build the transition list so that the\n states are attempted in the order in which they appear in the\n schema that define the automata.\n ' return self.__transitionSet<|docstring|>Definitions of viable transitions from this state. The transition set of a state is a set of L{Transition} nodes identifying a state reachable in a single step from this state, and a set of counter updates that must apply if the transition is taken. These transitions may not in themselves consume a symbol. For example, if the destination state represents a match of an L{unordered catenation of terms<All>}, then secondary processing must be done to traverse into the automata for those terms and identify transitions that include a symbol consumption. @note: Although conceptually the viable transitions are a set, this implementation maintains them in a list so that order is preserved when automata processing becomes non-deterministic. PyXB is careful to build the transition list so that the states are attempted in the order in which they appear in the schema that define the automata.<|endoftext|>
c93d0036efa66ece1cbb633273760cfd9996dd9e00192fd4b9fdf8ee18d4153e
def _set_transitionSet(self, transition_set): 'Method invoked during automaton construction to set the\n legal transitions from the state.\n\n The set of transitions cannot be defined until all states that\n appear in it are available, so the creation of the automaton\n requires that the association of the transition set be\n delayed. (Though described as a set, the transitions are a\n list where order reflects priority.)\n\n @param transition_set: a list of pairs where the first\n member is the destination L{State} and the second member is the\n set of L{UpdateInstruction}s that apply when the automaton\n transitions to the destination state.' self.__transitionSet = [] seen = set() for xit in transition_set: if (not (xit in seen)): seen.add(xit) self.__transitionSet.append(xit)
Method invoked during automaton construction to set the legal transitions from the state. The set of transitions cannot be defined until all states that appear in it are available, so the creation of the automaton requires that the association of the transition set be delayed. (Though described as a set, the transitions are a list where order reflects priority.) @param transition_set: a list of pairs where the first member is the destination L{State} and the second member is the set of L{UpdateInstruction}s that apply when the automaton transitions to the destination state.
pyxb/utils/fac.py
_set_transitionSet
maciekwawro/pyxb
123
python
def _set_transitionSet(self, transition_set): 'Method invoked during automaton construction to set the\n legal transitions from the state.\n\n The set of transitions cannot be defined until all states that\n appear in it are available, so the creation of the automaton\n requires that the association of the transition set be\n delayed. (Though described as a set, the transitions are a\n list where order reflects priority.)\n\n @param transition_set: a list of pairs where the first\n member is the destination L{State} and the second member is the\n set of L{UpdateInstruction}s that apply when the automaton\n transitions to the destination state.' self.__transitionSet = [] seen = set() for xit in transition_set: if (not (xit in seen)): seen.add(xit) self.__transitionSet.append(xit)
def _set_transitionSet(self, transition_set): 'Method invoked during automaton construction to set the\n legal transitions from the state.\n\n The set of transitions cannot be defined until all states that\n appear in it are available, so the creation of the automaton\n requires that the association of the transition set be\n delayed. (Though described as a set, the transitions are a\n list where order reflects priority.)\n\n @param transition_set: a list of pairs where the first\n member is the destination L{State} and the second member is the\n set of L{UpdateInstruction}s that apply when the automaton\n transitions to the destination state.' self.__transitionSet = [] seen = set() for xit in transition_set: if (not (xit in seen)): seen.add(xit) self.__transitionSet.append(xit)<|docstring|>Method invoked during automaton construction to set the legal transitions from the state. The set of transitions cannot be defined until all states that appear in it are available, so the creation of the automaton requires that the association of the transition set be delayed. (Though described as a set, the transitions are a list where order reflects priority.) @param transition_set: a list of pairs where the first member is the destination L{State} and the second member is the set of L{UpdateInstruction}s that apply when the automaton transitions to the destination state.<|endoftext|>
b1be5774f35e5cf718ec504714c4f6144cd2fe545651aad79f9adee7f54d1e90
def match(self, symbol): 'Return C{True} iff the symbol matches for this state.\n\n This may be overridden by subclasses when matching by\n equivalence does not work. Alternatively, if the symbol\n stored in this node is a subclass of L{SymbolMatch_mixin}, then\n its match method will be used. Otherwise C{symbol} matches\n only if it is equal to the L{symbol} of this state.\n\n @param symbol: A candidate symbol corresponding to the\n expression symbol for this state.\n\n @return: C{True} iff C{symbol} is a match for this state.\n ' if isinstance(self.__symbol, SymbolMatch_mixin): return self.__symbol.match(symbol) return (self.__symbol == symbol)
Return C{True} iff the symbol matches for this state. This may be overridden by subclasses when matching by equivalence does not work. Alternatively, if the symbol stored in this node is a subclass of L{SymbolMatch_mixin}, then its match method will be used. Otherwise C{symbol} matches only if it is equal to the L{symbol} of this state. @param symbol: A candidate symbol corresponding to the expression symbol for this state. @return: C{True} iff C{symbol} is a match for this state.
pyxb/utils/fac.py
match
maciekwawro/pyxb
123
python
def match(self, symbol): 'Return C{True} iff the symbol matches for this state.\n\n This may be overridden by subclasses when matching by\n equivalence does not work. Alternatively, if the symbol\n stored in this node is a subclass of L{SymbolMatch_mixin}, then\n its match method will be used. Otherwise C{symbol} matches\n only if it is equal to the L{symbol} of this state.\n\n @param symbol: A candidate symbol corresponding to the\n expression symbol for this state.\n\n @return: C{True} iff C{symbol} is a match for this state.\n ' if isinstance(self.__symbol, SymbolMatch_mixin): return self.__symbol.match(symbol) return (self.__symbol == symbol)
def match(self, symbol): 'Return C{True} iff the symbol matches for this state.\n\n This may be overridden by subclasses when matching by\n equivalence does not work. Alternatively, if the symbol\n stored in this node is a subclass of L{SymbolMatch_mixin}, then\n its match method will be used. Otherwise C{symbol} matches\n only if it is equal to the L{symbol} of this state.\n\n @param symbol: A candidate symbol corresponding to the\n expression symbol for this state.\n\n @return: C{True} iff C{symbol} is a match for this state.\n ' if isinstance(self.__symbol, SymbolMatch_mixin): return self.__symbol.match(symbol) return (self.__symbol == symbol)<|docstring|>Return C{True} iff the symbol matches for this state. This may be overridden by subclasses when matching by equivalence does not work. Alternatively, if the symbol stored in this node is a subclass of L{SymbolMatch_mixin}, then its match method will be used. Otherwise C{symbol} matches only if it is equal to the L{symbol} of this state. @param symbol: A candidate symbol corresponding to the expression symbol for this state. @return: C{True} iff C{symbol} is a match for this state.<|endoftext|>
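The match record above dispatches to the symbol's own match method when the stored symbol derives from SymbolMatch_mixin, and falls back to plain equality otherwise. A minimal sketch of a custom matcher, assuming only what the docstring states about SymbolMatch_mixin (that subclasses supply match(symbol)):

from pyxb.utils.fac import SymbolMatch_mixin

class AnyOfSymbol(SymbolMatch_mixin):
    # Match any symbol from a fixed set instead of a single literal value.
    def __init__(self, *alternatives):
        self.__alternatives = frozenset(alternatives)
    def match(self, symbol):
        return symbol in self.__alternatives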
678cbf810b869b86bcc279470d1224ae7c130953c6b1448efd74cbee3809639d
def __get_min(self): 'The minimum legal value for the counter.\n\n This is a non-negative integer.' return self.__min
The minimum legal value for the counter. This is a non-negative integer.
pyxb/utils/fac.py
__get_min
maciekwawro/pyxb
123
python
def __get_min(self): 'The minimum legal value for the counter.\n\n This is a non-negative integer.' return self.__min
def __get_min(self): 'The minimum legal value for the counter.\n\n This is a non-negative integer.' return self.__min<|docstring|>The minimum legal value for the counter. This is a non-negative integer.<|endoftext|>
5722ad9ddaac8898a6576c607f8276ce697cc0323b32cc4710e70af81dd83a69
def __get_max(self): 'The maximum legal value for the counter.\n\n This is a positive integer, or C{None} to indicate that the\n counter is unbounded.' return self.__max
The maximum legal value for the counter. This is a positive integer, or C{None} to indicate that the counter is unbounded.
pyxb/utils/fac.py
__get_max
maciekwawro/pyxb
123
python
def __get_max(self): 'The maximum legal value for the counter.\n\n This is a positive integer, or C{None} to indicate that the\n counter is unbounded.' return self.__max
def __get_max(self): 'The maximum legal value for the counter.\n\n This is a positive integer, or C{None} to indicate that the\n counter is unbounded.' return self.__max<|docstring|>The maximum legal value for the counter. This is a positive integer, or C{None} to indicate that the counter is unbounded.<|endoftext|>
42db0a57764598881bf96b6ac1042504c0e009a045e89f82f40c81d46d2583fc
def __get_metadata(self): 'A pointer to application metadata provided when the condition was created.' return self.__metadata
A pointer to application metadata provided when the condition was created.
pyxb/utils/fac.py
__get_metadata
maciekwawro/pyxb
123
python
def __get_metadata(self): return self.__metadata
def __get_metadata(self): return self.__metadata<|docstring|>A pointer to application metadata provided when the condition was created.<|endoftext|>
073c53aa7789c2f3224340df7b5572112728849b348404319aa037e31894aa3f
def __init__(self, min, max, metadata=None): 'Create a counter condition.\n\n @param min: The value for L{min}\n @param max: The value for L{max}\n @param metadata: The value for L{metadata}\n ' self.__min = min self.__max = max self.__metadata = metadata
Create a counter condition. @param min: The value for L{min} @param max: The value for L{max} @param metadata: The value for L{metadata}
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, min, max, metadata=None): 'Create a counter condition.\n\n @param min: The value for L{min}\n @param max: The value for L{max}\n @param metadata: The value for L{metadata}\n ' self.__min = min self.__max = max self.__metadata = metadata
def __init__(self, min, max, metadata=None): 'Create a counter condition.\n\n @param min: The value for L{min}\n @param max: The value for L{max}\n @param metadata: The value for L{metadata}\n ' self.__min = min self.__max = max self.__metadata = metadata<|docstring|>Create a counter condition. @param min: The value for L{min} @param max: The value for L{max} @param metadata: The value for L{metadata}<|endoftext|>
b60947aa9029a3d761275db6a4eb2e849b901ed1333d946edbd8b685171f265d
def __get_counterCondition(self): 'A reference to the L{CounterCondition} identifying the\n counter to be updated.\n\n The counter condition instance is used as a key to the\n dictionary maintaining current counter values.' return self.__counterCondition
A reference to the L{CounterCondition} identifying the counter to be updated. The counter condition instance is used as a key to the dictionary maintaining current counter values.
pyxb/utils/fac.py
__get_counterCondition
maciekwawro/pyxb
123
python
def __get_counterCondition(self): 'A reference to the L{CounterCondition} identifying the\n counter to be updated.\n\n The counter condition instance is used as a key to the\n dictionary maintaining current counter values.' return self.__counterCondition
def __get_counterCondition(self): 'A reference to the L{CounterCondition} identifying the\n counter to be updated.\n\n The counter condition instance is used as a key to the\n dictionary maintaining current counter values.' return self.__counterCondition<|docstring|>A reference to the L{CounterCondition} identifying the counter to be updated. The counter condition instance is used as a key to the dictionary maintaining current counter values.<|endoftext|>
906c331571439917d1924bb98fa139edb7239ee82e69074c246e83bf129078f2
def __get_doIncrement(self): 'C{True} if the counter is to be incremented; C{False} if it is to be reset.' return self.__doIncrement
C{True} if the counter is to be incremented; C{False} if it is to be reset.
pyxb/utils/fac.py
__get_doIncrement
maciekwawro/pyxb
123
python
def __get_doIncrement(self): return self.__doIncrement
def __get_doIncrement(self): return self.__doIncrement<|docstring|>C{True} if the counter is to be incremented; C{False} if it is to be reset.<|endoftext|>
7b48f68ff00b29b45d75e1e6aa7d86cad24b4fe95ca3b049bd0841eabe208740
def __init__(self, counter_condition, do_increment): 'Create an update instruction.\n\n @param counter_condition: A L{CounterCondition} identifying a\n minimum and maximum value for a counter, and serving as a map\n key for the value of the corresponding counter.\n\n @param do_increment: C{True} if the update is to increment\n the value of the counter; C{False} if the update is to reset\n the counter.\n ' self.__counterCondition = counter_condition self.__doIncrement = (not (not do_increment)) self.__min = counter_condition.min self.__max = counter_condition.max
Create an update instruction. @param counter_condition: A L{CounterCondition} identifying a minimum and maximum value for a counter, and serving as a map key for the value of the corresponding counter. @param do_increment: C{True} if the update is to increment the value of the counter; C{False} if the update is to reset the counter.
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, counter_condition, do_increment): 'Create an update instruction.\n\n @param counter_condition: A L{CounterCondition} identifying a\n minimum and maximum value for a counter, and serving as a map\n key for the value of the corresponding counter.\n\n @param do_increment: C{True} if the update is to increment\n the value of the counter; C{False} if the update is to reset\n the counter.\n ' self.__counterCondition = counter_condition self.__doIncrement = (not (not do_increment)) self.__min = counter_condition.min self.__max = counter_condition.max
def __init__(self, counter_condition, do_increment): 'Create an update instruction.\n\n @param counter_condition: A L{CounterCondition} identifying a\n minimum and maximum value for a counter, and serving as a map\n key for the value of the corresponding counter.\n\n @param do_increment: C{True} if the update is to increment\n the value of the counter; C{False} if the update is to reset\n the counter.\n ' self.__counterCondition = counter_condition self.__doIncrement = (not (not do_increment)) self.__min = counter_condition.min self.__max = counter_condition.max<|docstring|>Create an update instruction. @param counter_condition: A L{CounterCondition} identifying a minimum and maximum value for a counter, and serving as a map key for the value of the corresponding counter. @param do_increment: C{True} if the update is to increment the value of the counter; C{False} if the update is to reset the counter.<|endoftext|>
2c2cfa3c8086b06ebe5347b86a43c9e0dbac98f530dc233e37ad483b473b45ed
def satisfiedBy(self, counter_values): 'Implement a component of definition 5 from B{HOV09}.\n\n The update instruction is satisfied by the counter values if\n its action may be legitimately applied to the value of its\n associated counter.\n\n @param counter_values: A map from L{CounterCondition}s to\n non-negative integers\n\n @return: C{True} or C{False}\n ' value = counter_values[self.__counterCondition] if (self.__doIncrement and (self.__max is not None) and (value >= self.__max)): return False if ((not self.__doIncrement) and (value < self.__min)): return False return True
Implement a component of definition 5 from B{HOV09}. The update instruction is satisfied by the counter values if its action may be legitimately applied to the value of its associated counter. @param counter_values: A map from L{CounterCondition}s to non-negative integers @return: C{True} or C{False}
pyxb/utils/fac.py
satisfiedBy
maciekwawro/pyxb
123
python
def satisfiedBy(self, counter_values): 'Implement a component of definition 5 from B{HOV09}.\n\n The update instruction is satisfied by the counter values if\n its action may be legitimately applied to the value of its\n associated counter.\n\n @param counter_values: A map from L{CounterCondition}s to\n non-negative integers\n\n @return: C{True} or C{False}\n ' value = counter_values[self.__counterCondition] if (self.__doIncrement and (self.__max is not None) and (value >= self.__max)): return False if ((not self.__doIncrement) and (value < self.__min)): return False return True
def satisfiedBy(self, counter_values): 'Implement a component of definition 5 from B{HOV09}.\n\n The update instruction is satisfied by the counter values if\n its action may be legitimately applied to the value of its\n associated counter.\n\n @param counter_values: A map from L{CounterCondition}s to\n non-negative integers\n\n @return: C{True} or C{False}\n ' value = counter_values[self.__counterCondition] if (self.__doIncrement and (self.__max is not None) and (value >= self.__max)): return False if ((not self.__doIncrement) and (value < self.__min)): return False return True<|docstring|>Implement a component of definition 5 from B{HOV09}. The update instruction is satisfied by the counter values if its action may be legitimately applied to the value of its associated counter. @param counter_values: A map from L{CounterCondition}s to non-negative integers @return: C{True} or C{False}<|endoftext|>
b53954c0ca54a255c3cb5cb1d6db01af81adfce5593073e09cd899668e7e603a
@classmethod def Satisfies(cls, counter_values, update_instructions): 'Return C{True} iff the counter values satisfy the update\n instructions.\n\n @param counter_values: A map from L{CounterCondition} to\n integer counter values\n\n @param update_instructions: A set of L{UpdateInstruction}\n instances\n\n @return: C{True} iff all instructions are satisfied by the\n values and limits.' for psi in update_instructions: if (not psi.satisfiedBy(counter_values)): return False return True
Return C{True} iff the counter values satisfy the update instructions. @param counter_values: A map from L{CounterCondition} to integer counter values @param update_instructions: A set of L{UpdateInstruction} instances @return: C{True} iff all instructions are satisfied by the values and limits.
pyxb/utils/fac.py
Satisfies
maciekwawro/pyxb
123
python
@classmethod def Satisfies(cls, counter_values, update_instructions): 'Return C{True} iff the counter values satisfy the update\n instructions.\n\n @param counter_values: A map from L{CounterCondition} to\n integer counter values\n\n @param update_instructions: A set of L{UpdateInstruction}\n instances\n\n @return: C{True} iff all instructions are satisfied by the\n values and limits.' for psi in update_instructions: if (not psi.satisfiedBy(counter_values)): return False return True
@classmethod def Satisfies(cls, counter_values, update_instructions): 'Return C{True} iff the counter values satisfy the update\n instructions.\n\n @param counter_values: A map from L{CounterCondition} to\n integer counter values\n\n @param update_instructions: A set of L{UpdateInstruction}\n instances\n\n @return: C{True} iff all instructions are satisfied by the\n values and limits.' for psi in update_instructions: if (not psi.satisfiedBy(counter_values)): return False return True<|docstring|>Return C{True} iff the counter values satisfy the update instructions. @param counter_values: A map from L{CounterCondition} to integer counter values @param update_instructions: A set of L{UpdateInstruction} instances @return: C{True} iff all instructions are satisfied by the values and limits.<|endoftext|>
84a3ba255bed317474ebcca2b55ae6f6467f2ed50d2baf41edc08687834dec7c
def apply(self, counter_values): 'Apply the update instruction to the provided counter values.\n\n @param counter_values: A map from L{CounterCondition} to\n integer counter values. This map is updated in-place.' if (not self.satisfiedBy(counter_values)): raise UpdateApplicationError(self, counter_values) value = counter_values[self.__counterCondition] if self.__doIncrement: value += 1 else: value = 1 counter_values[self.__counterCondition] = value
Apply the update instruction to the provided counter values. @param counter_values: A map from L{CounterCondition} to integer counter values. This map is updated in-place.
pyxb/utils/fac.py
apply
maciekwawro/pyxb
123
python
def apply(self, counter_values): 'Apply the update instruction to the provided counter values.\n\n @param counter_values: A map from L{CounterCondition} to\n integer counter values. This map is updated in-place.' if (not self.satisfiedBy(counter_values)): raise UpdateApplicationError(self, counter_values) value = counter_values[self.__counterCondition] if self.__doIncrement: value += 1 else: value = 1 counter_values[self.__counterCondition] = value
def apply(self, counter_values): 'Apply the update instruction to the provided counter values.\n\n @param counter_values: A map from L{CounterCondition} to\n integer counter values. This map is updated in-place.' if (not self.satisfiedBy(counter_values)): raise UpdateApplicationError(self, counter_values) value = counter_values[self.__counterCondition] if self.__doIncrement: value += 1 else: value = 1 counter_values[self.__counterCondition] = value<|docstring|>Apply the update instruction to the provided counter values. @param counter_values: A map from L{CounterCondition} to integer counter values. This map is updated in-place.<|endoftext|>
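Taken together, the CounterCondition and UpdateInstruction records above implement bounded occurrence counting. A worked sketch, assuming only the behaviour visible in the bodies shown here (an increment is refused once the counter reaches max; a reset is refused below min and otherwise re-seeds the counter at 1):

from pyxb.utils.fac import CounterCondition, UpdateInstruction

cc = CounterCondition(min=1, max=3)            # e.g. a particle with maxOccurs=3
inc = UpdateInstruction(cc, do_increment=True)
values = {cc: 1}                               # one occurrence consumed so far

inc.apply(values)                              # second occurrence -> 2
inc.apply(values)                              # third occurrence -> 3
print(inc.satisfiedBy(values))                 # False: a fourth would exceed max

reset = UpdateInstruction(cc, do_increment=False)
reset.apply(values)                            # leaving and re-entering the counted term
print(values[cc])                              # 1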
771f09018cc23ca61c527bba4abb2196e64382ed9be6160ab9ba583870488a4a
@classmethod def Apply(cls, update_instructions, counter_values): 'Apply the update instructions to the counter values.\n\n @param update_instructions: A set of L{UpdateInstruction}\n instances.\n\n @param counter_values: A map from L{CounterCondition}\n instances to non-negative integers. This map is updated\n in-place by applying each instruction in\n C{update_instructions}.' for psi in update_instructions: psi.apply(counter_values)
Apply the update instructions to the counter values. @param update_instructions: A set of L{UpdateInstruction} instances. @param counter_values: A map from L{CounterCondition} instances to non-negative integers. This map is updated in-place by applying each instruction in C{update_instructions}.
pyxb/utils/fac.py
Apply
maciekwawro/pyxb
123
python
@classmethod def Apply(cls, update_instructions, counter_values): 'Apply the update instructions to the counter values.\n\n @param update_instructions: A set of L{UpdateInstruction}\n instances.\n\n @param counter_values: A map from L{CounterCondition}\n instances to non-negative integers. This map is updated\n in-place by applying each instruction in\n C{update_instructions}.' for psi in update_instructions: psi.apply(counter_values)
@classmethod def Apply(cls, update_instructions, counter_values): 'Apply the update instructions to the counter values.\n\n @param update_instructions: A set of L{UpdateInstruction}\n instances.\n\n @param counter_values: A map from L{CounterCondition}\n instances to non-negative integers. This map is updated\n in-place by applying each instruction in\n C{update_instructions}.' for psi in update_instructions: psi.apply(counter_values)<|docstring|>Apply the update instructions to the counter values. @param update_instructions: A set of L{UpdateInstruction} instances. @param counter_values: A map from L{CounterCondition} instances to non-negative integers. This map is updated in-place by applying each instruction in C{update_instructions}.<|endoftext|>
5cd41080797adcbb3a3b99b3dea2ea97ff261949455c810a88e45bc7f8b1438b
def __get_destination(self): 'The transition destination state.' return self.__destination
The transition destination state.
pyxb/utils/fac.py
__get_destination
maciekwawro/pyxb
123
python
def __get_destination(self): return self.__destination
def __get_destination(self): return self.__destination<|docstring|>The transition destination state.<|endoftext|>
0af89c42ad447b5391b121c8029b37040d6e713203e2e0e4b68278a4c36d1955
def __get_updateInstructions(self): 'The set of counter updates that are applied when the transition is taken.' return self.__updateInstructions
The set of counter updates that are applied when the transition is taken.
pyxb/utils/fac.py
__get_updateInstructions
maciekwawro/pyxb
123
python
def __get_updateInstructions(self): return self.__updateInstructions
def __get_updateInstructions(self): return self.__updateInstructions<|docstring|>The set of counter updates that are applied when the transition is taken.<|endoftext|>
49110cefcf48cca8c6944a08217dfaaf8b16ce7a1a38fee934bc0fa01e706c1a
def __get_nextTransition(self): 'The next transition to apply in this chain.\n\n C{None} if this is the last transition in the chain.' return self.__nextTransition
The next transition to apply in this chain. C{None} if this is the last transition in the chain.
pyxb/utils/fac.py
__get_nextTransition
maciekwawro/pyxb
123
python
def __get_nextTransition(self): 'The next transition to apply in this chain.\n\n C{None} if this is the last transition in the chain.' return self.__nextTransition
def __get_nextTransition(self): 'The next transition to apply in this chain.\n\n C{None} if this is the last transition in the chain.' return self.__nextTransition<|docstring|>The next transition to apply in this chain. C{None} if this is the last transition in the chain.<|endoftext|>
92b597d5c493066c1f562222e50c629cca475a486c379346de3d47d0931bd4a7
def __get_layerLink(self): 'A directive relating to changing automaton layer on transition.\n\n C{None} indicates this transition is from one state to another\n within a single automaton.\n\n An instance of L{Configuration} is a transition on completion\n of a subautomaton back to the configuration in the parent\n automaton. The L{destination} is the state in the parent automaton.\n\n An instance of L{Automaton} requires creation of a\n sub-configuration and initial entry into the automaton. The\n L{destination} is the state in the sub-automaton.\n ' return self.__layerLink
A directive relating to changing automaton layer on transition. C{None} indicates this transition is from one state to another within a single automaton. An instance of L{Configuration} is a transition on completion of a subautomaton back to the configuration in the parent automaton. The L{destination} is the state in the parent automaton. An instance of L{Automaton} requires creation of a sub-configuration and initial entry into the automaton. The L{destination} is the state in the sub-automaton.
pyxb/utils/fac.py
__get_layerLink
maciekwawro/pyxb
123
python
def __get_layerLink(self): 'A directive relating to changing automaton layer on transition.\n\n C{None} indicates this transition is from one state to another\n within a single automaton.\n\n An instance of L{Configuration} is a transition on completion\n of a subautomaton back to the configuration in the parent\n automaton. The L{destination} is the state in the parent automaton.\n\n An instance of L{Automaton} requires creation of a\n sub-configuration and initial entry into the automaton. The\n L{destination} is the state in the sub-automaton.\n ' return self.__layerLink
def __get_layerLink(self): 'A directive relating to changing automaton layer on transition.\n\n C{None} indicates this transition is from one state to another\n within a single automaton.\n\n An instance of L{Configuration} is a transition on completion\n of a subautomaton back to the configuration in the parent\n automaton. The L{destination} is the state in the parent automaton.\n\n An instance of L{Automaton} requires creation of a\n sub-configuration and initial entry into the automaton. The\n L{destination} is the state in the sub-automaton.\n ' return self.__layerLink<|docstring|>A directive relating to changing automaton layer on transition. C{None} indicates this transition is from one state to another within a single automaton. An instance of L{Configuration} is a transition on completion of a subautomaton back to the configuration in the parent automaton. The L{destination} is the state in the parent automaton. An instance of L{Automaton} requires creation of a sub-configuration and initial entry into the automaton. The L{destination} is the state in the sub-automaton.<|endoftext|>
3f812b0b40599f56b386563b6ae87d84f6598cb47a49dba20b6c27a444d8843c
def __init__(self, destination, update_instructions, layer_link=None): 'Create a transition to a state.\n\n @param destination: the state into which the transition is\n made\n\n @param update_instructions: An iterable of L{UpdateInstruction}s\n denoting the changes that must be made to counters as a\n consequence of taking the transition.\n\n @keyword layer_link: The value for L{layerLink}.' self.__destination = destination if (not isinstance(update_instructions, list)): update_instructions = list(update_instructions) self.__updateInstructions = update_instructions self.__layerLink = layer_link
Create a transition to a state. @param destination: the state into which the transition is made @param update_instructions: A iterable of L{UpdateInstruction}s denoting the changes that must be made to counters as a consequence of taking the transition. @keyword layer_link: The value for L{layerLink}.
pyxb/utils/fac.py
__init__
maciekwawro/pyxb
123
python
def __init__(self, destination, update_instructions, layer_link=None): 'Create a transition to a state.\n\n @param destination: the state into which the transition is\n made\n\n @param update_instructions: An iterable of L{UpdateInstruction}s\n denoting the changes that must be made to counters as a\n consequence of taking the transition.\n\n @keyword layer_link: The value for L{layerLink}.' self.__destination = destination if (not isinstance(update_instructions, list)): update_instructions = list(update_instructions) self.__updateInstructions = update_instructions self.__layerLink = layer_link
def __init__(self, destination, update_instructions, layer_link=None): 'Create a transition to a state.\n\n @param destination: the state into which the transition is\n made\n\n @param update_instructions: An iterable of L{UpdateInstruction}s\n denoting the changes that must be made to counters as a\n consequence of taking the transition.\n\n @keyword layer_link: The value for L{layerLink}.' self.__destination = destination if (not isinstance(update_instructions, list)): update_instructions = list(update_instructions) self.__updateInstructions = update_instructions self.__layerLink = layer_link<|docstring|>Create a transition to a state. @param destination: the state into which the transition is made @param update_instructions: An iterable of L{UpdateInstruction}s denoting the changes that must be made to counters as a consequence of taking the transition. @keyword layer_link: The value for L{layerLink}.<|endoftext|>
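A minimal construction sketch for the constructor above, assuming Transition is imported from pyxb.utils.fac and that dest_state is a State obtained elsewhere (dest_state is a placeholder name, not part of this record). An empty update list is legal, since the constructor only normalizes the iterable to a list; the automaton attribute on a State is confirmed by the satisfiedBy record later in this file:

    from pyxb.utils.fac import Transition

    # A transition into dest_state that touches no counters; any
    # iterable of UpdateInstruction values may be passed instead.
    t = Transition(dest_state, [])
    # layer_link marks a sub-automaton boundary; per the layerLink
    # record above, an Automaton value means "enter that automaton".
    enter = Transition(dest_state, [], layer_link=dest_state.automaton)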
781f07226e6d18d177e5f49e23e882a2f490c513050407cd420186d1947751d0
def consumingState(self): 'Return the state in this transition chain that must match a symbol.' if (self.__destination.subAutomata is not None): if (not self.__nextTransition): return None return self.__nextTransition.consumingState() assert (self.__nextTransition is None) return self.__destination
Return the state in this transition chain that must match a symbol.
pyxb/utils/fac.py
consumingState
maciekwawro/pyxb
123
python
def consumingState(self): if (self.__destination.subAutomata is not None): if (not self.__nextTransition): return None return self.__nextTransition.consumingState() assert (self.__nextTransition is None) return self.__destination
def consumingState(self): if (self.__destination.subAutomata is not None): if (not self.__nextTransition): return None return self.__nextTransition.consumingState() assert (self.__nextTransition is None) return self.__destination<|docstring|>Return the state in this transition chain that must match a symbol.<|endoftext|>
0945b3fb5749bb218f3ddc9e1259bfbebd7e6b0c1afe954023b064185b567bb4
def consumedSymbol(self): 'Return the L{symbol<State.symbol>} of the L{consumingState}.' return self.consumingState().symbol
Return the L{symbol<State.symbol>} of the L{consumingState}.
pyxb/utils/fac.py
consumedSymbol
maciekwawro/pyxb
123
python
def consumedSymbol(self): return self.consumingState().symbol
def consumedSymbol(self): return self.consumingState().symbol<|docstring|>Return the L{symbol<State.symbol>} of the L{consumingState}.<|endoftext|>
7a325a859ac885b2747ddb977384ada4b59008f232dc412c461ca24d74a2ab37
def satisfiedBy(self, configuration): 'Check the transition update instructions against\n configuration counter values.\n\n This implementation follows layer changes, updating the\n configuration used as counter value source as necessary.\n\n @param configuration: A L{Configuration} instance containing\n counter data against which update instruction satisfaction is\n checked.\n\n @return: C{True} iff all update instructions along the\n transition chain are satisfied by their relevant\n configuration.' if isinstance(self.__layerLink, Automaton): return True if isinstance(self.__layerLink, Configuration): configuration = self.__layerLink assert (self.destination.automaton == configuration.automaton) if (not configuration.satisfies(self)): return False if self.__nextTransition: return self.__nextTransition.satisfiedBy(configuration) return True
Check the transition update instructions against configuration counter values. This implementation follows layer changes, updating the configuration used as counter value source as necessary. @param configuration: A L{Configuration} instance containing counter data against which update instruction satisfaction is checked. @return: C{True} iff all update instructions along the transition chain are satisfied by their relevant configuration.
pyxb/utils/fac.py
satisfiedBy
maciekwawro/pyxb
123
python
def satisfiedBy(self, configuration): 'Check the transition update instructions against\n configuration counter values.\n\n This implementation follows layer changes, updating the\n configuration used as counter value source as necessary.\n\n @param configuration: A L{Configuration} instance containing\n counter data against which update instruction satisfaction is\n checked.\n\n @return: C{True} iff all update instructions along the\n transition chain are satisfied by their relevant\n configuration.' if isinstance(self.__layerLink, Automaton): return True if isinstance(self.__layerLink, Configuration): configuration = self.__layerLink assert (self.destination.automaton == configuration.automaton) if (not configuration.satisfies(self)): return False if self.__nextTransition: return self.__nextTransition.satisfiedBy(configuration) return True
def satisfiedBy(self, configuration): 'Check the transition update instructions against\n configuration counter values.\n\n This implementation follows layer changes, updating the\n configuration used as counter value source as necessary.\n\n @param configuration: A L{Configuration} instance containing\n counter data against which update instruction satisfaction is\n checked.\n\n @return: C{True} iff all update instructions along the\n transition chain are satisfied by their relevant\n configuration.' if isinstance(self.__layerLink, Automaton): return True if isinstance(self.__layerLink, Configuration): configuration = self.__layerLink assert (self.destination.automaton == configuration.automaton) if (not configuration.satisfies(self)): return False if self.__nextTransition: return self.__nextTransition.satisfiedBy(configuration) return True<|docstring|>Check the transition update instructions against configuration counter values. This implementation follows layer changes, updating the configuration used as counter value source as necessary. @param configuration: A L{Configuration} instance containing counter data against which update instruction satisfaction is checked. @return: C{True} iff all update instructions along the transition chain are satisfied by their relevant configuration.<|endoftext|>
45c2cd231d04cbf9896f9fd86fd55094092eec55945a0a6c5e28388a0a41115d
def apply(self, configuration, clone_map=None): 'Apply the transition to a configuration.\n\n This updates the configuration counter values based on the\n update instructions, and sets the new configuration state.\n\n @note: If the transition involves leaving a sub-automaton or\n creating a new sub-automaton, the returned configuration\n structure will be different from the one passed in. You\n should invoke this as::\n\n cfg = transition.apply(cfg)\n\n @param configuration: A L{Configuration} of an executing automaton\n\n @param clone_map: A map from L{Configuration} to\n L{Configuration} reflecting the replacements made when the\n configuration for which the transition was calculated was\n subsequently cloned into the C{configuration} passed into this\n method. This is only necessary when the transition includes\n layer transitions.\n\n @return: The resulting configuration\n ' layer_link = self.__layerLink if isinstance(layer_link, Configuration): if (clone_map is not None): layer_link = clone_map[layer_link] configuration = layer_link.leaveAutomaton(configuration) elif isinstance(layer_link, Automaton): configuration = configuration.enterAutomaton(layer_link) UpdateInstruction.Apply(self.updateInstructions, configuration._get_counterValues()) configuration._set_state(self.destination, (layer_link is None)) if (self.__nextTransition is None): return configuration return self.__nextTransition.apply(configuration, clone_map)
Apply the transition to a configuration. This updates the configuration counter values based on the update instructions, and sets the new configuration state. @note: If the transition involves leaving a sub-automaton or creating a new sub-automaton, the returned configuration structure will be different from the one passed in. You should invoke this as:: cfg = transition.apply(cfg) @param configuration: A L{Configuration} of an executing automaton @param clone_map: A map from L{Configuration} to L{Configuration} reflecting the replacements made when the configuration for which the transition was calculated was subsequently cloned into the C{configuration} passed into this method. This is only necessary when the transition includes layer transitions. @return: The resulting configuration
pyxb/utils/fac.py
apply
maciekwawro/pyxb
123
python
def apply(self, configuration, clone_map=None): 'Apply the transition to a configuration.\n\n This updates the configuration counter values based on the\n update instructions, and sets the new configuration state.\n\n @note: If the transition involves leaving a sub-automaton or\n creating a new sub-automaton, the returned configuration\n structure will be different from the one passed in. You\n should invoke this as::\n\n cfg = transition.apply(cfg)\n\n @param configuration: A L{Configuration} of an executing automaton\n\n @param clone_map: A map from L{Configuration} to\n L{Configuration} reflecting the replacements made when the\n configuration for which the transition was calculated was\n subsequently cloned into the C{configuration} passed into this\n method. This is only necessary when the transition includes\n layer transitions.\n\n @return: The resulting configuration\n ' layer_link = self.__layerLink if isinstance(layer_link, Configuration): if (clone_map is not None): layer_link = clone_map[layer_link] configuration = layer_link.leaveAutomaton(configuration) elif isinstance(layer_link, Automaton): configuration = configuration.enterAutomaton(layer_link) UpdateInstruction.Apply(self.updateInstructions, configuration._get_counterValues()) configuration._set_state(self.destination, (layer_link is None)) if (self.__nextTransition is None): return configuration return self.__nextTransition.apply(configuration, clone_map)
def apply(self, configuration, clone_map=None): 'Apply the transition to a configuration.\n\n This updates the configuration counter values based on the\n update instructions, and sets the new configuration state.\n\n @note: If the transition involves leaving a sub-automaton or\n creating a new sub-automaton, the returned configuration\n structure will be different from the one passed in. You\n should invoke this as::\n\n cfg = transition.apply(cfg)\n\n @param configuration: A L{Configuration} of an executing automaton\n\n @param clone_map: A map from L{Configuration} to\n L{Configuration} reflecting the replacements made when the\n configuration for which the transition was calculated was\n subsequently cloned into the C{configuration} passed into this\n method. This is only necessary when the transition includes\n layer transitions.\n\n @return: The resulting configuration\n ' layer_link = self.__layerLink if isinstance(layer_link, Configuration): if (clone_map is not None): layer_link = clone_map[layer_link] configuration = layer_link.leaveAutomaton(configuration) elif isinstance(layer_link, Automaton): configuration = configuration.enterAutomaton(layer_link) UpdateInstruction.Apply(self.updateInstructions, configuration._get_counterValues()) configuration._set_state(self.destination, (layer_link is None)) if (self.__nextTransition is None): return configuration return self.__nextTransition.apply(configuration, clone_map)<|docstring|>Apply the transition to a configuration. This updates the configuration counter values based on the update instructions, and sets the new configuration state. @note: If the transition involves leaving a sub-automaton or creating a new sub-automaton, the returned configuration structure will be different from the one passed in. You should invoke this as:: cfg = transition.apply(cfg) @param configuration: A L{Configuration} of an executing automaton @param clone_map: A map from L{Configuration} to L{Configuration} reflecting the replacements made when the configuration for which the transition was calculated was subsequently cloned into the C{configuration} passed into this method. This is only necessary when the transition includes layer transitions. @return: The resulting configuration<|endoftext|>
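The @note in the apply docstring is the key operational detail: the call can hand back a different Configuration whenever a layer boundary is crossed, so its result must always be rebound. A short sketch pairing it with satisfiedBy from the earlier record; cfg and t are placeholder names for an executing configuration and a candidate transition:

    # Check the update instructions against the current counter
    # values before committing to the transition.
    if t.satisfiedBy(cfg):
        # Rebind: apply may return a new Configuration object when
        # entering or leaving a sub-automaton.
        cfg = t.apply(cfg)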
588848a31c52d339fed10c76395885524875c9964db6f27b73da8bf3412623ee
def chainTo(self, next_transition): 'Duplicate the transition and chain the duplicate to a successor\n transition.\n\n This returns a new transition which applies the operation for\n this transition, then proceeds to apply the next transition in\n the chain.\n\n @note: The node that is invoking this must not have successor\n transitions.\n\n @param next_transition: A L{Transition} node describing a\n subsequent transition.\n\n @return: a clone of this node, augmented with a link to\n C{next_transition}.' assert (not self.__nextTransition) head = type(self)(self.__destination, self.__updateInstructions, layer_link=self.__layerLink) head.__nextTransition = next_transition return head
Duplicate the transition and chain the duplicate to a successor transition. This returns a new transition which applies the operation for this transition, then proceeds to apply the next transition in the chain. @note: The node that is invoking this must not have successor transitions. @param next_transition: A L{Transition} node describing a subsequent transition. @return: a clone of this node, augmented with a link to C{next_transition}.
pyxb/utils/fac.py
chainTo
maciekwawro/pyxb
123
python
def chainTo(self, next_transition): 'Duplicate the transition and chain the duplicate to a successor\n transition.\n\n This returns a new transition which applies the operation for\n this transition, then proceeds to apply the next transition in\n the chain.\n\n @note: The node that is invoking this must not have successor\n transitions.\n\n @param next_transition: A L{Transition} node describing a\n subsequent transition.\n\n @return: a clone of this node, augmented with a link to\n C{next_transition}.' assert (not self.__nextTransition) head = type(self)(self.__destination, self.__updateInstructions, layer_link=self.__layerLink) head.__nextTransition = next_transition return head
def chainTo(self, next_transition): 'Duplicate the transition and chain the duplicate to a successor\n transition.\n\n This returns a new transition which applies the operation for\n this transition, then proceeds to apply the next transition in\n the chain.\n\n @note: The node that is invoking this must not have successor\n transitions.\n\n @param next_transition: A L{Transition} node describing a\n subsequent transition.\n\n @return: a clone of this node, augmented with a link to\n C{next_transition}.' assert (not self.__nextTransition) head = type(self)(self.__destination, self.__updateInstructions, layer_link=self.__layerLink) head.__nextTransition = next_transition return head<|docstring|>Duplicate the transition and chain the duplicate to a successor transition. This returns a new transition which applies the operation for this transition, then proceeds to apply the next transition in the chain. @note: The node that is invoking this must not have successor transitions. @param next_transition: A L{Transition} node describing a subsequent transition. @return: a clone of this node, augmented with a link to C{next_transition}.<|endoftext|>
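A chaining sketch under the assumption that t1 has no successor yet (the assert in the body enforces this); t1, t2, and cfg are placeholder names, not part of this record:

    # head behaves like t1 followed immediately by t2; t1 itself is
    # untouched because chainTo clones before linking.
    head = t1.chainTo(t2)
    cfg = head.apply(cfg)  # applies both transitions in sequence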
41f0992b50b9299a7ded6989a2a840f959906509c4b40f0562f30ec8011d0d17
def makeEnterAutomatonTransition(self): 'Replicate the transition as a layer link into its automaton.\n\n This is used on initial transitions into sub-automata where a\n sub-configuration must be created and recorded.' assert (self.__layerLink is None) assert (self.__nextTransition is None) head = type(self)(self.__destination, self.__updateInstructions) head.__layerLink = self.__destination.automaton return head
Replicate the transition as a layer link into its automaton. This is used on initial transitions into sub-automata where a sub-configuration must be created and recorded.
pyxb/utils/fac.py
makeEnterAutomatonTransition
maciekwawro/pyxb
123
python
def makeEnterAutomatonTransition(self): 'Replicate the transition as a layer link into its automaton.\n\n This is used on initial transitions into sub-automata where a\n sub-configuration must be created and recorded.' assert (self.__layerLink is None) assert (self.__nextTransition is None) head = type(self)(self.__destination, self.__updateInstructions) head.__layerLink = self.__destination.automaton return head
def makeEnterAutomatonTransition(self): 'Replicate the transition as a layer link into its automaton.\n\n This is used on initial transitions into sub-automata where a\n sub-configuration must be created and recorded.' assert (self.__layerLink is None) assert (self.__nextTransition is None) head = type(self)(self.__destination, self.__updateInstructions) head.__layerLink = self.__destination.automaton return head<|docstring|>Replicate the transition as a layer link into its automaton. This is used on initial transitions into sub-automata where a sub-configuration must be created and recorded.<|endoftext|>
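A brief sketch of the documented use, with t a placeholder transition whose destination lies inside a sub-automaton:

    # The replica carries the destination's automaton as its layer
    # link, so apply() will create and enter a sub-configuration.
    enter_t = t.makeEnterAutomatonTransition()
    cfg = enter_t.apply(cfg)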
1df0714402dca463279e0b0370295278128cb382758eb0a7dc9b496f6449f984
def acceptableSymbols(self): 'Return the acceptable L{Symbol}s given the current\n configuration.\n\n This method extracts the symbol from all candidate transitions\n that are permitted based on the current counter values.\n Because transitions are presented in a preferred order, the\n symbols are as well.' raise NotImplementedError(('%s.acceptableSymbols' % (type(self).__name__,)))
Return the acceptable L{Symbol}s given the current configuration. This method extracts the symbol from all candidate transitions that are permitted based on the current counter values. Because transitions are presented in a preferred order, the symbols are as well.
pyxb/utils/fac.py
acceptableSymbols
maciekwawro/pyxb
123
python
def acceptableSymbols(self): 'Return the acceptable L{Symbol}s given the current\n configuration.\n\n This method extracts the symbol from all candidate transitions\n that are permitted based on the current counter values.\n Because transitions are presented in a preferred order, the\n symbols are as well.' raise NotImplementedError(('%s.acceptableSymbols' % (type(self).__name__,)))
def acceptableSymbols(self): 'Return the acceptable L{Symbol}s given the current\n configuration.\n\n This method extracts the symbol from all candidate transitions\n that are permitted based on the current counter values.\n Because transitions are presented in a preferred order, the\n symbols are as well.' raise NotImplementedError(('%s.acceptableSymbols' % (type(self).__name__,)))<|docstring|>Return the acceptable L{Symbol}s given the current configuration. This method extracts the symbol from all candidate transitions that are permitted based on the current counter values. Because transitions are presented in a preferred order, the symbols are as well.<|endoftext|>
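Because this record is only an abstract declaration (the body raises NotImplementedError), a usage sketch has to assume a concrete subclass such as Configuration; cfg is a placeholder instance:

    # Preferred-order list of Symbol values the automaton could
    # accept next; handy for diagnostics and error reporting.
    candidates = cfg.acceptableSymbols()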
17a03268c5198a4f9e008c0d34af4029044e8079db12ade2bdc5d36456050a81
def step(self, symbol): "Execute an automaton transition using the given symbol.\n\n @param symbol: A symbol from the alphabet of the automaton's\n language. This is a Python value that should be accepted by\n the L{SymbolMatch_mixin.match} method of a L{State.symbol}.\n It is not a L{Symbol} instance.\n\n @return: The new configuration resulting from the step.\n\n @raises AutomatonStepError: L{UnrecognizedSymbolError}\n when no transition compatible with C{symbol} is available, and\n L{NondeterministicSymbolError} if C{symbol} admits multiple\n transitions and the subclass does not support\n non-deterministic steps (see L{MultiConfiguration}).\n\n @warning: If the step entered or left a sub-automaton the\n return value will not be the configuration that was used to\n execute the step. The proper pattern for using this method\n is::\n\n cfg = cfg.step(sym)\n\n " raise NotImplementedError(('%s.step' % (type(self).__name__,)))
Execute an automaton transition using the given symbol. @param symbol: A symbol from the alphabet of the automaton's language. This is a Python value that should be accepted by the L{SymbolMatch_mixin.match} method of a L{State.symbol}. It is not a L{Symbol} instance. @return: The new configuration resulting from the step. @raises AutomatonStepError: L{UnrecognizedSymbolError} when no transition compatible with C{symbol} is available, and L{NondeterministicSymbolError} if C{symbol} admits multiple transitions and the subclass does not support non-deterministic steps (see L{MultiConfiguration}). @warning: If the step entered or left a sub-automaton the return value will not be the configuration that was used to execute the step. The proper pattern for using this method is:: cfg = cfg.step(sym)
pyxb/utils/fac.py
step
maciekwawro/pyxb
123
python
def step(self, symbol): "Execute an automaton transition using the given symbol.\n\n @param symbol: A symbol from the alphabet of the automaton's\n language. This is a Python value that should be accepted by\n the L{SymbolMatch_mixin.match} method of a L{State.symbol}.\n It is not a L{Symbol} instance.\n\n @return: The new configuration resulting from the step.\n\n @raises AutomatonStepError: L{UnrecognizedSymbolError}\n when no transition compatible with C{symbol} is available, and\n L{NondeterministicSymbolError} if C{symbol} admits multiple\n transitions and the subclass does not support\n non-deterministic steps (see L{MultiConfiguration}).\n\n @warning: If the step entered or left a sub-automaton the\n return value will not be the configuration that was used to\n execute the step. The proper pattern for using this method\n is::\n\n cfg = cfg.step(sym)\n\n " raise NotImplementedError(('%s.step' % (type(self).__name__,)))
def step(self, symbol): "Execute an automaton transition using the given symbol.\n\n @param symbol: A symbol from the alphabet of the automaton's\n language. This is a Python value that should be accepted by\n the L{SymbolMatch_mixin.match} method of a L{State.symbol}.\n It is not a L{Symbol} instance.\n\n @return: The new configuration resulting from the step.\n\n @raises AutomatonStepError: L{UnrecognizedSymbolError}\n when no transition compatible with C{symbol} is available, and\n L{NondeterministicSymbolError} if C{symbol} admits multiple\n transitions and the subclass does not support\n non-deterministic steps (see L{MultiConfiguration}).\n\n @warning: If the step entered or left a sub-automaton the\n return value will not be the configuration that was used to\n execute the step. The proper pattern for using this method\n is::\n\n cfg = cfg.step(sym)\n\n " raise NotImplementedError(('%s.step' % (type(self).__name__,)))<|docstring|>Execute an automaton transition using the given symbol. @param symbol: A symbol from the alphabet of the automaton's language. This is a Python value that should be accepted by the L{SymbolMatch_mixin.match} method of a L{State.symbol}. It is not a L{Symbol} instance. @return: The new configuration resulting from the step. @raises AutomatonStepError: L{UnrecognizedSymbolError} when no transition compatible with C{symbol} is available, and L{NondeterministicSymbolError} if C{symbol} admits multiple transitions and the subclass does not support non-deterministic steps (see L{MultiConfiguration}). @warning: If the step entered or left a sub-automaton the return value will not be the configuration that was used to execute the step. The proper pattern for using this method is:: cfg = cfg.step(sym)<|endoftext|>
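A sketch of the driving loop implied by the step docstring, assuming cfg is a Configuration and symbols an iterable of Python values (both placeholders), and assuming AutomatonStepError, the base of the two errors named in the @raises clause, is importable from the same module:

    from pyxb.utils.fac import AutomatonStepError

    # step() may return a replacement Configuration at layer
    # boundaries, so the result is always rebound.
    for sym in symbols:
        try:
            cfg = cfg.step(sym)
        except AutomatonStepError as e:
            # Covers both UnrecognizedSymbolError and
            # NondeterministicSymbolError listed above.
            print('cannot step on %r: %s' % (sym, e))
            break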
e2a649ec4d8015e2614a2ca3a85cdc4174b2fa1dddf4afca1d1fb7bdf24a7896
def __get_state(self): "The state of the configuration.\n\n This is C{None} to indicate an initial state, or one of the underlying automaton's states." return self.__state
The state of the configuration. This is C{None} to indicate an initial state, or one of the underlying automaton's states.
pyxb/utils/fac.py
__get_state
maciekwawro/pyxb
123
python
def __get_state(self): "The state of the configuration.\n\n This is C{None} to indicate an initial state, or one of the underlying automaton's states." return self.__state
def __get_state(self): "The state of the configuration.\n\n This is C{None} to indicate an initial state, or one of the underlying automaton's states." return self.__state<|docstring|>The state of the configuration. This is C{None} to indicate an initial state, or one of the underlying automaton's states.<|endoftext|>