<SYSTEM_TASK:> Handle truncated responses <END_TASK> <USER_TASK:> Description: def handle_truncated_response(callback, params, entities): """ Handle truncated responses :param callback: :param params: :param entities: :return: """
results = {}
for entity in entities:
    results[entity] = []
while True:
    try:
        marker_found = False
        response = callback(**params)
        for entity in entities:
            if entity in response:
                results[entity] = results[entity] + response[entity]
        for marker_name in ['NextToken', 'Marker', 'PaginationToken']:
            if marker_name in response and response[marker_name]:
                params[marker_name] = response[marker_name]
                marker_found = True
        if not marker_found:
            break
    except Exception as e:
        if is_throttled(e):
            time.sleep(1)
        else:
            raise e
return results
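For illustration, a minimal usage sketch of the pagination helper above — an assumption, not part of the source: it presumes boto3 is installed with credentials configured and that handle_truncated_response is importable. iam:ListUsers paginates via the 'Marker' token, which the helper picks up automatically.

import boto3

iam = boto3.client('iam')
# Accumulate every page of the 'Users' response key until no marker remains
all_users = handle_truncated_response(iam.list_users, {'MaxItems': 100}, ['Users'])
print('Fetched %d users' % len(all_users['Users']))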
<SYSTEM_TASK:> Converts a PDF to a JPG and places it back onto the FileStorage instance <END_TASK> <USER_TASK:> Description: def pdftojpg(filehandle, meta): """Converts a PDF to a JPG and places it back onto the FileStorage instance passed to it as a BytesIO object. Optional meta arguments are: * resolution: int or (int, int) used for wand to determine resolution, defaults to 300. * width: new width of the image for resizing, defaults to 1080 * bgcolor: new background color, defaults to 'white' """
resolution = meta.get('resolution', 300)
width = meta.get('width', 1080)
bgcolor = Color(meta.get('bgcolor', 'white'))
stream = BytesIO()
with Image(blob=filehandle.stream, resolution=resolution) as img:
    img.background_color = bgcolor
    img.alpha_channel = False
    img.format = 'jpeg'
    ratio = width / img.width
    img.resize(width, int(ratio * img.height))
    img.compression_quality = 90
    img.save(file=stream)
stream.seek(0)
filehandle.stream = stream
return filehandle
<SYSTEM_TASK:> Changes the filename to reflect the conversion from PDF to JPG. <END_TASK> <USER_TASK:> Description: def change_filename(filehandle, meta): """Changes the filename to reflect the conversion from PDF to JPG. This method will preserve the original filename in the meta dictionary. """
filename = secure_filename(meta.get('filename', filehandle.filename))
basename, _ = os.path.splitext(filename)
meta['original_filename'] = filehandle.filename
# Use the extension-stripped basename so 'report.pdf' becomes 'report.jpg'
filehandle.filename = basename + '.jpg'
return filehandle
<SYSTEM_TASK:> Returns the list of IP prefixes from an ip-ranges file <END_TASK> <USER_TASK:> Description: def read_ip_ranges(filename, local_file = True, ip_only = False, conditions = []): """ Returns the list of IP prefixes from an ip-ranges file :param filename: :param local_file: :param conditions: :param ip_only: :return: """
targets = []
data = load_data(filename, local_file = local_file)
if 'source' in data:
    # Filtered IP ranges
    conditions = data['conditions']
    local_file = data['local_file'] if 'local_file' in data else False
    data = load_data(data['source'], local_file = local_file, key_name = 'prefixes')
else:
    # Plain IP ranges
    data = data['prefixes']
for d in data:
    condition_passed = True
    for condition in conditions:
        if type(condition) != list or len(condition) < 3:
            continue
        condition_passed = pass_condition(d[condition[0]], condition[1], condition[2])
        if not condition_passed:
            break
    if condition_passed:
        targets.append(d)
if ip_only:
    ips = []
    for t in targets:
        ips.append(t['ip_prefix'])
    return ips
else:
    return targets
<SYSTEM_TASK:> Configure a Flask application to use this ZODB extension. <END_TASK> <USER_TASK:> Description: def init_app(self, app): """Configure a Flask application to use this ZODB extension."""
assert 'zodb' not in app.extensions, \
    'app already initiated for zodb'
app.extensions['zodb'] = _ZODBState(self, app)
app.teardown_request(self.close_db)
<SYSTEM_TASK:> Added as a `~flask.Flask.teardown_request` to applications to <END_TASK> <USER_TASK:> Description: def close_db(self, exception): """Added as a `~flask.Flask.teardown_request` to applications to commit the transaction and disconnect ZODB if it was used during the request."""
if self.is_connected:
    if exception is None and not transaction.isDoomed():
        transaction.commit()
    else:
        transaction.abort()
    self.connection.close()
<SYSTEM_TASK:> Request-bound database connection. <END_TASK> <USER_TASK:> Description: def connection(self): """Request-bound database connection."""
assert flask.has_request_context(), \
    'tried to connect zodb outside request'
if not self.is_connected:
    connector = flask.current_app.extensions['zodb']
    flask._request_ctx_stack.top.zodb_connection = connector.db.open()
    transaction.begin()
return flask._request_ctx_stack.top.zodb_connection
<SYSTEM_TASK:> Delete a virtual MFA device given its serial number <END_TASK> <USER_TASK:> Description: def delete_virtual_mfa_device(iam_client, mfa_serial): """ Delete a virtual MFA device given its serial number :param iam_client: :param mfa_serial: :return: """
try:
    printInfo('Deleting MFA device %s...' % mfa_serial)
    iam_client.delete_virtual_mfa_device(SerialNumber = mfa_serial)
except Exception as e:
    printException(e)
    printError('Failed to delete MFA device %s' % mfa_serial)
    pass
<SYSTEM_TASK:> Initialize and compile regular expression for category groups <END_TASK> <USER_TASK:> Description: def init_group_category_regex(category_groups, category_regex_args): """ Initialize and compile regular expression for category groups :param category_groups: List of category group names :param category_regex_args: List of string regex :return: List of compiled regex """
category_regex = []
authorized_empty_regex = 1
if len(category_regex_args) and len(category_groups) != len(category_regex_args):
    printError('Error: you must provide as many regex as category groups.')
    return None
for regex in category_regex_args:
    if len(regex) < 1:
        if authorized_empty_regex > 0:
            category_regex.append(None)
            authorized_empty_regex -= 1
        else:
            printError('Error: you cannot have more than one empty regex to automatically assign groups to users.')
            return None
    else:
        category_regex.append(re.compile(regex))
return category_regex
<SYSTEM_TASK:> Generic multithreading helper <END_TASK> <USER_TASK:> Description: def thread_work(targets, function, params = {}, num_threads = 0): """ Generic multithreading helper :param targets: :param function: :param params: :param num_threads: :return: """
q = Queue(maxsize=0)
if not num_threads:
    num_threads = len(targets)
for i in range(num_threads):
    worker = Thread(target=function, args=(q, params))
    worker.setDaemon(True)
    worker.start()
for target in targets:
    q.put(target)
q.join()
<SYSTEM_TASK:> Helper for multithreading on a per-region basis <END_TASK> <USER_TASK:> Description: def threaded_per_region(q, params): """ Helper for multithreading on a per-region basis :param q: :param params: :return: """
while True:
    try:
        params['region'] = q.get()
        method = params['method']
        method(params)
    except Exception as e:
        printException(e)
    finally:
        q.task_done()
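A hedged sketch of how the two helpers above pair up — the per-region callable and region names here are invented for illustration; threaded_per_region injects each queued region into params before invoking params['method']:

def per_region_task(params):
    # Hypothetical worker body; 'region' is set by threaded_per_region
    print('Scanning region %s' % params['region'])

thread_work(['us-east-1', 'us-west-2', 'eu-west-1'],
            threaded_per_region,
            params={'method': per_region_task},
            num_threads=3)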
<SYSTEM_TASK:> Remove all decorators from a function, method or class <END_TASK> <USER_TASK:> Description: def undecorated(o): """Remove all decorators from a function, method or class"""
# class decorator
if type(o) is type:
    return o

try:
    # python2
    closure = o.func_closure
except AttributeError:
    pass

try:
    # python3
    closure = o.__closure__
except AttributeError:
    return

if closure:
    for cell in closure:
        # avoid infinite recursion
        if cell.cell_contents is o:
            continue
        # check if the contents looks like a decorator; in that case
        # we need to go one level down into the dream, otherwise it
        # might just be a different closed-over variable, which we
        # can ignore.
        # Note: this favors supporting decorators defined without
        # @wraps to the detriment of function/method/class closures
        if looks_like_a_decorator(cell.cell_contents):
            undecd = undecorated(cell.cell_contents)
            if undecd:
                return undecd
    else:
        return o
else:
    return o
<SYSTEM_TASK:> Assume role and save credentials <END_TASK> <USER_TASK:> Description: def assume_role(role_name, credentials, role_arn, role_session_name, silent = False): """ Assume role and save credentials :param role_name: :param credentials: :param role_arn: :param role_session_name: :param silent: :return: """
external_id = credentials.pop('ExternalId') if 'ExternalId' in credentials else None
# Connect to STS
sts_client = connect_service('sts', credentials, silent = silent)
# Set required arguments for assume role call
sts_args = {
    'RoleArn': role_arn,
    'RoleSessionName': role_session_name
}
# MFA used ?
if 'mfa_serial' in credentials and 'mfa_code' in credentials:
    sts_args['TokenCode'] = credentials['mfa_code']
    sts_args['SerialNumber'] = credentials['mfa_serial']
# External ID used ?
if external_id:
    sts_args['ExternalId'] = external_id
# Assume the role
sts_response = sts_client.assume_role(**sts_args)
credentials = sts_response['Credentials']
cached_credentials_filename = get_cached_credentials_filename(role_name, role_arn)
#with open(cached_credentials_filename, 'wt+') as f:
#    write_data_to_file(f, sts_response, True, False)
cached_credentials_path = os.path.dirname(cached_credentials_filename)
if not os.path.isdir(cached_credentials_path):
    os.makedirs(cached_credentials_path)
save_blob_as_json(cached_credentials_filename, sts_response, True, False)  # blob, force_write, debug
return credentials
<SYSTEM_TASK:> Generate a password using random characters from uppercase, lowercase, digits, and symbols <END_TASK> <USER_TASK:> Description: def generate_password(length=16): """ Generate a password using random characters from uppercase, lowercase, digits, and symbols :param length: Length of the password to be generated :return: The random password """
chars = string.ascii_letters + string.digits + '!@#$%^&*()_+-=[]{};:,<.>?|'
modulus = len(chars)
# Draw 'length' random bytes and map each one onto the character set
pchars = os.urandom(length)
if type(pchars) == str:
    # Python 2: os.urandom() returns a str, so convert bytes to ints with ord()
    return ''.join(chars[i % modulus] for i in map(ord, pchars))
else:
    # Python 3: os.urandom() returns bytes, which iterate as ints directly
    return ''.join(chars[i % modulus] for i in pchars)
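A side note on the mapping above: taking each random byte modulo the charset size introduces a slight modulo bias, since the 88-character set does not divide 256 evenly. A bias-free sketch on Python 3.6+, using the standard secrets module (shown as an alternative, not the source's implementation):

import secrets
import string

def generate_password_unbiased(length=16):
    chars = string.ascii_letters + string.digits + '!@#$%^&*()_+-=[]{};:,<.>?|'
    # secrets.choice draws uniformly from chars, avoiding the modulo bias
    return ''.join(secrets.choice(chars) for _ in range(length))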
<SYSTEM_TASK:> Read credentials from AWS config file <END_TASK> <USER_TASK:> Description: def read_creds_from_aws_credentials_file(profile_name, credentials_file = aws_credentials_file): """ Read credentials from AWS config file :param profile_name: :param credentials_file: :return: """
credentials = init_creds()
profile_found = False
try:
    # Make sure the ~/.aws folder exists
    if not os.path.exists(aws_config_dir):
        os.makedirs(aws_config_dir)
    with open(credentials_file, 'rt') as cf:
        for line in cf:
            profile_line = re_profile_name.match(line)
            if profile_line:
                if profile_line.groups()[0] == profile_name:
                    profile_found = True
                else:
                    profile_found = False
            if profile_found:
                if re_access_key.match(line):
                    credentials['AccessKeyId'] = line.split("=")[1].strip()
                elif re_secret_key.match(line):
                    credentials['SecretAccessKey'] = line.split("=")[1].strip()
                elif re_mfa_serial.match(line):
                    credentials['SerialNumber'] = (line.split('=')[1]).strip()
                elif re_session_token.match(line) or re_security_token.match(line):
                    credentials['SessionToken'] = ('='.join(x for x in line.split('=')[1:])).strip()
                elif re_expiration.match(line):
                    credentials['Expiration'] = ('='.join(x for x in line.split('=')[1:])).strip()
except Exception as e:
    # Silent if error is due to no ~/.aws/credentials file
    if not hasattr(e, 'errno') or e.errno != 2:
        printException(e)
return credentials
<SYSTEM_TASK:> Read credentials from a CSV file <END_TASK> <USER_TASK:> Description: def read_creds_from_csv(filename): """ Read credentials from a CSV file :param filename: :return: """
key_id = None
secret = None
mfa_serial = None
secret_next = False
with open(filename, 'rt') as csvfile:
    for i, line in enumerate(csvfile):
        values = line.split(',')
        for v in values:
            if v.startswith('AKIA'):
                key_id = v.strip()
                secret_next = True
            elif secret_next:
                secret = v.strip()
                secret_next = False
            elif re_mfa_serial_format.match(v):
                mfa_serial = v.strip()
return key_id, secret, mfa_serial
<SYSTEM_TASK:> Read credentials from environment variables <END_TASK> <USER_TASK:> Description: def read_creds_from_environment_variables(): """ Read credentials from environment variables :return: """
creds = init_creds()
# Check environment variables
if 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ:
    creds['AccessKeyId'] = os.environ['AWS_ACCESS_KEY_ID']
    creds['SecretAccessKey'] = os.environ['AWS_SECRET_ACCESS_KEY']
    if 'AWS_SESSION_TOKEN' in os.environ:
        creds['SessionToken'] = os.environ['AWS_SESSION_TOKEN']
return creds
<SYSTEM_TASK:> Write credentials to AWS config file <END_TASK> <USER_TASK:> Description: def write_creds_to_aws_credentials_file(profile_name, credentials, credentials_file = aws_credentials_file): """ Write credentials to AWS config file :param profile_name: :param credentials: :param credentials_file: :return: """
profile_found = False
profile_ever_found = False
session_token_written = False
security_token_written = False
mfa_serial_written = False
expiration_written = False
# Create the .aws folder if needed
if not os.path.isdir(aws_config_dir):
    os.mkdir(aws_config_dir)
# Create an empty file if target does not exist
if not os.path.isfile(credentials_file):
    open(credentials_file, 'a').close()
# Open and parse/edit file
for line in fileinput.input(credentials_file, inplace=True):
    profile_line = re_profile_name.match(line)
    if profile_line:
        if profile_line.groups()[0] == profile_name:
            profile_found = True
            profile_ever_found = True
        else:
            profile_found = False
        print(line.rstrip())
    elif profile_found:
        if re_access_key.match(line) and 'AccessKeyId' in credentials and credentials['AccessKeyId']:
            print('aws_access_key_id = %s' % credentials['AccessKeyId'])
        elif re_secret_key.match(line) and 'SecretAccessKey' in credentials and credentials['SecretAccessKey']:
            print('aws_secret_access_key = %s' % credentials['SecretAccessKey'])
        elif re_mfa_serial.match(line) and 'SerialNumber' in credentials and credentials['SerialNumber']:
            print('aws_mfa_serial = %s' % credentials['SerialNumber'])
            mfa_serial_written = True
        elif re_session_token.match(line) and 'SessionToken' in credentials and credentials['SessionToken']:
            print('aws_session_token = %s' % credentials['SessionToken'])
            session_token_written = True
        elif re_security_token.match(line) and 'SessionToken' in credentials and credentials['SessionToken']:
            print('aws_security_token = %s' % credentials['SessionToken'])
            security_token_written = True
        elif re_expiration.match(line) and 'Expiration' in credentials and credentials['Expiration']:
            print('expiration = %s' % credentials['Expiration'])
            expiration_written = True
        else:
            print(line.rstrip())
    else:
        print(line.rstrip())
# Complete the profile if needed
if profile_found:
    with open(credentials_file, 'a') as f:
        complete_profile(f, credentials, session_token_written, mfa_serial_written)
# Add new profile if not found
if not profile_ever_found:
    with open(credentials_file, 'a') as f:
        f.write('[%s]\n' % profile_name)
        f.write('aws_access_key_id = %s\n' % credentials['AccessKeyId'])
        f.write('aws_secret_access_key = %s\n' % credentials['SecretAccessKey'])
        complete_profile(f, credentials, session_token_written, mfa_serial_written)
<SYSTEM_TASK:> Append session token and mfa serial if needed <END_TASK> <USER_TASK:> Description: def complete_profile(f, credentials, session_token_written, mfa_serial_written): """ Append session token and mfa serial if needed :param f: :param credentials: :param session_token_written: :param mfa_serial_written: :return: """
session_token = credentials['SessionToken'] if 'SessionToken' in credentials else None
mfa_serial = credentials['SerialNumber'] if 'SerialNumber' in credentials else None
if session_token and not session_token_written:
    f.write('aws_session_token = %s\n' % session_token)
if mfa_serial and not mfa_serial_written:
    f.write('aws_mfa_serial = %s\n' % mfa_serial)
<SYSTEM_TASK:> Verify which AWS accounts have been configured for CloudFormation stack set by attempting to assume the stack set execution role <END_TASK> <USER_TASK:> Description: def get_stackset_ready_accounts(credentials, account_ids, quiet=True): """ Verify which AWS accounts have been configured for CloudFormation stack set by attempting to assume the stack set execution role :param credentials: AWS credentials to use when calling sts:assumerole :param account_ids: List of AWS accounts to check for stack set configuration :return: List of account IDs in which assuming the stack set execution role worked """
api_client = connect_service('sts', credentials, silent=True)
configured_account_ids = []
for account_id in account_ids:
    try:
        role_arn = 'arn:aws:iam::%s:role/AWSCloudFormationStackSetExecutionRole' % account_id
        api_client.assume_role(RoleArn=role_arn, RoleSessionName='opinel-get_stackset_ready_accounts')
        configured_account_ids.append(account_id)
    except Exception as e:
        pass
if len(configured_account_ids) != len(account_ids) and not quiet:
    printInfo('Only %d of these accounts have the necessary stack set execution role:' % len(configured_account_ids))
    printDebug(str(configured_account_ids))
return configured_account_ids
<SYSTEM_TASK:> Get the feed content using 'requests' <END_TASK> <USER_TASK:> Description: def fetch(self, url): """ Get the feed content using 'requests' """
try:
    r = requests.get(url, timeout=self.timeout)
except requests.exceptions.Timeout:
    if not self.safe:
        raise
    else:
        return None
# Raise 404/500 error if any (test against None explicitly: a Response
# object for an error status is itself falsy and would skip the check)
if r is not None and not self.safe:
    r.raise_for_status()
return r.text
<SYSTEM_TASK:> Parse the fetched feed content <END_TASK> <USER_TASK:> Description: def parse(self, content): """ Parse the fetched feed content The dict returned by feedparser contains a 'bozo' key which is set to '1' if the feed is malformed. Return None if the feed is malformed and 'bozo_accept' is 'False', else return the feed content dict. If the feed is malformed but 'bozo_accept' is 'True', the feed content dict will contain the parsing error exception information in 'bozo_exception'. """
if content is None:
    return None
feed = feedparser.parse(content)
# When feed is malformed
if feed['bozo']:
    # keep track of the parsing error exception but as string
    # infos, not an exception object
    exception_content = {
        "exception": str(type(feed['bozo_exception'])),
        "content": str(feed['bozo_exception'].getException()),
        "line": feed['bozo_exception'].getLineNumber(),
        "message": feed['bozo_exception'].getMessage(),
    }
    # Overwrite the bozo content from feedparser
    feed['bozo_exception'] = exception_content
    # bozo feeds are not accepted
    if not self.bozo_accept:
        feed = None
return feed
<SYSTEM_TASK:> Fetch the feed if no cache exist or if cache is stale <END_TASK> <USER_TASK:> Description: def get(self, url, expiration): """ Fetch the feed if no cache exist or if cache is stale """
# Hash the url to get a shorter key, and append the expiration time so the
# same url used with different expirations does not clash in the cache
cache_key = self.cache_key.format(**{
    'id': self._hash_url(url),
    'expire': str(expiration)
})
# Get feed from cache if any
feed = cache.get(cache_key)
# Else fetch it
if feed is None:
    # No cached feed, we have to fetch it
    feed = self.fetch(url)
    cache.set(cache_key, feed, expiration)
return self.parse(feed)
<SYSTEM_TASK:> Build template context with formatted feed content <END_TASK> <USER_TASK:> Description: def get_context(self, url, expiration): """ Build template context with formatted feed content """
self._feed = self.get(url, expiration)
return {
    self.feed_context_name: self.format_feed_content(self._feed),
}
<SYSTEM_TASK:> Read either the hash or block table of a MPQ archive. <END_TASK> <USER_TASK:> Description: def read_table(self, table_type): """Read either the hash or block table of a MPQ archive."""
if table_type == 'hash':
    entry_class = MPQHashTableEntry
elif table_type == 'block':
    entry_class = MPQBlockTableEntry
else:
    raise ValueError("Invalid table type.")

table_offset = self.header['%s_table_offset' % table_type]
table_entries = self.header['%s_table_entries' % table_type]
key = self._hash('(%s table)' % table_type, 'TABLE')

self.file.seek(table_offset + self.header['offset'])
data = self.file.read(table_entries * 16)
data = self._decrypt(data, key)

def unpack_entry(position):
    entry_data = data[position*16:position*16+16]
    return entry_class._make(
        struct.unpack(entry_class.struct_format, entry_data))

return [unpack_entry(i) for i in range(table_entries)]
<SYSTEM_TASK:> Get the hash table entry corresponding to a given filename. <END_TASK> <USER_TASK:> Description: def get_hash_table_entry(self, filename): """Get the hash table entry corresponding to a given filename."""
hash_a = self._hash(filename, 'HASH_A')
hash_b = self._hash(filename, 'HASH_B')
for entry in self.hash_table:
    if (entry.hash_a == hash_a and entry.hash_b == hash_b):
        return entry
<SYSTEM_TASK:> Extract all files and write them to disk. <END_TASK> <USER_TASK:> Description: def extract_to_disk(self): """Extract all files and write them to disk."""
archive_name, extension = os.path.splitext(os.path.basename(self.file.name))
if not os.path.isdir(os.path.join(os.getcwd(), archive_name)):
    os.mkdir(archive_name)
os.chdir(archive_name)
for filename, data in self.extract().items():
    f = open(filename, 'wb')
    f.write(data or b'')
    f.close()
<SYSTEM_TASK:> Decrypt hash or block table or a sector. <END_TASK> <USER_TASK:> Description: def _decrypt(self, data, key): """Decrypt hash or block table or a sector."""
seed1 = key
seed2 = 0xEEEEEEEE
result = BytesIO()

for i in range(len(data) // 4):
    seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]
    seed2 &= 0xFFFFFFFF
    value = struct.unpack("<I", data[i*4:i*4+4])[0]
    value = (value ^ (seed1 + seed2)) & 0xFFFFFFFF

    seed1 = ((~seed1 << 0x15) + 0x11111111) | (seed1 >> 0x0B)
    seed1 &= 0xFFFFFFFF
    seed2 = value + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF

    result.write(struct.pack("<I", value))

return result.getvalue()
<SYSTEM_TASK:> Return a cache key from a given set of request parameters. <END_TASK> <USER_TASK:> Description: def key_for_request(self, method, url, **kwargs): """ Return a cache key from a given set of request parameters. Default behavior is to return a complete URL for all GET requests, and None otherwise. Can be overriden if caching of non-get requests is desired. """
if method != 'get':
    return None
return requests.Request(url=url, params=kwargs.get('params', {})).prepare().url
<SYSTEM_TASK:> Override, wraps Session.request in caching. <END_TASK> <USER_TASK:> Description: def request(self, method, url, **kwargs): """ Override, wraps Session.request in caching. Cache is only used if key_for_request returns a valid key and should_cache_response was true as well. """
# short circuit if cache isn't configured
if not self.cache_storage:
    resp = super(CachingSession, self).request(method, url, **kwargs)
    resp.fromcache = False
    return resp

resp = None
method = method.lower()

request_key = self.key_for_request(method, url, **kwargs)
if request_key and not self.cache_write_only:
    resp = self.cache_storage.get(request_key)

if resp:
    resp.fromcache = True
else:
    resp = super(CachingSession, self).request(method, url, **kwargs)
    # save to cache if request and response meet criteria
    if request_key and self.should_cache_response(resp):
        self.cache_storage.set(request_key, resp)
    resp.fromcache = False

return resp
<SYSTEM_TASK:> Creates a callable out of the destination. If it's already callable, <END_TASK> <USER_TASK:> Description: def _make_destination_callable(dest): """Creates a callable out of the destination. If it's already callable, the destination is returned. Otherwise, if the object is a string or a writable object, it's wrapped in a closure to be used later. """
if callable(dest):
    return dest
elif hasattr(dest, 'write') or isinstance(dest, string_types):
    return _use_filehandle_to_save(dest)
else:
    raise TypeError("Destination must be a string, writable or callable object.")
<SYSTEM_TASK:> Runs all attached validators on the provided filehandle. <END_TASK> <USER_TASK:> Description: def _validate(self, filehandle, metadata, catch_all_errors=False): """Runs all attached validators on the provided filehandle. In the base implementation of Transfer, the result of `_validate` isn't checked. Rather validators are expected to raise UploadError to report failure. `_validate` can optionally catch all UploadErrors that occur, or bail out at the first one, by toggling the `catch_all_errors` flag. If catch_all_errors is truthy then a single UploadError is raised consisting of all UploadErrors raised. """
errors = []
DEFAULT_ERROR_MSG = '{0!r}({1!r}, {2!r}) returned False'
for validator in self._validators:
    try:
        if not validator(filehandle, metadata):
            msg = DEFAULT_ERROR_MSG.format(validator, filehandle, metadata)
            raise UploadError(msg)
    except UploadError as e:
        if catch_all_errors:
            errors.append(e.args[0])
        else:
            raise
if errors:
    raise UploadError(errors)
<SYSTEM_TASK:> Saves the filehandle to the provided destination or the attached <END_TASK> <USER_TASK:> Description: def save(self, filehandle, destination=None, metadata=None, validate=True, catch_all_errors=False, *args, **kwargs): """Saves the filehandle to the provided destination or the attached default destination. Allows passing arbitrary positional and keyword arguments to the saving mechanism. :param filehandle: werkzeug.FileStorage instance :param destination: String path, callable or writable destination to pass the filehandle off to. Transfer handles transforming a string or writable object into a callable automatically. :param metadata: Optional mapping of metadata to pass to validators, preprocessors, and postprocessors. :param validate boolean: Toggle validation, defaults to True :param catch_all_errors boolean: Toggles if validation should collect all UploadErrors and raise a collected error message or bail out on the first one. """
destination = destination or self._destination
if destination is None:
    raise RuntimeError("Destination for filehandle must be provided.")
elif destination is not self._destination:
    destination = _make_destination_callable(destination)
if metadata is None:
    metadata = {}
if validate:
    self._validate(filehandle, metadata)
filehandle = self._preprocess(filehandle, metadata)
destination(filehandle, metadata)
filehandle = self._postprocess(filehandle, metadata)
return filehandle
<SYSTEM_TASK:> Checks the upload directory to see if the uploaded file would exceed <END_TASK> <USER_TASK:> Description: def check_disk_usage(filehandle, meta): """Checks the upload directory to see if the uploaded file would exceed the total disk allotment. Meant as a quick and dirty example. """
# limit it at twenty kilobytes if no default is provided
MAX_DISK_USAGE = current_app.config.get('MAX_DISK_USAGE', 20 * 1024)
CURRENT_USAGE = really_bad_du(current_app.config['UPLOAD_PATH'])
filehandle.seek(0, os.SEEK_END)
if CURRENT_USAGE + filehandle.tell() > MAX_DISK_USAGE:
    filehandle.close()
    raise UploadError("Upload exceeds allotment.")
filehandle.seek(0)
return filehandle
<SYSTEM_TASK:> Returns a DFA that accepts any word but the ones accepted <END_TASK> <USER_TASK:> Description: def dfa_complementation(dfa: dict) -> dict: """ Returns a DFA that accepts any word but the ones accepted by the input DFA. Let A be a completed DFA, :math:`Ā = (Σ, S, s_0 , ρ, S − F )` is the DFA that runs A but accepts whatever word A does not. :param dict dfa: input DFA. :return: *(dict)* representing the complement of the input DFA. """
dfa_complement = dfa_completion(deepcopy(dfa))
dfa_complement['accepting_states'] = \
    dfa_complement['states'].difference(dfa_complement['accepting_states'])
return dfa_complement
<SYSTEM_TASK:> Returns a DFA accepting the intersection of the DFAs in <END_TASK> <USER_TASK:> Description: def dfa_intersection(dfa_1: dict, dfa_2: dict) -> dict: """ Returns a DFA accepting the intersection of the DFAs in input. Let :math:`A_1 = (Σ, S_1 , s_{01} , ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 , s_{02} , ρ_2 , F_2 )` be two DFAs. Then there is a DFA :math:`A_∧` that runs simultaneously both :math:`A_1` and :math:`A_2` on the input word and accepts when both accept. It is defined as: :math:`A_∧ = (Σ, S_1 × S_2 , (s_{01} , s_{02} ), ρ, F_1 × F_2 )` where :math:`ρ((s_1 , s_2 ), a) = (s_{X1} , s_{X2} )` iff :math:`s_{X1} = ρ_1 (s_1 , a)` and :math:`s_{X2}= ρ_2 (s_2 , a)` Implementation proposed guarantees the resulting DFA has only **reachable** states. :param dict dfa_1: first input DFA; :param dict dfa_2: second input DFA. :return: *(dict)* representing the intersected DFA. """
intersection = {
    'alphabet': dfa_1['alphabet'].intersection(dfa_2['alphabet']),
    'states': {(dfa_1['initial_state'], dfa_2['initial_state'])},
    'initial_state': (dfa_1['initial_state'], dfa_2['initial_state']),
    'accepting_states': set(),
    'transitions': dict()
}

boundary = set()
boundary.add(intersection['initial_state'])
while boundary:
    (state_dfa_1, state_dfa_2) = boundary.pop()
    if state_dfa_1 in dfa_1['accepting_states'] \
            and state_dfa_2 in dfa_2['accepting_states']:
        intersection['accepting_states'].add((state_dfa_1, state_dfa_2))

    for a in intersection['alphabet']:
        if (state_dfa_1, a) in dfa_1['transitions'] \
                and (state_dfa_2, a) in dfa_2['transitions']:
            next_state_1 = dfa_1['transitions'][state_dfa_1, a]
            next_state_2 = dfa_2['transitions'][state_dfa_2, a]
            if (next_state_1, next_state_2) not in intersection['states']:
                intersection['states'].add((next_state_1, next_state_2))
                boundary.add((next_state_1, next_state_2))
            intersection['transitions'][(state_dfa_1, state_dfa_2), a] = \
                (next_state_1, next_state_2)

return intersection
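To make the dict representation concrete, a small illustrative example (the state and symbol names are invented): one DFA accepting binary strings that end in '1', another accepting strings with an even number of '0's, and their product built with the function above:

ends_in_one = {
    'alphabet': {'0', '1'},
    'states': {'p0', 'p1'},
    'initial_state': 'p0',
    'accepting_states': {'p1'},
    'transitions': {('p0', '0'): 'p0', ('p0', '1'): 'p1',
                    ('p1', '0'): 'p0', ('p1', '1'): 'p1'},
}
even_zeros = {
    'alphabet': {'0', '1'},
    'states': {'e', 'o'},
    'initial_state': 'e',
    'accepting_states': {'e'},
    'transitions': {('e', '0'): 'o', ('e', '1'): 'e',
                    ('o', '0'): 'e', ('o', '1'): 'o'},
}
both = dfa_intersection(ends_in_one, even_zeros)
# 'both' accepts e.g. '1' and '001', but rejects '0' and '01'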
<SYSTEM_TASK:> Returns a DFA accepting the union of the input DFAs. <END_TASK> <USER_TASK:> Description: def dfa_union(dfa_1: dict, dfa_2: dict) -> dict: """ Returns a DFA accepting the union of the input DFAs. Let :math:`A_1 = (Σ, S_1 , s_{01} , ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 , s_{02} , ρ_2 , F_2 )` be two completed DFAs. Then there is a DFA :math:`A_∨` that runs simultaneously both :math:`A_1` and :math:`A_2` on the input word and accepts when one of them accepts. It is defined as: :math:`A_∨ = (Σ, S_1 × S_2 , (s_{01} , s_{02} ), ρ, (F_1 × S_2 ) ∪ (S_1 × F_2 ))` where :math:`ρ((s_1 , s_2 ), a) = (s_{X1} , s_{X2} )` iff :math:`s_{X1} = ρ_1 (s_1 , a)` and :math:`s_{X2} = ρ(s_2 , a)` Proposed implementation guarantees resulting DFA has only **reachable** states. :param dict dfa_1: first input DFA; :param dict dfa_2: second input DFA. :return: *(dict)* representing the united DFA. """
dfa_1 = deepcopy(dfa_1)
dfa_2 = deepcopy(dfa_2)
dfa_1['alphabet'] = dfa_2['alphabet'] = dfa_1['alphabet'].union(
    dfa_2['alphabet'])
# to complete the DFAs over all possible transitions
dfa_1 = dfa_completion(dfa_1)
dfa_2 = dfa_completion(dfa_2)

union = {
    'alphabet': dfa_1['alphabet'].copy(),
    'states': {(dfa_1['initial_state'], dfa_2['initial_state'])},
    'initial_state': (dfa_1['initial_state'], dfa_2['initial_state']),
    'accepting_states': set(),
    'transitions': dict()
}

boundary = set()
boundary.add(union['initial_state'])
while boundary:
    (state_dfa_1, state_dfa_2) = boundary.pop()
    if state_dfa_1 in dfa_1['accepting_states'] \
            or state_dfa_2 in dfa_2['accepting_states']:
        union['accepting_states'].add((state_dfa_1, state_dfa_2))
    for a in union['alphabet']:
        # as DFAs are completed they surely have the transition
        next_state_1 = dfa_1['transitions'][state_dfa_1, a]
        next_state_2 = dfa_2['transitions'][state_dfa_2, a]
        if (next_state_1, next_state_2) not in union['states']:
            union['states'].add((next_state_1, next_state_2))
            boundary.add((next_state_1, next_state_2))
        union['transitions'][(state_dfa_1, state_dfa_2), a] = \
            (next_state_1, next_state_2)

return union
<SYSTEM_TASK:> Returns the minimization of the DFA in input through a <END_TASK> <USER_TASK:> Description: def dfa_minimization(dfa: dict) -> dict: """ Returns the minimization of the DFA in input through a greatest fix-point method. Given a completed DFA :math:`A = (Σ, S, s_0 , ρ, F )` there exists a single minimal DFA :math:`A_m` which is equivalent to A, i.e. reads the same language :math:`L(A) = L(A_m)` and with a minimal number of states. To construct such a DFA we exploit bisimulation as a suitable equivalence relation between states. A bisimulation relation :math:`E ∈ S × S` is a relation between states that satisfies the following condition: if :math:`(s, t) ∈ E` then: • s ∈ F iff t ∈ F; • For all :math:`(s_X,a)` such that :math:`ρ(s, a) = s_X`, there exists :math:`t_X` such that :math:`ρ(t, a) = t_X` and :math:`(s_X , t_X ) ∈ E`; • For all :math:`(t_X,a)` such that :math:`ρ(t, a) = t_X` , there exists :math:`s_X` such that :math:`ρ(s, a) = s_X` and :math:`(s_X , t_X ) ∈ E`. :param dict dfa: input DFA. :return: *(dict)* representing the minimized DFA. """
dfa = dfa_completion(deepcopy(dfa))

################################################################
### Greatest-fixpoint

z_current = set()
z_next = set()

# First bisimulation condition check (can be done just once)
# s ∈ F iff t ∈ F
for state_s in dfa['states']:
    for state_t in dfa['states']:
        if (
                state_s in dfa['accepting_states']
                and state_t in dfa['accepting_states']
        ) or (
                state_s not in dfa['accepting_states']
                and state_t not in dfa['accepting_states']
        ):
            z_next.add((state_s, state_t))

# Second and third condition of bisimilarity check
while z_current != z_next:
    z_current = z_next
    z_next = z_current.copy()
    for (state_1, state_2) in z_current:
        # for all (s_X, a) s.t. ρ(s, a) = s_X, there exists t_X
        # s.t. ρ(t, a) = t_X and (s_X, t_X) ∈ Z_i
        for a in dfa['alphabet']:
            if (state_1, a) in dfa['transitions'] \
                    and (state_2, a) in dfa['transitions']:
                if (
                        dfa['transitions'][state_1, a],
                        dfa['transitions'][state_2, a]
                ) not in z_current:
                    z_next.remove((state_1, state_2))
                    break
            else:
                # action a not possible in state_1 or state_2
                z_next.remove((state_1, state_2))
                break

################################################################
### Equivalence Sets

equivalence = dict()
for (state_1, state_2) in z_current:
    equivalence.setdefault(state_1, set()).add(state_2)

################################################################
### Minimal DFA construction

dfa_min = {
    'alphabet': dfa['alphabet'].copy(),
    'states': set(),
    'initial_state': dfa['initial_state'],
    'accepting_states': set(),
    'transitions': dfa['transitions'].copy()
}

# select one element for each equivalence set
for equivalence_set in equivalence.values():
    if dfa_min['states'].isdisjoint(equivalence_set):
        e = equivalence_set.pop()
        dfa_min['states'].add(e)  # TODO highlight this instruction
        equivalence_set.add(e)

dfa_min['accepting_states'] = \
    dfa_min['states'].intersection(dfa['accepting_states'])

for t in dfa['transitions']:
    if t[0] not in dfa_min['states']:
        dfa_min['transitions'].pop(t)
    elif dfa['transitions'][t] not in dfa_min['states']:
        dfa_min['transitions'][t] = \
            equivalence[dfa['transitions'][t]]. \
            intersection(dfa_min['states']).pop()

return dfa_min
<SYSTEM_TASK:> Side effects on input! Removes unreachable states from a <END_TASK> <USER_TASK:> Description: def dfa_reachable(dfa: dict) -> dict: """ Side effects on input! Removes unreachable states from a DFA and returns the pruned DFA. It is possible to remove from a DFA A all unreachable states from the initial state without altering the language. The reachable DFA :math:`A_R` corresponding to A is defined as: :math:`A_R = (Σ, S_R , s_0 , ρ|S_R , F ∩ S_R )` where • :math:`S_R` set of reachable state from the initial one • :math:`ρ|S_R` is the restriction on :math:`S_R × Σ` of ρ. :param dict dfa: input DFA. :return: *(dict)* representing the pruned DFA. """
reachable_states = set()  # set of states reachable from the root
boundary = set()
reachable_states.add(dfa['initial_state'])
boundary.add(dfa['initial_state'])

while boundary:
    s = boundary.pop()
    for a in dfa['alphabet']:
        if (s, a) in dfa['transitions']:
            if dfa['transitions'][s, a] not in reachable_states:
                reachable_states.add(dfa['transitions'][s, a])
                boundary.add(dfa['transitions'][s, a])

dfa['states'] = reachable_states
dfa['accepting_states'] = \
    dfa['accepting_states'].intersection(dfa['states'])

# iterate over a copy: the dict cannot be mutated while looping over it
transitions = dfa['transitions'].copy()
for t in transitions:
    if t[0] not in dfa['states']:
        dfa['transitions'].pop(t)
    elif dfa['transitions'][t] not in dfa['states']:
        dfa['transitions'].pop(t)

return dfa
<SYSTEM_TASK:> Side effects on input! Removes from the DFA all states that <END_TASK> <USER_TASK:> Description: def dfa_co_reachable(dfa: dict) -> dict: """ Side effects on input! Removes from the DFA all states that do not reach a final state and returns the pruned DFA. It is possible to remove from a DFA A all states that do not reach a final state without altering the language. The co-reachable dfa :math:`A_F` corresponding to A is defined as: :math:`A_F = (Σ, S_F , s_0 , ρ|S_F , F )` where • :math:`S_F` is the set of states that reach a final state • :math:`ρ|S_F` is the restriction on :math:`S_F × Σ` of ρ. :param dict dfa: input DFA. :return: *(dict)* representing the pruned DFA. """
co_reachable_states = dfa['accepting_states'].copy()
boundary = co_reachable_states.copy()

# inverse transition function
inverse_transitions = dict()
for key, value in dfa['transitions'].items():
    inverse_transitions.setdefault(value, set()).add(key)

while boundary:
    s = boundary.pop()
    if s in inverse_transitions:
        for (state, action) in inverse_transitions[s]:
            if state not in co_reachable_states:
                boundary.add(state)
                co_reachable_states.add(state)

dfa['states'] = co_reachable_states

# If not s_0 ∈ S_F the resulting dfa is empty
if dfa['initial_state'] not in dfa['states']:
    dfa = {
        'alphabet': set(),
        'states': set(),
        'initial_state': None,
        'accepting_states': set(),
        'transitions': dict()
    }
    return dfa

transitions = dfa['transitions'].copy()
for t in transitions:
    if t[0] not in dfa['states']:
        dfa['transitions'].pop(t)
    elif dfa['transitions'][t] not in dfa['states']:
        dfa['transitions'].pop(t)

return dfa
<SYSTEM_TASK:> Side effects on input! Returns the DFA in input trimmed, <END_TASK> <USER_TASK:> Description: def dfa_trimming(dfa: dict) -> dict: """ Side effects on input! Returns the DFA in input trimmed, so both reachable and co-reachable. Given a DFA A, the corresponding trimmed DFA contains only those states that are reachable from the initial state and that lead to a final state. The trimmed dfa :math:`A_{RF}` corresponding to A is defined as :math:`A_{RF} = (Σ, S_R ∩ S_F , s_0 , ρ|S_R∩S_F , F ∩ S_R )` where • :math:`S_R` set of reachable states from the initial state • :math:`S_F` set of states that reaches a final state • :math:`ρ|S_R∩S_F` is the restriction on :math:`(S_R ∩ S_F ) × Σ` of ρ. :param dict dfa: input DFA. :return: *(dict)* representing the trimmed input DFA. """
# Reachable DFA
dfa = dfa_reachable(dfa)
# Co-reachable DFA
dfa = dfa_co_reachable(dfa)
# trimmed DFA
return dfa
<SYSTEM_TASK:> Compute the Luhn checksum for the provided string of digits. Note this <END_TASK> <USER_TASK:> Description: def checksum(string): """ Compute the Luhn checksum for the provided string of digits. Note this assumes the check digit is in place. """
digits = list(map(int, string))
odd_sum = sum(digits[-1::-2])
even_sum = sum([sum(divmod(2 * d, 10)) for d in digits[-2::-2]])
return (odd_sum + even_sum) % 10
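A quick sanity check, using the classic Luhn test number (a valid number yields a checksum of 0):

assert checksum('79927398713') == 0  # valid: weighted digit sum is a multiple of 10
assert checksum('79927398710') == 7  # corrupting the check digit breaks it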
<SYSTEM_TASK:> Determine if an IP address should be considered blocked. <END_TASK> <USER_TASK:> Description: def is_blocked(self, ip): """Determine if an IP address should be considered blocked."""
blocked = True
if ip in self.allowed_admin_ips:
    blocked = False
for allowed_range in self.allowed_admin_ip_ranges:
    if ipaddress.ip_address(ip) in ipaddress.ip_network(allowed_range):
        blocked = False
return blocked
<SYSTEM_TASK:> Start a server which will watch .md and .rst files for changes. <END_TASK> <USER_TASK:> Description: def serve(args): """Start a server which will watch .md and .rst files for changes. If a md file changes, the Home Documentation is rebuilt. If a .rst file changes, the updated sphinx project is rebuilt Args: args (ArgumentParser): flags from the CLI """
# Server's parameters
port = args.serve_port or PORT
host = "0.0.0.0"

# Current working directory
dir_path = Path().absolute()
web_dir = dir_path / "site"

# Update routes
utils.set_routes()

# Offline mode
if args.offline:
    os.environ["MKINX_OFFLINE"] = "true"
    _ = subprocess.check_output("mkdocs build > /dev/null", shell=True)
    utils.make_offline()

class MkinxHTTPHandler(SimpleHTTPRequestHandler):
    """Class routing urls (paths) to projects (resources)
    """

    def translate_path(self, path):
        # default root -> cwd
        location = str(web_dir)
        route = location

        if len(path) != 0 and path != "/":
            for key, loc in utils.get_routes():
                if path.startswith(key):
                    location = loc
                    path = path[len(key):]
                    break

        if location[-1] == "/" or not path or path[0] == "/":
            route = location + path
        else:
            route = location + "/" + path

        return route.split("?")[0]

# Serve as daemon thread
success = False
count = 0
print("Waiting for server port...")
try:
    while not success:
        try:
            httpd = socketserver.TCPServer((host, port), MkinxHTTPHandler)
            success = True
        except OSError:
            count += 1
        finally:
            if not success and count > 20:
                s = "port {} seems occupied. Try with {} ? (y/n)"
                if "y" in input(s.format(port, port + 1)):
                    port += 1
                    count = 0
                else:
                    print("You can specify a custom port with mkinx serve -s")
                    return
        time.sleep(0.5)
except KeyboardInterrupt:
    print("Aborting.")
    return

httpd.allow_reuse_address = True
print("\nServing at http://{}:{}\n".format(host, port))
thread = threading.Thread(target=httpd.serve_forever)
thread.daemon = True
thread.start()

# Watch for changes
event_handler = utils.MkinxFileHandler(
    patterns=["*.rst", "*.md", "*.yml", "*.yaml"]
)
observer = Observer()
observer.schedule(event_handler, path=str(dir_path), recursive=True)
observer.start()

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
    httpd.server_close()

observer.join()
<SYSTEM_TASK:> Train and validate the LR on a train and test dataset <END_TASK> <USER_TASK:> Description: def train(self, X_train, Y_train, X_test, Y_test): """Train and validate the LR on a train and test dataset Args: X_train (np.array): Training data Y_train (np.array): Training labels X_test (np.array): Test data Y_test (np.array): Test labels """
# Placeholder loop standing in for an actual training routine: it prints,
# sleeps, and exits at random rather than fitting anything
while True:
    print(1)
    time.sleep(1)
    if random.randint(0, 9) >= 5:
        break
<SYSTEM_TASK:> Download a URL. <END_TASK> <USER_TASK:> Description: def download(url, path, kind='file', progressbar=True, replace=False, timeout=10., verbose=True): """Download a URL. This will download a file and store it in a '~/data/` folder, creating directories if need be. It will also work for zip files, in which case it will unzip all of the files to the desired location. Parameters ---------- url : string The url of the file to download. This may be a dropbox or google drive "share link", or a regular URL. If it is a share link, then it should point to a single file and not a folder. To download folders, zip them first. path : string The path where the downloaded file will be stored. If ``zipfile`` is True, then this must be a folder into which files will be zipped. kind : one of ['file', 'zip', 'tar', 'tar.gz'] The kind of file to be downloaded. If not 'file', then the file contents will be unpackaged according to the kind specified. Package contents will be placed in ``root_destination/<name>``. progressbar : bool Whether to display a progress bar during file download. replace : bool If True and the URL points to a single file, overwrite the old file if possible. timeout : float The URL open timeout. verbose : bool Whether to print download status to the screen. Returns ------- out_path : string A path to the downloaded file (or folder, in the case of a zip file). """
if kind not in ALLOWED_KINDS:
    raise ValueError('`kind` must be one of {}, got {}'.format(
        ALLOWED_KINDS, kind))

# Make sure we have directories to dump files
path = op.expanduser(path)

if len(path) == 0:
    raise ValueError('You must specify a path. For current directory use .')

download_url = _convert_url_to_downloadable(url)

if replace is False and op.exists(path):
    msg = ('Replace is False and data exists, so doing nothing. '
           'Use replace==True to re-download the data.')
elif kind in ZIP_KINDS:
    # Create new folder for data if we need it
    if not op.isdir(path):
        if verbose:
            tqdm.write('Creating data folder...')
        os.makedirs(path)

    # Download the file to a temporary folder to unzip
    path_temp = _TempDir()
    path_temp_file = op.join(path_temp, "tmp.{}".format(kind))
    _fetch_file(download_url, path_temp_file, timeout=timeout,
                verbose=verbose)

    # Unzip the file to the out path
    if verbose:
        tqdm.write('Extracting {} file...'.format(kind))
    if kind == 'zip':
        zipper = ZipFile
    elif kind == 'tar':
        zipper = tarfile.open
    elif kind == 'tar.gz':
        zipper = partial(tarfile.open, mode='r:gz')
    with zipper(path_temp_file) as myobj:
        myobj.extractall(path)
    msg = 'Successfully downloaded / unzipped to {}'.format(path)
else:
    if not op.isdir(op.dirname(path)):
        os.makedirs(op.dirname(path))
    _fetch_file(download_url, path, timeout=timeout, verbose=verbose)
    msg = 'Successfully downloaded file to {}'.format(path)
if verbose:
    tqdm.write(msg)
return path
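A usage sketch with an invented URL, purely illustrative: fetch a zipped archive and unpack it under a local data folder.

# Hypothetical call: grab a zipped dataset and extract it into ./data/example
out = download('https://example.com/datasets/example.zip', './data/example',
               kind='zip', replace=False)
print(out)  # './data/example'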
<SYSTEM_TASK:> Convert a url to the proper style depending on its website. <END_TASK> <USER_TASK:> Description: def _convert_url_to_downloadable(url): """Convert a url to the proper style depending on its website."""
if 'drive.google.com' in url:
    # For future support of google drive
    file_id = url.split('d/')[1].split('/')[0]
    base_url = 'https://drive.google.com/uc?export=download&id='
    out = '{}{}'.format(base_url, file_id)
elif 'dropbox.com' in url:
    if url.endswith('.png'):
        out = url + '?dl=1'
    else:
        out = url.replace('dl=0', 'dl=1')
elif 'github.com' in url:
    out = url.replace('github.com', 'raw.githubusercontent.com')
    out = out.replace('blob/', '')
else:
    out = url
return out
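The rewriting rules above, traced on two made-up share links (the links are hypothetical, only the transformations are real):

_convert_url_to_downloadable('https://www.dropbox.com/s/abc123/data.csv?dl=0')
# -> 'https://www.dropbox.com/s/abc123/data.csv?dl=1'
_convert_url_to_downloadable('https://github.com/user/repo/blob/master/file.txt')
# -> 'https://raw.githubusercontent.com/user/repo/master/file.txt'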
<SYSTEM_TASK:> Calculate the md5sum for a file. <END_TASK> <USER_TASK:> Description: def md5sum(fname, block_size=1048576): # 2 ** 20 """Calculate the md5sum for a file. Parameters ---------- fname : str Filename. block_size : int Block size to use when reading. Returns ------- hash_ : str The hexadecimal digest of the hash. """
md5 = hashlib.md5()
with open(fname, 'rb') as fid:
    while True:
        data = fid.read(block_size)
        if not data:
            break
        md5.update(data)
return md5.hexdigest()
<SYSTEM_TASK:> Write a chunk to file and update the progress bar. <END_TASK> <USER_TASK:> Description: def _chunk_write(chunk, local_file, progress): """Write a chunk to file and update the progress bar."""
local_file.write(chunk)
if progress is not None:
    progress.update(len(chunk))
<SYSTEM_TASK:> Turn number of bytes into human-readable str. <END_TASK> <USER_TASK:> Description: def sizeof_fmt(num): """Turn number of bytes into human-readable str. Parameters ---------- num : int The number of bytes. Returns ------- size : str The size in human-readable format. """
units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
decimals = [0, 0, 1, 2, 2, 2]
if num > 1:
    exponent = min(int(log(num, 1024)), len(units) - 1)
    quotient = float(num) / 1024 ** exponent
    unit = units[exponent]
    num_decimals = decimals[exponent]
    format_string = '{0:.%sf} {1}' % (num_decimals)
    return format_string.format(quotient, unit)
if num == 0:
    return '0 bytes'
if num == 1:
    return '1 byte'
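A few spot checks of the formatting behaviour, which follow directly from the unit and decimal tables above:

assert sizeof_fmt(0) == '0 bytes'
assert sizeof_fmt(1) == '1 byte'
assert sizeof_fmt(2048) == '2 kB'        # decimals[1] == 0
assert sizeof_fmt(1048576) == '1.0 MB'   # decimals[2] == 1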
<SYSTEM_TASK:> Decorator to automatically reexecute a function if the connection is <END_TASK> <USER_TASK:> Description: def retry(f, exc_classes=DEFAULT_EXC_CLASSES, logger=None, retry_log_level=logging.INFO, retry_log_message="Connection broken in '{f}' (error: '{e}'); " "retrying with new connection.", max_failures=None, interval=0, max_failure_log_level=logging.ERROR, max_failure_log_message="Max retries reached for '{f}'. Aborting."): """ Decorator to automatically reexecute a function if the connection is broken for any reason. """
exc_classes = tuple(exc_classes)

@wraps(f)
def deco(*args, **kwargs):
    failures = 0
    while True:
        try:
            return f(*args, **kwargs)
        except exc_classes as e:
            if logger is not None:
                logger.log(retry_log_level,
                           retry_log_message.format(f=f.func_name, e=e))
            gevent.sleep(interval)
            failures += 1
            if max_failures is not None \
                    and failures > max_failures:
                if logger is not None:
                    logger.log(max_failure_log_level,
                               max_failure_log_message.format(
                                   f=f.func_name, e=e))
                raise
return deco
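A hedged usage sketch pairing this decorator with the pool's `get` below — the pool object, its request API, and DEFAULT_EXC_CLASSES covering socket errors are all assumptions for illustration:

@retry
def fetch_status(pool):
    # Any exception from DEFAULT_EXC_CLASSES raised here drops the
    # connection and re-runs the whole function with a fresh one
    with pool.get() as conn:
        return conn.request('GET', '/status')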
<SYSTEM_TASK:> Get a connection from the pool, to make and receive traffic. <END_TASK> <USER_TASK:> Description: def get(self): """ Get a connection from the pool, to make and receive traffic. If the connection fails for any reason (socket.error), it is dropped and a new one is scheduled. Please use @retry as a way to automatically retry whatever operation you were performing. """
self.lock.acquire()
try:
    c = self.conn.popleft()
    yield c
except self.exc_classes:
    # The current connection has failed, drop it and create a new one
    gevent.spawn_later(1, self._addOne)
    raise
except:
    self.conn.append(c)
    self.lock.release()
    raise
else:
    # NOTE: cannot use finally because MUST NOT reuse the connection
    # if it failed (socket.error)
    self.conn.append(c)
    self.lock.release()
<SYSTEM_TASK:> Return a simple transformer function for parsing EFF annotations. N.B., <END_TASK> <USER_TASK:> Description: def eff_default_transformer(fills=EFF_DEFAULT_FILLS): """ Return a simple transformer function for parsing EFF annotations. N.B., ignores all but the first effect. """
def _transformer(vals):
    if len(vals) == 0:
        return fills
    else:
        # ignore all but first effect
        match_eff_main = _prog_eff_main.match(vals[0])
        if match_eff_main is None:
            logging.warning(
                'match_eff_main is None: vals={}'.format(str(vals[0]))
            )
            return fills
        eff = [match_eff_main.group(1)] \
            + match_eff_main.group(2).split(b'|')
        result = tuple(
            fill if v == b''
            else int(v) if i == 5 or i == 10
            else (1 if v == b'CODING' else 0) if i == 8
            else v
            for i, (v, fill) in enumerate(list(zip(eff, fills))[:11])
        )
        return result
return _transformer
<SYSTEM_TASK:> Return a simple transformer function for parsing ANN annotations. N.B., <END_TASK> <USER_TASK:> Description: def ann_default_transformer(fills=ANN_DEFAULT_FILLS): """ Return a simple transformer function for parsing ANN annotations. N.B., ignores all but the first effect. """
def _transformer(vals):
    if len(vals) == 0:
        return fills
    else:
        # ignore all but first effect
        ann = vals[0].split(b'|')
        ann = ann[:11] + _ann_split2(ann[11]) + _ann_split2(ann[12]) + \
            _ann_split2(ann[13]) + ann[14:]
        result = tuple(
            fill if v == b''
            else int(v.partition(b'/')[0]) if i == 8
            else int(v) if 11 <= i < 18
            else v
            for i, (v, fill) in enumerate(list(zip(ann, fills))[:18])
        )
        return result
return _transformer
<SYSTEM_TASK:> Introduces a new overloaded function and registers its first implementation. <END_TASK> <USER_TASK:> Description: def overloaded(func): """ Introduces a new overloaded function and registers its first implementation. """
fn = unwrap(func)
ensure_function(fn)

def dispatcher(*args, **kwargs):

    resolved = None
    if dispatcher.__complex_parameters:
        cache_key_pos = []
        cache_key_kw = []
        for argset in (0, 1) if kwargs else (0,):
            if argset == 0:
                arg_pairs = enumerate(args)
                complexity_mapping = dispatcher.__complex_positions
            else:
                arg_pairs = kwargs.items()
                complexity_mapping = dispatcher.__complex_parameters
            for id, arg in arg_pairs:
                type_ = type(arg)
                element_type = None
                if id in complexity_mapping:
                    try:
                        element = next(iter(arg))
                    except TypeError:
                        pass
                    except StopIteration:
                        element_type = _empty
                    else:
                        complexity = complexity_mapping[id]
                        if complexity & 8 and isinstance(arg, tuple):
                            element_type = tuple(type(el) for el in arg)
                        elif complexity & 4 and hasattr(arg, 'keys'):
                            element_type = (type(element), type(arg[element]))
                        else:
                            element_type = type(element)
                if argset == 0:
                    cache_key_pos.append((type_, element_type))
                else:
                    cache_key_kw.append((id, type_, element_type))
    else:
        cache_key_pos = (type(arg) for arg in args)
        cache_key_kw = ((name, type(arg))
                        for (name, arg) in kwargs.items()) if kwargs else None

    cache_key = (tuple(cache_key_pos),
                 tuple(sorted(cache_key_kw)) if kwargs else None)

    try:
        resolved = dispatcher.__cache[cache_key]
    except KeyError:
        resolved = find(dispatcher, args, kwargs)
        if resolved:
            dispatcher.__cache[cache_key] = resolved
    if resolved:
        before = dispatcher.__hooks['before']
        after = dispatcher.__hooks['after']
        if before:
            before(*args, **kwargs)
        result = resolved(*args, **kwargs)
        if after:
            after(*args, **kwargs)
        return result
    else:
        return error(dispatcher.__name__)

dispatcher.__dict__.update(
    __functions = [],
    __hooks = {'before': None, 'after': None},
    __cache = {},
    __complex_positions = {},
    __complex_parameters = {},
    __maxlen = 0,
)
for attr in ('__module__', '__name__', '__qualname__', '__doc__'):
    setattr(dispatcher, attr, getattr(fn, attr, None))
if is_void(fn):
    update_docstring(dispatcher, fn)
    return dispatcher
else:
    update_docstring(dispatcher)
    return register(dispatcher, func)
<SYSTEM_TASK:> Registers `func` as an implementation on `dispatcher`. <END_TASK> <USER_TASK:> Description: def register(dispatcher, func, *, hook=None): """ Registers `func` as an implementation on `dispatcher`. """
wrapper = None
if isinstance(func, (classmethod, staticmethod)):
    wrapper = type(func)
    func = func.__func__
ensure_function(func)
if isinstance(dispatcher, (classmethod, staticmethod)):
    wrapper = None
dp = unwrap(dispatcher)
try:
    dp.__functions
except AttributeError:
    raise OverloadingError("%r has not been set up as an overloaded function." % dispatcher)
fn = unwrap(func)
if hook:
    dp.__hooks[hook] = func
else:
    signature = get_signature(fn)
    for i, type_ in enumerate(signature.types):
        if not isinstance(type_, type):
            raise OverloadingError(
                "Failed to overload function '{0}': parameter '{1}' has "
                "an annotation that is not a type."
                .format(dp.__name__, signature.parameters[i]))
    for fninfo in dp.__functions:
        dup_sig = sig_cmp(signature, fninfo.signature)
        if dup_sig and signature.has_varargs == fninfo.signature.has_varargs:
            raise OverloadingError(
                "Failed to overload function '{0}': non-unique signature ({1})."
                .format(dp.__name__, str.join(', ', (_repr(t) for t in dup_sig))))
    # All clear; register the function.
    dp.__functions.append(FunctionInfo(func, signature))
    dp.__cache.clear()
    dp.__maxlen = max(dp.__maxlen, len(signature.parameters))
if typing:
    # For each parameter position and name, compute a bitwise union of complexity
    # values over all registered signatures. Retain the result for parameters where
    # a nonzero value occurs at least twice and at least one of those values is >= 2.
    # Such parameters require deep type-checking during function resolution.
    position_values = defaultdict(lambda: 0)
    keyword_values = defaultdict(lambda: 0)
    position_counter = Counter()
    keyword_counter = Counter()
    for fninfo in dp.__functions:
        sig = fninfo.signature
        complex_positions = {i: v for i, v in enumerate(sig.complexity) if v}
        complex_keywords = {p: v for p, v in zip(sig.parameters, sig.complexity) if v}
        for i, v in complex_positions.items():
            position_values[i] |= v
        for p, v in complex_keywords.items():
            keyword_values[p] |= v
        position_counter.update(complex_positions.keys())
        keyword_counter.update(complex_keywords.keys())
    dp.__complex_positions = {
        i: v for i, v in position_values.items()
        if v >= 2 and position_counter[i] > 1}
    dp.__complex_parameters = {
        p: v for p, v in keyword_values.items()
        if v >= 2 and keyword_counter[p] > 1}
if wrapper is None:
    wrapper = lambda x: x
if func.__name__ == dp.__name__:
    # The returned function is going to be bound to the invocation name
    # in the calling scope, so keep returning the dispatcher.
    return wrapper(dispatcher)
else:
    return wrapper(func)
<SYSTEM_TASK:> Given the arguments contained in `args` and `kwargs`, returns the best match <END_TASK> <USER_TASK:> Description: def find(dispatcher, args, kwargs): """ Given the arguments contained in `args` and `kwargs`, returns the best match from the list of implementations registered on `dispatcher`. """
matches = []
full_args = args
full_kwargs = kwargs
for func, sig in dispatcher.__functions:
    params = sig.parameters
    param_count = len(params)
    # Filter out arguments that will be consumed by catch-all parameters
    # or by keyword-only parameters.
    if sig.has_varargs:
        args = full_args[:param_count]
    else:
        args = full_args
    if sig.has_varkw or sig.has_kwonly:
        kwargs = {kw: full_kwargs[kw] for kw in params if kw in full_kwargs}
    else:
        kwargs = full_kwargs
    kwarg_set = set(kwargs)
    arg_count = len(args) + len(kwargs)
    optional_count = len(sig.defaults)
    required_count = param_count - optional_count
    # Consider candidate functions that satisfy basic conditions:
    # - argument count matches signature
    # - all keyword arguments are recognized.
    if not 0 <= param_count - arg_count <= optional_count:
        continue
    if kwargs and not kwarg_set <= set(params):
        continue
    if kwargs and args and kwarg_set & set(params[:len(args)]):
        raise TypeError("%s() got multiple values for the same parameter"
                        % dispatcher.__name__)
    arg_score = arg_count  # >= 0
    type_score = 0
    specificity_score = [None] * dispatcher.__maxlen
    sig_score = required_count
    var_score = -sig.has_varargs
    indexed_kwargs = ((params.index(k), v) for k, v in kwargs.items()) if kwargs else ()
    for param_pos, value in chain(enumerate(args), indexed_kwargs):
        param_name = params[param_pos]
        if value is None and sig.defaults.get(param_name, _empty) is None:
            expected_type = type(None)
        else:
            expected_type = sig.types[param_pos]
        specificity = compare(value, expected_type)
        if specificity[0] == -1:
            break
        specificity_score[param_pos] = specificity
        type_score += 1
    else:
        score = (arg_score, type_score, specificity_score, sig_score, var_score)
        matches.append(Match(score, func, sig))
if matches:
    if len(matches) > 1:
        matches.sort(key=lambda m: m.score, reverse=True)
        if DEBUG:
            assert matches[0].score > matches[1].score
    return matches[0].func
else:
    return None
<SYSTEM_TASK:> Gathers information about the call signature of `func`. <END_TASK> <USER_TASK:> Description: def get_signature(func): """ Gathers information about the call signature of `func`. """
code = func.__code__ # Names of regular parameters parameters = tuple(code.co_varnames[:code.co_argcount]) # Flags has_varargs = bool(code.co_flags & inspect.CO_VARARGS) has_varkw = bool(code.co_flags & inspect.CO_VARKEYWORDS) has_kwonly = bool(code.co_kwonlyargcount) # A mapping of parameter names to default values default_values = func.__defaults__ or () defaults = dict(zip(parameters[-len(default_values):], default_values)) # Type annotations for all parameters type_hints = typing.get_type_hints(func) if typing else func.__annotations__ types = tuple(normalize_type(type_hints.get(param, AnyType)) for param in parameters) # Type annotations for required parameters required = types[:-len(defaults)] if defaults else types # Complexity complexity = tuple(map(type_complexity, types)) if typing else None return Signature(parameters, types, complexity, defaults, required, has_varargs, has_varkw, has_kwonly)
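A minimal sketch of what `get_signature` extracts (the `greet` function is illustrative, not from the source); the expected values below follow directly from the code above:

def greet(name: str, times: int = 1):
    return name * times

sig = get_signature(greet)
# sig.parameters == ('name', 'times')
# sig.defaults == {'times': 1}
# sig.required == (str,)  -- types of the parameters without defaults
# sig.has_varargs == sig.has_varkw == sig.has_kwonly == False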
<SYSTEM_TASK:> Reduces an arbitrarily complex type declaration into something manageable. <END_TASK> <USER_TASK:> Description: def normalize_type(type_, level=0): """ Reduces an arbitrarily complex type declaration into something manageable. """
if not typing or not isinstance(type_, typing.TypingMeta) or type_ is AnyType: return type_ if isinstance(type_, typing.TypeVar): if type_.__constraints__ or type_.__bound__: return type_ else: return AnyType if issubclass(type_, typing.Union): if not type_.__union_params__: raise OverloadingError("typing.Union must be parameterized") return typing.Union[tuple(normalize_type(t, level) for t in type_.__union_params__)] if issubclass(type_, typing.Tuple): params = type_.__tuple_params__ if level > 0 or params is None: return typing.Tuple elif type_.__tuple_use_ellipsis__: return typing.Tuple[normalize_type(params[0], level + 1), ...] else: return typing.Tuple[tuple(normalize_type(t, level + 1) for t in params)] if issubclass(type_, typing.Callable): return typing.Callable if isinstance(type_, typing.GenericMeta): base = find_base_generic(type_) if base is typing.Generic: return type_ else: return GenericWrapper(type_, base, level > 0) raise OverloadingError("%r not supported yet" % type_)
<SYSTEM_TASK:> Computes an indicator for the complexity of `type_`. <END_TASK> <USER_TASK:> Description: def type_complexity(type_): """Computes an indicator for the complexity of `type_`. If the return value is 0, the supplied type is not parameterizable. Otherwise, set bits in the return value denote the following features: - bit 0: The type could be parameterized but is not. - bit 1: The type represents an iterable container with 1 constrained type parameter. - bit 2: The type represents a mapping with a constrained value type (2 parameters). - bit 3: The type represents an n-tuple (n parameters). Since these features are mutually exclusive, only a `Union` can have more than one bit set. """
if (not typing or not isinstance(type_, (typing.TypingMeta, GenericWrapperMeta)) or type_ is AnyType): return 0 if issubclass(type_, typing.Union): return reduce(operator.or_, map(type_complexity, type_.__union_params__)) if issubclass(type_, typing.Tuple): if type_.__tuple_params__ is None: return 1 elif type_.__tuple_use_ellipsis__: return 2 else: return 8 if isinstance(type_, GenericWrapperMeta): type_count = 0 for p in reversed(type_.parameters): if type_count > 0: type_count += 1 if p is AnyType: continue if not isinstance(p, typing.TypeVar) or p.__constraints__ or p.__bound__: type_count += 1 return 1 << min(type_count, 2) return 0
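Tracing the `Tuple` branch above gives concrete values for three of the mutually exclusive cases (illustrative, assuming a typing version exposing `__tuple_params__` and `__tuple_use_ellipsis__`, as this module relies on):

import typing

# type_complexity(typing.Tuple)           == 1  (bit 0: parameterizable but bare)
# type_complexity(typing.Tuple[int, ...]) == 2  (bit 1: homogeneous container)
# type_complexity(typing.Tuple[int, str]) == 8  (bit 3: n-tuple)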
<SYSTEM_TASK:> Locates the underlying generic whose structure and behavior are known. <END_TASK> <USER_TASK:> Description: def find_base_generic(type_): """Locates the underlying generic whose structure and behavior are known. For example, the base generic of a type that inherits from `typing.Mapping[T, int]` is `typing.Mapping`. """
for t in type_.__mro__: if t.__module__ == typing.__name__: return first_origin(t)
<SYSTEM_TASK:> Iterates over all generics `type_` derives from, including origins. <END_TASK> <USER_TASK:> Description: def iter_generic_bases(type_): """Iterates over all generics `type_` derives from, including origins. This function is only necessary because, in typing 3.5.0, a generic doesn't get included in the list of bases when it constructs a parameterized version of itself. This was fixed in aab2c59; now it would be enough to just iterate over the MRO. """
for t in type_.__mro__: if not isinstance(t, typing.GenericMeta): continue yield t t = t.__origin__ while t: yield t t = t.__origin__
<SYSTEM_TASK:> Compares two normalized type signatures for validation purposes. <END_TASK> <USER_TASK:> Description: def sig_cmp(sig1, sig2): """ Compares two normalized type signatures for validation purposes. """
types1 = sig1.required types2 = sig2.required if len(types1) != len(types2): return False dup_pos = [] dup_kw = {} for t1, t2 in zip(types1, types2): match = type_cmp(t1, t2) if match: dup_pos.append(match) else: break else: return tuple(dup_pos) kw_range = slice(len(dup_pos), len(types1)) kwds1 = sig1.parameters[kw_range] kwds2 = sig2.parameters[kw_range] if set(kwds1) != set(kwds2): return False kwtypes1 = dict(zip(sig1.parameters, types1)) kwtypes2 = dict(zip(sig2.parameters, types2)) for kw in kwds1: match = type_cmp(kwtypes1[kw], kwtypes2[kw]) if match: dup_kw[kw] = match else: break else: return tuple(dup_pos), dup_kw return False
<SYSTEM_TASK:> Determines if a function is a void function, i.e., one whose body contains <END_TASK> <USER_TASK:> Description: def is_void(func): """ Determines if a function is a void function, i.e., one whose body contains nothing but a docstring or an ellipsis. A void function can be used to introduce an overloaded function without actually registering an implementation. """
try: source = dedent(inspect.getsource(func)) except (OSError, IOError): return False fdef = next(ast.iter_child_nodes(ast.parse(source))) return ( type(fdef) is ast.FunctionDef and len(fdef.body) == 1 and type(fdef.body[0]) is ast.Expr and type(fdef.body[0].value) in {ast.Str, ast.Ellipsis})
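A short sketch of the distinction `is_void` draws (function names are illustrative; `inspect.getsource` must be able to locate the source, so this works for functions defined in a file, not at an interactive prompt):

def stub(x):
    """Declares an overloaded function without implementing it."""

def impl(x):
    return x + 1

# is_void(stub) -> True   (body is only a docstring)
# is_void(impl) -> False  (body contains real statements)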
<SYSTEM_TASK:> Collect the nearest type variables and effective parameters from the type, <END_TASK> <USER_TASK:> Description: def derive_configuration(cls): """ Collect the nearest type variables and effective parameters from the type, its bases, and their origins as necessary. """
base_params = cls.base.__parameters__ if hasattr(cls.type, '__args__'): # typing as of commit abefbe4 tvars = {p: p for p in base_params} types = {} for t in iter_generic_bases(cls.type): if t is cls.base: type_vars = tuple(tvars[p] for p in base_params) parameters = (types.get(tvar, tvar) for tvar in type_vars) break if t.__args__: for arg, tvar in zip(t.__args__, t.__origin__.__parameters__): if isinstance(arg, typing.TypeVar): tvars[tvar] = tvars.get(arg, arg) else: types[tvar] = arg else: # typing 3.5.0 tvars = [None] * len(base_params) for t in iter_generic_bases(cls.type): for i, p in enumerate(t.__parameters__): if tvars[i] is None and isinstance(p, typing.TypeVar): tvars[i] = p if all(tvars): type_vars = tvars parameters = cls.type.__parameters__ break cls.type_vars = type_vars cls.parameters = tuple(normalize_type(p, 1) for p in parameters)
<SYSTEM_TASK:>
Returns an NFA that reads the intersection of the input NFAs.
<END_TASK>
<USER_TASK:>
Description:
def nfa_intersection(nfa_1: dict, nfa_2: dict) -> dict:
    """ Returns an NFA that reads the intersection of the input NFAs.

    Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ, S_2,S_2^0,ρ_2,F_2)`
    be two NFAs. There is an NFA :math:`A_∧` that runs simultaneously both
    :math:`A_1` and :math:`A_2` on the input word, so
    :math:`L(A_∧) = L(A_1)∩L(A_2)`.
    It is defined as:

    :math:`A_∧ = ( Σ , S , S_0 , ρ , F )`

    where

    • :math:`S = S_1 × S_2`
    • :math:`S_0 = S_1^0 × S_2^0`
    • :math:`F = F_1 × F_2`
    • :math:`((s,t), a, (s_X , t_X)) ∈ ρ` iff :math:`(s, a,s_X ) ∈ ρ_1`
      and :math:`(t, a, t_X ) ∈ ρ_2`

    :param dict nfa_1: first input NFA;
    :param dict nfa_2: second input NFA;
    :return: *(dict)* representing the intersected NFA.
    """
intersection = { 'alphabet': nfa_1['alphabet'].intersection(nfa_2['alphabet']), 'states': set(), 'initial_states': set(), 'accepting_states': set(), 'transitions': dict() } for init_1 in nfa_1['initial_states']: for init_2 in nfa_2['initial_states']: intersection['initial_states'].add((init_1, init_2)) intersection['states'].update(intersection['initial_states']) boundary = set() boundary.update(intersection['initial_states']) while boundary: (state_nfa_1, state_nfa_2) = boundary.pop() if state_nfa_1 in nfa_1['accepting_states'] \ and state_nfa_2 in nfa_2['accepting_states']: intersection['accepting_states'].add((state_nfa_1, state_nfa_2)) for a in intersection['alphabet']: if (state_nfa_1, a) not in nfa_1['transitions'] \ or (state_nfa_2, a) not in nfa_2['transitions']: continue s1 = nfa_1['transitions'][state_nfa_1, a] s2 = nfa_2['transitions'][state_nfa_2, a] for destination_1 in s1: for destination_2 in s2: next_state = (destination_1, destination_2) if next_state not in intersection['states']: intersection['states'].add(next_state) boundary.add(next_state) intersection['transitions'].setdefault( ((state_nfa_1, state_nfa_2), a), set()).add(next_state) if destination_1 in nfa_1['accepting_states'] \ and destination_2 in nfa_2['accepting_states']: intersection['accepting_states'].add(next_state) return intersection
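A minimal sketch of the expected dictionary format and result; the two toy NFAs below are illustrative, and each accepts exactly the word ['a']:

nfa_a = {
    'alphabet': {'a'},
    'states': {'s0', 's1'},
    'initial_states': {'s0'},
    'accepting_states': {'s1'},
    'transitions': {('s0', 'a'): {'s1'}},
}
nfa_b = {
    'alphabet': {'a'},
    'states': {'t0', 't1'},
    'initial_states': {'t0'},
    'accepting_states': {'t1'},
    'transitions': {('t0', 'a'): {'t1'}},
}
both = nfa_intersection(nfa_a, nfa_b)
# both['initial_states'] == {('s0', 't0')}
# both['accepting_states'] == {('s1', 't1')}
# both['transitions'][('s0', 't0'), 'a'] == {('s1', 't1')}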
<SYSTEM_TASK:>
Returns an NFA that reads the union of the input NFAs.
<END_TASK>
<USER_TASK:>
Description:
def nfa_union(nfa_1: dict, nfa_2: dict) -> dict:
    """ Returns an NFA that reads the union of the input NFAs.

    Let :math:`A_1 = (Σ,S_1,S_1^0,ρ_1,F_1)` and :math:`A_2 =(Σ, S_2,S_2^0,ρ_2,F_2)`
    be two NFAs. There is an NFA :math:`A_∨` that nondeterministically chooses
    :math:`A_1` or :math:`A_2` and runs it on the input word.
    It is defined as:

    :math:`A_∨ = (Σ, S, S_0 , ρ, F )`

    where:

    • :math:`S = S_1 ∪ S_2`
    • :math:`S_0 = S_1^0 ∪ S_2^0`
    • :math:`F = F_1 ∪ F_2`
    • :math:`ρ = ρ_1 ∪ ρ_2` , that is :math:`(s, a, s' ) ∈ ρ` if
      :math:`[ s ∈ S_1\ and\ (s, a, s' ) ∈ ρ_1 ]` OR
      :math:`[ s ∈ S_2\ and\ (s, a, s' ) ∈ ρ_2 ]`

    Make sure the two NFAs do not share state names; if they do, rename them
    first with the :mod:`PySimpleAutomata.NFA.rename_nfa_states` function.

    :param dict nfa_1: first input NFA;
    :param dict nfa_2: second input NFA.
    :return: *(dict)* representing the united NFA.
    """
union = { 'alphabet': nfa_1['alphabet'].union(nfa_2['alphabet']), 'states': nfa_1['states'].union(nfa_2['states']), 'initial_states': nfa_1['initial_states'].union(nfa_2['initial_states']), 'accepting_states': nfa_1['accepting_states'].union(nfa_2['accepting_states']), 'transitions': nfa_1['transitions'].copy()} for trans in nfa_2['transitions']: for elem in nfa_2['transitions'][trans]: union['transitions'].setdefault(trans, set()).add(elem) return union
<SYSTEM_TASK:>
Returns a DFA that reads the same language as the input NFA.
<END_TASK>
<USER_TASK:>
Description:
def nfa_determinization(nfa: dict) -> dict:
    """ Returns a DFA that reads the same language as the input NFA.

    Let A be an NFA, then there exists a DFA :math:`A_d` such
    that :math:`L(A_d) = L(A)`. Intuitively, :math:`A_d`
    collapses all possible runs of A on a given input word into
    one run over a larger state set.
    :math:`A_d` is defined as:

    :math:`A_d = (Σ, 2^S , s_0 , ρ_d , F_d )`

    where:

    • :math:`2^S` , i.e., the state set of :math:`A_d` , consists
      of all sets of states S in A;
    • :math:`s_0 = S^0` , i.e., the single initial state of
      :math:`A_d` is the set :math:`S_0` of initial states of A;
    • :math:`F_d = \{Q | Q ∩ F ≠ ∅\}`, i.e., the collection of
      sets of states that intersect F nontrivially;
    • :math:`ρ_d(Q, a) = \{s' | (s,a, s' ) ∈ ρ\ for\ some\ s ∈ Q\}`.

    :param dict nfa: input NFA.
    :return: *(dict)* representing a DFA.
    """
def state_name(s): return str(set(sorted(s))) dfa = { 'alphabet': nfa['alphabet'].copy(), 'initial_state': None, 'states': set(), 'accepting_states': set(), 'transitions': dict() } if len(nfa['initial_states']) > 0: dfa['initial_state'] = state_name(nfa['initial_states']) dfa['states'].add(state_name(nfa['initial_states'])) sets_states = list() sets_queue = list() sets_queue.append(nfa['initial_states']) sets_states.append(nfa['initial_states']) if len(sets_states[0].intersection(nfa['accepting_states'])) > 0: dfa['accepting_states'].add(state_name(sets_states[0])) while sets_queue: current_set = sets_queue.pop(0) for a in dfa['alphabet']: next_set = set() for state in current_set: if (state, a) in nfa['transitions']: for next_state in nfa['transitions'][state, a]: next_set.add(next_state) if len(next_set) == 0: continue if next_set not in sets_states: sets_states.append(next_set) sets_queue.append(next_set) dfa['states'].add(state_name(next_set)) if next_set.intersection(nfa['accepting_states']): dfa['accepting_states'].add(state_name(next_set)) dfa['transitions'][state_name(current_set), a] = state_name(next_set) return dfa
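A minimal sketch of determinization on a toy NFA (illustrative; note that the resulting DFA state names are stringified sets, so their exact spelling depends on set ordering):

nfa = {
    'alphabet': {'a'},
    'states': {'s0', 's1'},
    'initial_states': {'s0'},
    'accepting_states': {'s1'},
    'transitions': {('s0', 'a'): {'s0', 's1'}},
}
dfa = nfa_determinization(nfa)
# dfa['initial_state'] == "{'s0'}"
# The subset {'s0', 's1'} becomes a single, accepting DFA state.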
<SYSTEM_TASK:>
Returns a DFA reading the complemented language read by the input NFA.
<END_TASK>
<USER_TASK:>
Description:
def nfa_complementation(nfa: dict) -> dict:
    """ Returns a DFA reading the complemented language read by the input NFA.

    Complementing a nondeterministic automaton is possible by complementing
    its determinization. The construction is effective, but it involves an
    exponential blow-up, since determinization involves an unavoidable
    exponential blow-up (i.e., if the NFA has n states, then the DFA has
    :math:`2^n` states).

    :param dict nfa: input NFA.
    :return: *(dict)* representing the complemented DFA.
    """
determinized_nfa = nfa_determinization(nfa) return DFA.dfa_complementation(determinized_nfa)
<SYSTEM_TASK:>
Checks if a given word is accepted by an NFA.
<END_TASK>
<USER_TASK:>
Description:
def nfa_word_acceptance(nfa: dict, word: list) -> bool:
    """ Checks if a given word is accepted by an NFA.

    The word w is accepted by an NFA if there exists at least one accepting
    run on w.

    :param dict nfa: input NFA;
    :param list word: list of symbols ∈ nfa['alphabet'];
    :return: *(bool)*, True if the word is accepted, False otherwise.
    """
current_level = set() current_level = current_level.union(nfa['initial_states']) next_level = set() for action in word: for state in current_level: if (state, action) in nfa['transitions']: next_level.update(nfa['transitions'][state, action]) if len(next_level) < 1: return False current_level = next_level next_level = set() if current_level.intersection(nfa['accepting_states']): return True else: return False
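A quick sketch of acceptance on a toy NFA (illustrative only):

nfa = {
    'alphabet': {'a', 'b'},
    'states': {'s0', 's1'},
    'initial_states': {'s0'},
    'accepting_states': {'s1'},
    'transitions': {('s0', 'a'): {'s1'}, ('s1', 'b'): {'s1'}},
}
# nfa_word_acceptance(nfa, ['a'])      -> True
# nfa_word_acceptance(nfa, ['a', 'b']) -> True
# nfa_word_acceptance(nfa, ['b'])      -> False  (no run survives)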
<SYSTEM_TASK:>
In the project's built index.html file, replace the top "source" link
<END_TASK>
<USER_TASK:>
Description:
def overwrite_view_source(project, dir_path):
    """In the project's built index.html file, replace the top "source"
    link with a link to the documentation's home, which is the MkDocs home

    Args:
        project (str): project to update
        dir_path (pathlib.Path): this file's path
    """
project_html_location = dir_path / project / HTML_LOCATION if not project_html_location.exists(): return files_to_overwrite = [ f for f in project_html_location.iterdir() if "html" in f.suffix ] for html_file in files_to_overwrite: with open(html_file, "r") as f: html = f.readlines() for i, l in enumerate(html): if TO_REPLACE_WITH_HOME in l: html[i] = NEW_HOME_LINK break with open(html_file, "w") as f: f.writelines(html)
<SYSTEM_TASK:>
Find the projects listed in the Home Documentation's index.md file.
<END_TASK>
<USER_TASK:>
Description:
def get_listed_projects():
    """Find the projects listed in the Home Documentation's
    index.md file

    Returns:
        set(str): project names, each with a leading '/'
    """
index_path = Path().resolve() / "docs" / "index.md" with open(index_path, "r") as index_file: lines = index_file.readlines() listed_projects = set() project_section = False for l in lines: idx = l.find(PROJECT_KEY) if idx >= 0: project_section = True if project_section: # Find the first link parenthesis after the key start = l.find("](") if start > 0: closing_parenthesis = sorted( [m.start() for m in re.finditer(r"\)", l) if m.start() > start] )[0] project = l[start + 2 : closing_parenthesis] listed_projects.add(project) # Stop once the Projects section is over: the first heading line # found after at least one project ends it. Waiting for a heading # (rather than stopping eagerly) lets '#' characters appear inside # the projects' descriptions. if len(listed_projects) > 0 and l.startswith("#"): return listed_projects return listed_projects
<SYSTEM_TASK:> Deletes references to the external google fonts in the Home <END_TASK> <USER_TASK:> Description: def make_offline(): """Deletes references to the external google fonts in the Home Documentation's index.html file """
dir_path = Path(os.getcwd()).absolute() css_path = dir_path / "site" / "assets" / "stylesheets" material_css = css_path / "material-style.css" if not material_css.exists(): file_path = Path(__file__).resolve().parent copyfile(file_path / "material-style.css", material_css) copyfile(file_path / "material-icons.woff2", css_path / "material-icons.woff2") indexes = [] for root, _, filenames in os.walk(dir_path / "site"): for filename in fnmatch.filter(filenames, "index.html"): indexes.append(os.path.join(root, filename)) for index_file in indexes: update_index_to_offline(index_file)
<SYSTEM_TASK:> Utility function to deal with polymorphic filenames argument. <END_TASK> <USER_TASK:> Description: def _filenames_from_arg(filename): """Utility function to deal with polymorphic filenames argument."""
if isinstance(filename, string_types): filenames = [filename] elif isinstance(filename, (list, tuple)): filenames = filename else: raise Exception('filename argument must be string, list or tuple') for fn in filenames: if not os.path.exists(fn): raise ValueError('file not found: %s' % fn) if not os.path.isfile(fn): raise ValueError('not a file: %s' % fn) return filenames
<SYSTEM_TASK:> Utility function to obtain a cache file name and determine whether or <END_TASK> <USER_TASK:> Description: def _get_cache(vcf_fn, array_type, region, cachedir, compress, log): """Utility function to obtain a cache file name and determine whether or not a fresh cache file is available."""
# guard condition if isinstance(vcf_fn, (list, tuple)): raise Exception( 'caching only supported when loading from a single VCF file' ) # create cache file name cache_fn = _mk_cache_fn(vcf_fn, array_type=array_type, region=region, cachedir=cachedir, compress=compress) # decide whether or not a fresh cache file is available # (if not, we will parse the VCF and build array from scratch) if not os.path.exists(cache_fn): log('no cache file found') is_cached = False elif os.path.getmtime(vcf_fn) > os.path.getmtime(cache_fn): is_cached = False log('cache file out of date') else: is_cached = True log('cache file available') return cache_fn, is_cached
<SYSTEM_TASK:> Utility function to determine which fields to extract when loading <END_TASK> <USER_TASK:> Description: def _variants_fields(fields, exclude_fields, info_ids): """Utility function to determine which fields to extract when loading variants."""
if fields is None: # no fields specified by user # by default extract all standard and INFO fields fields = config.STANDARD_VARIANT_FIELDS + info_ids else: # fields have been specified for f in fields: # check for non-standard fields not declared in INFO header if f not in config.STANDARD_VARIANT_FIELDS and f not in info_ids: # support extracting INFO even if not declared in header, # but warn... print('WARNING: no INFO definition found for field %s' % f, file=sys.stderr) # process any exclusions if exclude_fields is not None: fields = [f for f in fields if f not in exclude_fields] return tuple(f for f in fields)
<SYSTEM_TASK:> Utility function to determine fill values for variants fields with <END_TASK> <USER_TASK:> Description: def _variants_fills(fields, fills, info_types): """Utility function to determine fill values for variants fields with missing values."""
if fills is None: # no fills specified by user fills = dict() for f, vcf_type in zip(fields, info_types): if f == 'FILTER': fills[f] = False elif f not in fills: if f in config.STANDARD_VARIANT_FIELDS: fills[f] = config.DEFAULT_VARIANT_FILL[f] else: fills[f] = config.DEFAULT_FILL_MAP[vcf_type] # convert to tuple for zipping with fields fills = tuple(fills[f] for f in fields) return fills
<SYSTEM_TASK:> Utility function to determine transformer functions for variants <END_TASK> <USER_TASK:> Description: def _info_transformers(fields, transformers): """Utility function to determine transformer functions for variants fields."""
if transformers is None: # no transformers specified by user transformers = dict() for f in fields: if f not in transformers: transformers[f] = config.DEFAULT_TRANSFORMER.get(f, None) return tuple(transformers[f] for f in fields)
<SYSTEM_TASK:> Utility function to build a numpy dtype for a variants array, <END_TASK> <USER_TASK:> Description: def _variants_dtype(fields, dtypes, arities, filter_ids, flatten_filter, info_types): """Utility function to build a numpy dtype for a variants array, given user arguments and information available from VCF header."""
dtype = list() for f, n, vcf_type in zip(fields, arities, info_types): if f == 'FILTER' and flatten_filter: # split FILTER into multiple boolean fields for flt in filter_ids: nm = 'FILTER_' + flt dtype.append((nm, 'b1')) elif f == 'FILTER' and not flatten_filter: # represent FILTER as a structured field t = [(flt, 'b1') for flt in filter_ids] dtype.append((f, t)) else: if dtypes is not None and f in dtypes: # user overrides default dtype t = dtypes[f] elif f in config.STANDARD_VARIANT_FIELDS: t = config.DEFAULT_VARIANT_DTYPE[f] elif f in config.DEFAULT_INFO_DTYPE: # known INFO field t = config.DEFAULT_INFO_DTYPE[f] else: t = config.DEFAULT_TYPE_MAP[vcf_type] # deal with arity if n == 1: dtype.append((f, t)) else: dtype.append((f, t, (n,))) return dtype
<SYSTEM_TASK:> Utility function to load an array from an iterator. <END_TASK> <USER_TASK:> Description: def _fromiter(it, dtype, count, progress, log): """Utility function to load an array from an iterator."""
if progress > 0: it = _iter_withprogress(it, progress, log) if count is not None: a = np.fromiter(it, dtype=dtype, count=count) else: a = np.fromiter(it, dtype=dtype) return a
<SYSTEM_TASK:>
Utility function to wrap an iterable, reporting progress as we go.
<END_TASK>
<USER_TASK:>
Description:
def _iter_withprogress(iterable, progress, log):
    """Utility function to iterate over `iterable`, yielding its items and
    reporting progress as we go."""
before_all = time.time() before = before_all n = 0 for i, o in enumerate(iterable): yield o n = i+1 if n % progress == 0: after = time.time() log('%s rows in %.2fs; batch in %.2fs (%d rows/s)' % (n, after-before_all, after-before, progress/(after-before))) before = after after_all = time.time() log('%s rows in %.2fs (%d rows/s)' % (n, after_all-before_all, n/(after_all-before_all)))
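A small sketch of driving the wrapper on its own, with print as the log function (illustrative):

for _ in _iter_withprogress(range(10), 4, print):
    pass
# Logs a progress line after rows 4 and 8, then a final summary for all 10 rows.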
<SYSTEM_TASK:>
Load a numpy 1-dimensional structured array with data from the sample
columns of a VCF file.
<END_TASK>
<USER_TASK:>
Description:
def calldata(vcf_fn, region=None, samples=None, ploidy=2, fields=None,
             exclude_fields=None, dtypes=None, arities=None, fills=None,
             vcf_types=None, count=None, progress=0, logstream=None,
             condition=None, slice_args=None, verbose=True, cache=False,
             cachedir=None, skip_cached=False, compress_cache=False,
             truncate=True):
    """
    Load a numpy 1-dimensional structured array with data from the sample
    columns of a VCF file.

    Parameters
    ----------

    vcf_fn: string or list
        Name of the VCF file or list of file names.
    region: string
        Region to extract, e.g., 'chr1' or 'chr1:0-100000'.
    samples: list or array-like
        Selection of samples to extract calldata for; if None, all samples
        are extracted.
    ploidy: int
        Ploidy to assume for genotype calls (default: 2).
    fields: list or array-like
        List of fields to extract from the VCF.
    exclude_fields: list or array-like
        Fields to exclude from extraction.
    dtypes: dict or dict-like
        Dictionary containing dtypes to use instead of the default inferred
        ones.
    arities: dict or dict-like
        Override the number of values to expect.
    fills: dict or dict-like
        Dictionary containing field:fillvalue mappings used to override the
        default fill-in values in VCF fields.
    vcf_types: dict or dict-like
        Dictionary containing field:string mappings used to override any
        bogus type declarations in the VCF header.
    count: int
        Attempt to extract a specific number of records.
    progress: int
        If greater than 0, log parsing progress.
    logstream: file or file-like object
        Stream to use for logging progress.
    condition: array
        Boolean array defining which rows to load.
    slice_args: tuple or list
        Slice of the underlying iterator, e.g., (0, 1000, 10) takes every
        10th row from the first 1000.
    verbose: bool
        Log more messages.
    cache: bool
        If True, save the resulting numpy array to disk, and load from the
        cache if present rather than rebuilding from the VCF.
    cachedir: string
        Manually specify the directory to use to store cache files.
    skip_cached: bool
        If True and cache file is fresh, do not load and return None.
    compress_cache: bool, optional
        If True, compress the cache file.
    truncate: bool, optional
        If True (default) only include variants whose start position is
        within the given region. If False, use default tabix behaviour.
Examples -------- >>> from vcfnp import calldata, view2d >>> c = calldata('fixture/sample.vcf') >>> c array([ ((True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])), ((True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])), ((True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [1, 0], 8, 48, b'1|0', [51, 51]), (True, False, [1, 1], 5, 43, b'1/1', [0, 0])), ((True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [0, 1], 5, 3, b'0|1', [65, 3]), (True, False, [0, 0], 3, 41, b'0/0', [0, 0])), ((True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [2, 1], 0, 2, b'2|1', [18, 2]), (True, False, [2, 2], 4, 35, b'2/2', [0, 0])), ((True, True, [0, 0], 0, 54, b'0|0', [56, 60]), (True, True, [0, 0], 4, 48, b'0|0', [51, 51]), (True, False, [0, 0], 2, 61, b'0/0', [0, 0])), ((True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 2], 2, 17, b'0/2', [0, 0]), (False, False, [-1, -1], 3, 40, b'./.', [0, 0])), ((True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, True, [0, 0], 0, 0, b'0|0', [0, 0]), (False, False, [-1, -1], 0, 0, b'./.', [0, 0])), ((True, False, [0, -1], 0, 0, b'0', [0, 0]), (True, False, [0, 1], 0, 0, b'0/1', [0, 0]), (True, True, [0, 2], 0, 0, b'0|2', [0, 0]))], dtype=[('NA00001', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]), ('NA00002', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]), ('NA00003', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))])]) >>> c['NA00001'] array([(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [0, 0], 0, 54, b'0|0', [56, 60]), (True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, False, [0, -1], 0, 0, b'0', [0, 0])], dtype=[('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]) >>> c2d = view2d(c) >>> c2d array([[(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])], [(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])], [(True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [1, 0], 8, 48, b'1|0', [51, 51]), (True, False, [1, 1], 5, 43, b'1/1', [0, 0])], [(True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [0, 1], 5, 3, b'0|1', [65, 3]), (True, False, [0, 0], 3, 41, b'0/0', [0, 0])], [(True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [2, 1], 0, 2, b'2|1', [18, 2]), (True, False, [2, 2], 4, 35, b'2/2', [0, 0])], [(True, True, [0, 0], 0, 54, b'0|0', [56, 60]), (True, True, [0, 0], 4, 48, b'0|0', [51, 51]), (True, False, [0, 0], 2, 61, b'0/0', [0, 0])], [(True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 2], 2, 17, b'0/2', [0, 0]), (False, False, [-1, -1], 3, 40, b'./.', [0, 0])], [(True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, True, [0, 0], 0, 0, b'0|0', [0, 0]), (False, False, [-1, -1], 0, 0, b'./.', [0, 0])], [(True, 
False, [0, -1], 0, 0, b'0', [0, 0]), (True, False, [0, 1], 0, 0, b'0/1', [0, 0]), (True, True, [0, 2], 0, 0, b'0|2', [0, 0])]], dtype=[('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]) >>> c2d['genotype'] array([[[ 0, 0], [ 0, 0], [ 0, 1]], [[ 0, 0], [ 0, 0], [ 0, 1]], [[ 0, 0], [ 1, 0], [ 1, 1]], [[ 0, 0], [ 0, 1], [ 0, 0]], [[ 1, 2], [ 2, 1], [ 2, 2]], [[ 0, 0], [ 0, 0], [ 0, 0]], [[ 0, 1], [ 0, 2], [-1, -1]], [[ 0, 0], [ 0, 0], [-1, -1]], [[ 0, -1], [ 0, 1], [ 0, 2]]], dtype=int8) >>> c2d['genotype'][3, :] array([[0, 0], [0, 1], [0, 0]], dtype=int8) """
# flake8: noqa loader = _CalldataLoader(vcf_fn, region=region, samples=samples, ploidy=ploidy, fields=fields, exclude_fields=exclude_fields, dtypes=dtypes, arities=arities, fills=fills, vcf_types=vcf_types, count=count, progress=progress, logstream=logstream, condition=condition, slice_args=slice_args, verbose=verbose, cache=cache, cachedir=cachedir, skip_cached=skip_cached, compress_cache=compress_cache, truncate=truncate) arr = loader.load() return arr
<SYSTEM_TASK:>
Return the current datetime, honouring USE_TZ and the default timezone.
<END_TASK>
<USER_TASK:>
Description:
def get_datetimenow(self):
    """
    Return the current datetime; when settings.USE_TZ is enabled, the value
    is made timezone-aware and localized to the default timezone.
    """
value = timezone.datetime.utcnow() if settings.USE_TZ: value = timezone.localtime( timezone.make_aware(value, timezone.utc), timezone.get_default_timezone() ) return value
<SYSTEM_TASK:>
Exports a DFA to a JSON file.
<END_TASK>
<USER_TASK:>
Description:
def dfa_to_json(dfa: dict, name: str, path: str = './'):
    """ Exports a DFA to a JSON file.

    If *path* does not exist, it will be created.

    :param dict dfa: DFA to export;
    :param str name: name of the output file;
    :param str path: path where to save the JSON file (default: working directory)
    """
out = { 'alphabet': list(dfa['alphabet']), 'states': list(dfa['states']), 'initial_state': dfa['initial_state'], 'accepting_states': list(dfa['accepting_states']), 'transitions': list() } for t in dfa['transitions']: out['transitions'].append( [t[0], t[1], dfa['transitions'][t]]) if not os.path.exists(path): os.makedirs(path) file = open(os.path.join(path, name + '.json'), 'w') json.dump(out, file, sort_keys=True, indent=4) file.close()
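A minimal sketch of the expected DFA dictionary and the file it produces (names and paths are illustrative):

dfa = {
    'alphabet': {'a'},
    'states': {'s0', 's1'},
    'initial_state': 's0',
    'accepting_states': {'s1'},
    'transitions': {('s0', 'a'): 's1', ('s1', 'a'): 's1'},
}
dfa_to_json(dfa, 'toy_dfa', path='./out')
# Writes ./out/toy_dfa.json, with each transition flattened to a
# [from_state, symbol, to_state] triple.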
<SYSTEM_TASK:>
Imports a DFA from a DOT file.
<END_TASK>
<USER_TASK:>
Description:
def dfa_dot_importer(input_file: str) -> dict:
    """ Imports a DFA from a DOT file.

    The following attributes of DOT files are recognized:

    • nodeX shape=doublecircle -> accepting node;
    • nodeX root=true -> initial node;
    • edgeX label="a" -> action in alphabet;
    • fake [style=invisible] -> dummy invisible node pointing to the
      initial state (it will be skipped);
    • fake -> S [style=bold] -> dummy transition to draw the arrow
      pointing to the initial state (it will be skipped).

    Forbidden names:

    • 'fake', used for graphical purposes to draw the arrow of the
      initial state;
    • 'sink', used as an additional state when completing a DFA;
    • 'None', used when no initial state is present.

    Forbidden characters:

    • "
    • '
    • (
    • )
    • spaces

    :param str input_file: path to the DOT file;
    :return: *(dict)* representing a DFA.
    """
# pyDot Object g = pydot.graph_from_dot_file(input_file)[0] states = set() initial_state = None accepting_states = set() replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''} for node in g.get_nodes(): if node.get_name() == 'fake' \ or node.get_name() == 'None' \ or node.get_name() == 'graph' \ or node.get_name() == 'node': continue if 'style' in node.get_attributes() \ and node.get_attributes()['style'] == 'invisible': continue node_reference = __replace_all(replacements, node.get_name()).split(',') if len(node_reference) > 1: node_reference = tuple(node_reference) else: node_reference = node_reference[0] states.add(node_reference) for attribute in node.get_attributes(): if attribute == 'root': initial_state = node_reference if attribute == 'shape' and node.get_attributes()[ 'shape'] == 'doublecircle': accepting_states.add(node_reference) alphabet = set() transitions = {} for edge in g.get_edges(): if edge.get_source() == 'fake': continue label = __replace_all(replacements, edge.get_label()) alphabet.add(label) source = __replace_all(replacements, edge.get_source()).split(',') if len(source) > 1: source = tuple(source) else: source = source[0] destination = __replace_all(replacements, edge.get_destination()).split(',') if len(destination) > 1: destination = tuple(destination) else: destination = destination[0] transitions[source, label] = destination dfa = { 'alphabet': alphabet, 'states': states, 'initial_state': initial_state, 'accepting_states': accepting_states, 'transitions': transitions} return dfa
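For reference, a hypothetical 'toy.dot' file using only the attributes listed above would import as follows:

# toy.dot (hypothetical):
#
#   digraph toy {
#       fake [style=invisible]
#       s0 [root=true]
#       s1 [shape=doublecircle]
#       fake -> s0 [style=bold]
#       s0 -> s1 [label="a"]
#       s1 -> s1 [label="a"]
#   }
#
# dfa = dfa_dot_importer('toy.dot')
# dfa['initial_state'] == 's0'
# dfa['accepting_states'] == {'s1'}
# dfa['transitions'][('s0', 'a')] == 's1'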
<SYSTEM_TASK:> Exports a NFA to a JSON file. <END_TASK> <USER_TASK:> Description: def nfa_to_json(nfa: dict, name: str, path: str = './'): """ Exports a NFA to a JSON file. :param dict nfa: NFA to export; :param str name: name of the output file; :param str path: path where to save the JSON file (default: working directory). """
transitions = list() # key[state in states, action in alphabet] # value [Set of arriving states in states] for p in nfa['transitions']: for dest in nfa['transitions'][p]: transitions.append([p[0], p[1], dest]) out = { 'alphabet': list(nfa['alphabet']), 'states': list(nfa['states']), 'initial_states': list(nfa['initial_states']), 'accepting_states': list(nfa['accepting_states']), 'transitions': transitions } if not os.path.exists(path): os.makedirs(path) file = open(os.path.join(path, name + '.json'), 'w') json.dump(out, file, sort_keys=True, indent=4) file.close()
<SYSTEM_TASK:>
Imports an NFA from a DOT file.
<END_TASK>
<USER_TASK:>
Description:
def nfa_dot_importer(input_file: str) -> dict:
    """ Imports an NFA from a DOT file.

    The following attributes of .dot files are recognized:

    • nodeX shape=doublecircle -> accepting node;
    • nodeX root=true -> initial node;
    • edgeX label="a" -> action in alphabet;
    • fakeX style=invisible -> dummy invisible nodes pointing to the
      initial states (they will be skipped);
    • fakeX -> S [style=bold] -> dummy transitions to draw the arrows
      pointing to the initial states (they will be skipped).

    All invisible nodes are skipped.

    Forbidden names:

    • 'fake', used for graphical purposes to draw the arrows of the
      initial states;
    • 'sink', used as an additional state when completing an NFA.

    Forbidden characters:

    • "
    • '
    • (
    • )
    • spaces

    :param str input_file: Path to the input DOT file;
    :return: *(dict)* representing an NFA.
    """
# pyDot Object g = pydot.graph_from_dot_file(input_file)[0] states = set() initial_states = set() accepting_states = set() replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''} for node in g.get_nodes(): attributes = node.get_attributes() if node.get_name() == 'fake' \ or node.get_name() == 'None' \ or node.get_name() == 'graph' \ or node.get_name() == 'node': continue if 'style' in attributes \ and attributes['style'] == 'invisible': continue node_reference = __replace_all(replacements, node.get_name()).split(',') if len(node_reference) > 1: node_reference = tuple(node_reference) else: node_reference = node_reference[0] states.add(node_reference) for attribute in attributes: if attribute == 'root': initial_states.add(node_reference) if attribute == 'shape' \ and attributes['shape'] == 'doublecircle': accepting_states.add(node_reference) alphabet = set() transitions = {} for edge in g.get_edges(): source = __replace_all(replacements, edge.get_source()).split(',') if len(source) > 1: source = tuple(source) else: source = source[0] destination = __replace_all(replacements, edge.get_destination()).split(',') if len(destination) > 1: destination = tuple(destination) else: destination = destination[0] if source not in states or destination not in states: continue label = __replace_all(replacements, edge.get_label()) alphabet.add(label) transitions.setdefault((source, label), set()).add( destination) nfa = { 'alphabet': alphabet, 'states': states, 'initial_states': initial_states, 'accepting_states': accepting_states, 'transitions': transitions } return nfa
<SYSTEM_TASK:>
Recursive call for word acceptance.
<END_TASK>
<USER_TASK:>
Description:
def __recursive_acceptance(afw, state, remaining_word):
    """ Recursive call for word acceptance.

    :param dict afw: input AFW;
    :param str state: current state;
    :param list remaining_word: list containing the symbols of the word
        still to be read.
    :return: *(bool)*, True if the word is accepted, False otherwise.
    """
# the word is accepted only if all the final states are # accepting states if len(remaining_word) == 0: if state in afw['accepting_states']: return True else: return False action = remaining_word[0] if (state, action) not in afw['transitions']: return False if afw['transitions'][state, action] == 'True': return True elif afw['transitions'][state, action] == 'False': return False transition = (state, action) # extract from the boolean formula of the transition the # states involved in it involved_states = list( set( re.findall(r"[\w']+", afw['transitions'][transition]) ).difference({'and', 'or', 'True', 'False'}) ) possible_assignments = set( itertools.product([True, False], repeat=len(involved_states))) # For each possible assignment of the transition (a # boolean formula over the states) for assignment in possible_assignments: mapping = dict(zip(involved_states, assignment)) # If the assignment evaluation is positive if eval(afw['transitions'][transition], mapping): ok = True mapping.pop('__builtins__') # removes the useless entry # added by the function eval() # Check if the word is accepted in ALL the states # mapped to True by the assignment for mapped_state in mapping: if mapping[mapped_state] is False: continue if not __recursive_acceptance(afw, mapped_state, remaining_word[1:]): # if one positive state of the assignment # doesn't accept the word, the whole # assignment is discarded ok = False break if ok: # If at least one assignment accepts the word, # the word is accepted by the afw return True return False
<SYSTEM_TASK:>
Side effect on input! Completes the AFW by adding missing transitions.
<END_TASK>
<USER_TASK:>
Description:
def afw_completion(afw):
    """ Side effect on input! Completes the AFW by adding the missing
    transitions and marking them as False.

    :param dict afw: input AFW.
    """
for state in afw['states']: for a in afw['alphabet']: if (state, a) not in afw['transitions']: afw['transitions'][state, a] = 'False' return afw
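A minimal sketch: transitions are boolean-formula strings, and completion fills in every missing (state, symbol) pair (the AFW below is illustrative):

afw = {
    'alphabet': {'a', 'b'},
    'states': {'q0', 'q1'},
    'initial_state': 'q0',
    'accepting_states': {'q1'},
    'transitions': {('q0', 'a'): 'q0 and q1'},
}
afw_completion(afw)
# afw['transitions'][('q0', 'b')] == 'False'
# afw['transitions'][('q1', 'a')] == 'False'
# afw['transitions'][('q1', 'b')] == 'False'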
<SYSTEM_TASK:>
Returns an AFW reading the same language as the input NFA.
<END_TASK>
<USER_TASK:>
Description:
def nfa_to_afw_conversion(nfa: dict) -> dict:
    """ Returns an AFW reading the same language as the input NFA.

    Let :math:`A = (Σ,S,S^0, ρ,F)` be an NFA. Then we define the AFW AA
    such that :math:`L(AA) = L(A)` as follows

    :math:`AA = (Σ, S ∪ {s_0}, s_0 , ρ_A , F )`

    where :math:`s_0` is a new state and :math:`ρ_A` is defined as follows:

    • :math:`ρ_A(s, a)= ⋁_{(s,a,s')∈ρ}s'`, for all :math:`a ∈ Σ` and :math:`s ∈ S`
    • :math:`ρ_A(s^0, a)= ⋁_{s∈S^0,(s,a,s')∈ρ}s'`, for all :math:`a ∈ Σ`

    We take an empty disjunction in the definition of AA to be equivalent
    to false. Essentially, the transitions of A are viewed as disjunctions
    in AA. A special treatment is needed for the initial state, since we
    allow a set of initial states in nondeterministic automata, but only a
    single initial state in alternating automata.

    :param dict nfa: input NFA.
    :return: *(dict)* representing an AFW.
    """
afw = { 'alphabet': nfa['alphabet'].copy(), 'states': nfa['states'].copy(), 'initial_state': 'root', 'accepting_states': nfa['accepting_states'].copy(), 'transitions': dict() } # Make sure "root" node doesn't already exists, in case rename it i = 0 while afw['initial_state'] in nfa['states']: afw['initial_state'] = 'root' + str(i) i += 1 afw['states'].add(afw['initial_state']) for (state, action) in nfa['transitions']: boolean_formula = str() for destination in nfa['transitions'][state, action]: boolean_formula += destination + ' or ' # strip last ' or ' from the formula string boolean_formula = boolean_formula[0:-4] afw['transitions'][state, action] = boolean_formula if state in nfa['initial_states']: afw['transitions'][afw['initial_state'], action] = boolean_formula return afw
<SYSTEM_TASK:>
Returns an NFA reading the same language as the input AFW.
<END_TASK>
<USER_TASK:>
Description:
def afw_to_nfa_conversion(afw: dict) -> dict:
    """ Returns an NFA reading the same language as the input AFW.

    Let :math:`A = (Σ, S, s^0 , ρ, F )` be an AFW. Then we define the NFA
    :math:`A_N` such that :math:`L(A_N) = L(A)` as follows

    :math:`A_N = (Σ, S_N , S^0_N , ρ_N , F_N )`

    where:

    • :math:`S_N = 2^S`
    • :math:`S^0_N= \{\{s^0 \}\}`
    • :math:`F_N=2^F`
    • :math:`(Q,a,Q') ∈ ρ_N` iff :math:`Q'` satisfies :math:`⋀_{ s∈Q} ρ(s, a)`

    We take an empty conjunction in the definition of :math:`ρ_N` to be
    equivalent to true; thus, :math:`(∅, a, ∅) ∈ ρ_N`.

    :param dict afw: input AFW.
    :return: *(dict)* representing an NFA.
    """
nfa = { 'alphabet': afw['alphabet'].copy(), 'initial_states': {(afw['initial_state'],)}, 'states': {(afw['initial_state'],)}, 'accepting_states': set(), 'transitions': dict() } # State of the NFA are composed by the union of more states of the AFW boundary = deepcopy(nfa['states']) possible_assignments = set( itertools.product([True, False], repeat=len(afw['states']))) while boundary: state = boundary.pop() # The state is accepting only if composed exclusively of final states if set(state).issubset(afw['accepting_states']): nfa['accepting_states'].add(state) for action in nfa['alphabet']: boolean_formula = 'True' # join the boolean formulas of the single states given the action for s in state: if (s, action) not in afw['transitions']: boolean_formula += ' and False' else: boolean_formula += \ ' and (' + \ afw['transitions'][s, action] + \ ')' for assignment in possible_assignments: mapping = dict(zip(afw['states'], assignment)) # If the formula is satisfied if eval(boolean_formula, mapping): # add the transition to the resulting NFA evaluation = \ tuple(k for k in mapping if mapping[k] is True) if evaluation not in nfa['states']: nfa['states'].add(evaluation) boundary.add(evaluation) nfa['transitions'].setdefault( (state, action), set()).add(evaluation) return nfa
<SYSTEM_TASK:> Returns the dual of the input formula. <END_TASK> <USER_TASK:> Description: def formula_dual(input_formula: str) -> str: """ Returns the dual of the input formula. The dual operation on formulas in :math:`B^+(X)` is defined as: the dual :math:`\overline{θ}` of a formula :math:`θ` is obtained from θ by switching :math:`∧` and :math:`∨`, and by switching :math:`true` and :math:`false`. :param str input_formula: original string. :return: *(str)*, dual of input formula. """
conversion_dictionary = { 'and': 'or', 'or': 'and', 'True': 'False', 'False': 'True' } return re.sub( '|'.join(re.escape(key) for key in conversion_dictionary.keys()), lambda k: conversion_dictionary[k.group(0)], input_formula)
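For instance, tracing the substitution table above (state names themselves are untouched):

# formula_dual('q0 and (q1 or False)') -> 'q0 or (q1 and True)'
# formula_dual('True')                 -> 'False'

Note that the substitution is purely textual: a state name that happens to contain 'and' or 'or' as a substring would be mangled, so such names should be avoided.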
<SYSTEM_TASK:>
Returns an AFW reading the complemented language read by the input AFW.
<END_TASK>
<USER_TASK:>
Description:
def afw_complementation(afw: dict) -> dict:
    """ Returns an AFW reading the complemented language read by the input AFW.

    Let :math:`A = (Σ, S, s^0 , ρ, F )`. Define :math:`Ā = (Σ, S, s^0 ,
    \overline{ρ}, S − F )`, where :math:`\overline{ρ}(s, a) = \overline{ρ(s,
    a)}` for all :math:`s ∈ S` and :math:`a ∈ Σ`. That is,
    :math:`\overline{ρ}` is the dualized transition function. It can be
    shown that :math:`L( Ā) = Σ^∗ − L(A)`.

    The input AFW needs to be complete, i.e. each missing transition must
    be added, pointing to False.

    :param dict afw: input AFW.
    :return: *(dict)* representing an AFW.
    """
completed_input = afw_completion(deepcopy(afw)) complemented_afw = { 'alphabet': completed_input['alphabet'], 'states': completed_input['states'], 'initial_state': completed_input['initial_state'], 'accepting_states': completed_input['states'].difference(afw['accepting_states']), 'transitions': dict() } for transition in completed_input['transitions']: complemented_afw['transitions'][transition] = \ formula_dual(completed_input['transitions'][transition]) return complemented_afw
<SYSTEM_TASK:>
Returns an AFW that reads the union of the languages read by the input AFWs.
<END_TASK>
<USER_TASK:>
Description:
def afw_union(afw_1: dict, afw_2: dict) -> dict:
    """ Returns an AFW that reads the union of the languages read
    by the input AFWs.

    Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2 = (Σ,
    S_2 , s^0_2, ρ_2 , F_2 )` be alternating automata accepting the
    languages :math:`L( A_1)` and :math:`L(A_2)`. Then,

    :math:`B_∪ = (Σ, S_1 ∪ S_2 ∪ {root}, ρ_∪ , root , F_1 ∪ F_2 )`

    with

    :math:`ρ_∪ = ρ_1 ∪ ρ_2 ∪ [(root, a): ρ(s^0_1 , a) ∨ ρ(s^0_2 , a)]`

    accepts :math:`L(A_1) ∪ L(A_2)`.

    Make sure the AFWs do not share state names; if they do, rename them
    first with the :mod:`PySimpleAutomata.AFW.rename_afw_states` function.

    :param dict afw_1: first input AFW;
    :param dict afw_2: second input AFW.
    :return: *(dict)* representing the united AFW.
    """
# make sure new root state is unique initial_state = 'root' i = 0 while initial_state in afw_1['states'] or initial_state in afw_2['states']: initial_state = 'root' + str(i) i += 1 union = { 'alphabet': afw_1['alphabet'].union(afw_2['alphabet']), 'states': afw_1['states'].union(afw_2['states']).union({initial_state}), 'initial_state': initial_state, 'accepting_states': afw_1['accepting_states'].union(afw_2['accepting_states']), 'transitions': deepcopy(afw_1['transitions']) } # add also afw_2 transitions union['transitions'].update(afw_2['transitions']) # if just one initial state is accepting, so the new one is if afw_1['initial_state'] in afw_1['accepting_states'] \ or afw_2['initial_state'] in afw_2['accepting_states']: union['accepting_states'].add(union['initial_state']) # copy all transitions of initial states and eventually their conjunction # into the new initial state for action in union['alphabet']: if (afw_1['initial_state'], action) in afw_1['transitions']: union['transitions'][initial_state, action] = \ '(' + \ afw_1['transitions'][afw_1['initial_state'], action] + \ ')' if (afw_2['initial_state'], action) in afw_2['transitions']: union['transitions'][initial_state, action] += \ ' or (' + \ afw_2['transitions'][afw_2['initial_state'], action] + \ ')' elif (afw_2['initial_state'], action) in afw_2['transitions']: union['transitions'][initial_state, action] = \ '(' + \ afw_2['transitions'][afw_2['initial_state'], action] + \ ')' return union
<SYSTEM_TASK:>
Returns an AFW that reads the intersection of the languages read by the input AFWs.
<END_TASK>
<USER_TASK:>
Description:
def afw_intersection(afw_1: dict, afw_2: dict) -> dict:
    """ Returns an AFW that reads the intersection of the
    languages read by the input AFWs.

    Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2 = (Σ,
    S_2 , s^0_2, ρ_2 , F_2 )` be alternating automata accepting the
    languages :math:`L( A_1)` and :math:`L(A_2)`. Then,

    :math:`B_∩ = (Σ, S_1 ∪ S_2 ∪ {root}, root, ρ_∩ , F_1 ∪ F_2 )`

    with

    :math:`ρ_∩ = ρ_1 ∪ ρ_2 ∪ [(root, a): ρ(s^0_1 , a) ∧ ρ(s^0_2 , a)]`

    accepts :math:`L(A_1) ∩ L(A_2)`.

    :param dict afw_1: first input AFW;
    :param dict afw_2: second input AFW.
    :return: *(dict)* representing an AFW.
    """
# make sure new root state is unique initial_state = 'root' i = 0 while initial_state in afw_1['states'] or initial_state in afw_2['states']: initial_state = 'root' + str(i) i += 1 intersection = { 'alphabet': afw_1['alphabet'].union(afw_2['alphabet']), 'states': afw_1['states'].union(afw_2['states']).union({initial_state}), 'initial_state': initial_state, 'accepting_states': afw_1['accepting_states'].union(afw_2['accepting_states']), 'transitions': deepcopy(afw_1['transitions']) } # add also afw_2 transitions intersection['transitions'].update(afw_2['transitions']) # if both initial states are accepting, so the new one is if afw_1['initial_state'] in afw_1['accepting_states'] \ and afw_2['initial_state'] in afw_2['accepting_states']: intersection['accepting_states'].add( intersection['initial_state']) # New initial state transitions will be the conjunction of # precedent inital states ones for action in intersection['alphabet']: if (afw_1['initial_state'], action) in afw_1['transitions']: intersection['transitions'][initial_state, action] = \ '(' + \ afw_1['transitions'][afw_1['initial_state'], action] + \ ')' if (afw_2['initial_state'], action) in afw_2['transitions']: intersection['transitions'][initial_state, action] += \ ' and (' + \ afw_2['transitions'][afw_2['initial_state'], action] + \ ')' else: intersection['transitions'][ initial_state, action] += ' and False' elif (afw_2['initial_state'], action) in afw_2['transitions']: intersection['transitions'][initial_state, action] = \ 'False and (' + \ afw_2['transitions'][afw_2['initial_state'], action] + \ ')' return intersection
<SYSTEM_TASK:> Helper function to map values from their native Python types <END_TASK> <USER_TASK:> Description: def translate_to_dbus_type(typeof, value): """ Helper function to map values from their native Python types to Dbus types. :param type typeof: Target for type conversion e.g., 'dbus.Dictionary' :param value: Value to assign using type 'typeof' :return: 'value' converted to type 'typeof' :rtype: typeof """
if ((isinstance(value, types.UnicodeType) or isinstance(value, str)) and typeof is not dbus.String): # FIXME: This is potentially dangerous since it evaluates # a string in-situ return typeof(eval(value)) else: return typeof(value)
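Illustrative calls, assuming the python-dbus bindings are available (the values follow from the two branches above):

# translate_to_dbus_type(dbus.UInt32, '42')  -> dbus.UInt32(42)   (string is eval'd)
# translate_to_dbus_type(dbus.String, 'hi')  -> dbus.String('hi') (no eval needed)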
<SYSTEM_TASK:> Method to call in order to invoke the user callback. <END_TASK> <USER_TASK:> Description: def signal_handler(self, *args): """ Method to call in order to invoke the user callback. :param args: list of signal-dependent arguments :return: """
self.user_callback(self.signal, self.user_arg, *args)
<SYSTEM_TASK:> Helper to get a property value by name or all <END_TASK> <USER_TASK:> Description: def get_property(self, name=None): """ Helper to get a property value by name or all properties as a dictionary. See also :py:meth:`set_property` :param str name: defaults to None which means all properties in the object's dictionary are returned as a dict. Otherwise, the property name key is used and its value is returned. :return: Property value by property key, or a dictionary of all properties :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments """
if (name): return self._interface.GetProperties()[name] else: return self._interface.GetProperties()
<SYSTEM_TASK:> Helper to set a property value by name, translating to correct <END_TASK> <USER_TASK:> Description: def set_property(self, name, value): """ Helper to set a property value by name, translating to correct dbus type See also :py:meth:`get_property` :param str name: The property name in the object's dictionary whose value shall be set. :param value: Properties new value to be assigned. :return: :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments """
typeof = type(self.get_property(name)) self._interface.SetProperty(name, translate_to_dbus_type(typeof, value))
<SYSTEM_TASK:>
Get document for model_instance.
<END_TASK>
<USER_TASK:>
Description:
def dump_document(cls, instance, fields_own=None, fields_to_many=None):
    """ Get document for model_instance.

    To redefine the dump rule for a field x, define a method
    dump_document_x.

    :param django.db.models.Model instance: model instance
    :param list<Field> or None fields_own: model instance fields to dump
    :return dict: document

    Related documents are not included in the current one. In case of
    to-many field serialization, ensure that model_instance has been
    prefetch_related, so that no extra database calls are executed.

    The method ensures that the document has cls.Meta.fieldnames_include
    and does not have cls.Meta.fieldnames_exclude.

    Steps:
    1) fieldnames_include could be properties, but not related models.
    Add them to fields_own.
    """
if fields_own is not None: fields_own = {f.name for f in fields_own} else: fields_own = { f.name for f in instance._meta.fields if f.rel is None and f.serialize } fields_own.add('id') fields_own = (fields_own | set(cls.Meta.fieldnames_include))\ - set(cls.Meta.fieldnames_exclude) document = {} # Include own fields for fieldname in fields_own: field_serializer = getattr( cls, "dump_document_{}".format(fieldname), None) if field_serializer is not None: value = field_serializer(instance) else: value = getattr(instance, fieldname) try: field = instance._meta.get_field(fieldname) except models.fields.FieldDoesNotExist: # Field is property, value already calculated pass else: if isinstance(field, models.fields.files.FileField): # TODO: Serializer depends on API here. value = cls.Meta.api.base_url + value.url elif isinstance(field, models.CommaSeparatedIntegerField): value = [v for v in value] document[fieldname] = value # Include to-one fields. It does not require database calls for field in instance._meta.fields: fieldname = "{}_id".format(field.name) # NOTE: check field is not related to parent model to exclude # <class>_ptr fields. OneToOne relationship field.rel.multiple = # False. Here make sure relationship is to parent model. if field.rel and not field.rel.multiple \ and isinstance(instance, field.rel.to): continue if field.rel and fieldname not in cls.Meta.fieldnames_exclude: document["links"] = document.get("links") or {} document["links"][field.name] = getattr(instance, fieldname) # Include to-many fields. It requires database calls. At this point we # assume that model was prefetch_related with child objects, which would # be included into 'linked' attribute. Here we need to add ids of linked # objects. To avoid database calls, iterate over objects manually and # get ids. fields_to_many = fields_to_many or [] for field in fields_to_many: document["links"] = document.get("links") or {} document["links"][field.related_resource_name] = [ obj.id for obj in getattr(instance, field.name).all()] return document
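A hypothetical usage sketch (the resource, model, and field names here are invented for illustration, not taken from the source):

# document = SomeResource.dump_document(instance)
# document['id']                -> own field values, plus any properties
#                                  listed in cls.Meta.fieldnames_include
# document['links']['category'] -> id of a to-one related object
# With fields_to_many supplied, document['links'][<related name>] holds a
# list of ids, gathered without extra queries provided the instance was
# prefetch_related beforehand.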