Dataset schema: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, values 52.6k–1.16M)
Encrypts a string using a given rsa.PublicKey object. If the message is larger than the key, it will split it up into a list and encrypt each line in the list. Args: message (string): The string to encrypt. public_key (rsa.PublicKey): The key object used to encrypt the message. Only the paired private key can decrypt it. Returns: A json string of the list of encrypted lines of the message.
def encrypt(self, message, public_key):
    # Get the maximum message length based on the key
    max_str_len = rsa.common.byte_size(public_key.n) - 11

    # If the message is longer than the key size, split it into a list to
    # be encrypted
    if len(message) > max_str_len:
        message = textwrap.wrap(message, width=max_str_len)
    else:
        message = [message]

    # Create a list for the encrypted message to send
    enc_msg = []

    # If we have a long message, loop through and encrypt each part of the
    # string
    for line in message:
        # Encrypt the line in the message into a bytestring
        enc_line = rsa.encrypt(line, public_key)

        # Convert the encrypted bytestring into ASCII, so we can send it
        # over the network
        enc_line_converted = binascii.b2a_base64(enc_line)
        enc_msg.append(enc_line_converted)

    # Serialize the encrypted message again with json
    enc_msg = json.dumps(enc_msg)

    # Return the list of encrypted strings
    return enc_msg
879,697
Decrypts a string using our own private key object. Args: message (string): The string of the message to decrypt. Returns: The unencrypted string.
def decrypt(self, message):
    # Unserialize the encrypted message
    message = json.loads(message)

    # Set up a list for the unencrypted lines of the message
    unencrypted_msg = []

    for line in message:
        # Convert from ascii back to bytestring
        enc_line = binascii.a2b_base64(line)

        # Decrypt the line using our private key
        unencrypted_line = rsa.decrypt(enc_line, self.private_key)
        unencrypted_msg.append(unencrypted_line)

    # Convert the message from a list back into a string
    unencrypted_msg = "".join(unencrypted_msg)

    return unencrypted_msg
879,698
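A minimal roundtrip sketch for the encrypt/decrypt pair above. The Crypto wrapper name is hypothetical; it stands in for whatever class defines these methods, holds self.private_key, and has rsa, textwrap, binascii, and json imported at module level. It also assumes the Python 2-style str handling the original relies on (rsa.encrypt expects bytes on Python 3).

import rsa

# Hypothetical wrapper exposing the encrypt()/decrypt() methods shown above.
crypto = Crypto()

# Throwaway keypair; normally the private half lives only with the receiver.
public_key, crypto.private_key = rsa.newkeys(512)

# encrypt() returns a JSON list of base64-encoded RSA blocks; decrypt() reverses it.
payload = crypto.encrypt("a message that may span several RSA blocks", public_key)
assert crypto.decrypt(payload) == "a message that may span several RSA blocks"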
Create dummies for the elements of a set-valued column. Operates in place. Args: df: data frame columns: either a dictionary of column: values pairs or a collection of columns. cast: whether or not to cast values to set drop: whether or not to drop the binarized columns min_freq: if set, only keep a dummy column when it has at least this many positive entries TODO: make interface same as binarize(). merge the two?
def binarize_sets(df, columns, cast=False, drop=True, min_freq=None):
    for column in columns:
        d = df[column].dropna()  # avoid nulls
        if cast:
            d = d.apply(set)
        values = columns[column] if isinstance(columns, dict) else util.union(d)

        for value in values:
            name = values[value] if type(values) is dict else str(value)
            column_name = column + '_' + name.replace(' ', '_')
            series = d.apply(lambda c: value in c)
            series.fillna(0, inplace=True)
            if not min_freq or series.sum() >= min_freq:
                df[column_name] = series

    if drop:
        # list(columns) will return keys if columns was dict
        df.drop(list(columns), axis=1, inplace=True)

    return df
879,770
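A small hedged usage sketch for binarize_sets, assuming pandas is available; passing a {column: values} dict sidesteps the util.union() helper the bare-list form depends on. The frame and values are made up.

import pandas as pd

df = pd.DataFrame({'tags': [{'red', 'blue'}, {'blue'}, {'green'}]})
binarize_sets(df, columns={'tags': {'red': 'red', 'blue': 'blue'}})
# df now holds boolean 'tags_red' and 'tags_blue' dummies and 'tags' has been dropped.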
Performs mean imputation on a pandas dataframe. Args: train: an optional training mask with which to compute the mean value: instead of computing the mean, use this as the value argument to fillna dropna: whether to drop all null columns inplace: whether to perform the imputation inplace Returns: the imputed DataFrame
def impute(X, value=None, train=None, dropna=True, inplace=True):
    if value is None:
        Xfit = X[train] if train is not None else X
        value = Xfit.mean()
    else:
        if train is not None:
            raise ValueError("Cannot pass both train and value arguments")

    if dropna:
        null_columns = value.index[value.isnull()]
        if len(null_columns) > 0:
            logging.info('Dropping null columns: \n\t%s' % null_columns)
            if inplace:
                X.drop(null_columns, axis=1, inplace=True)
            else:
                X = X.drop(null_columns, axis=1, inplace=False)

    if inplace:
        X.fillna(value.dropna(), inplace=True)
    else:
        X = X.fillna(value.dropna(), inplace=False)

    return X
879,776
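A hedged usage sketch for impute with a toy frame and a boolean training mask; the column names and values are made up.

import numpy as np
import pandas as pd

X = pd.DataFrame({'a': [1.0, np.nan, 3.0, 4.0],
                  'b': [np.nan, np.nan, np.nan, np.nan]})
train = np.array([True, True, True, False])

# Means come from the training rows only; the all-null column 'b' has no mean
# and is dropped because dropna=True.
imputed = impute(X, train=train, inplace=False)
# imputed['a'] -> [1.0, 2.0, 3.0, 4.0]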
Copy source -> destination Args: source (str | None): Source file or folder destination (str | None): Destination file or folder ignore (callable | list | str | None): Names to be ignored adapter (callable | None): Optional function to call on 'source' before copy fatal (bool | None): Abort execution on failure if True logger (callable | None): Logger to use Returns: (int): 1 if effectively done, 0 if no-op, -1 on failure
def copy(source, destination, ignore=None, adapter=None, fatal=True, logger=LOG.debug):
    return _file_op(source, destination, _copy, adapter, fatal, logger, ignore=ignore)
880,055
Perform re.sub with the patterns in the given dict Args: dict_: {pattern: repl} source: str
def substitute(dict_, source):
    d_esc = (re.escape(k) for k in dict_.keys())
    pattern = re.compile('|'.join(d_esc))
    return pattern.sub(lambda x: dict_[x.group()], source)
880,108
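A quick usage sketch for substitute; every key is re.escape()d and joined into one alternation, so a single pass replaces all occurrences. The example strings are made up.

substitute({"cat": "dog", "1+1": "2"}, "the cat says 1+1")
# -> 'the dog says 2'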
Retransmits a message to a client if an event is still awaiting a response. Args: data (dict): A dictionary containing the cuuid, euuid, and message response. E.g. {"cuuid": x, "euuid": y, "response": z}. Returns: None
def retransmit(self, data):
    # If the euuid is still in self.event_uuids, we still haven't received a
    # response from the client, so we resend the message and keep waiting.
    if data["euuid"] in self.event_uuids:
        # Increment the current retry count of the euuid
        self.event_uuids[data["euuid"]] += 1

        # If we've tried more than the maximum, just log an error and stop.
        if (self.event_uuids[data["euuid"]] > self.max_retries or
                data["cuuid"] not in self.registry):
            logger.warning("<%s> Retry limit exceeded. "
                           "Timed out waiting for client for "
                           "event: %s" % (data["cuuid"], data["euuid"]))
            logger.warning("<%s> Deleting event from currently processing "
                           "event uuids" % data["cuuid"])
            del self.event_uuids[data["euuid"]]
        else:
            # Retransmit the message
            logger.debug("<%s> Timed out waiting for response. Retry %s. "
                         "Retransmitting message: "
                         "%s" % (data["cuuid"],
                                 pformat(self.event_uuids[data["euuid"]]),
                                 data["response"]))

            # Look up the host and port based on cuuid
            host = self.registry[data["cuuid"]]["host"]
            port = self.registry[data["cuuid"]]["port"]

            # Send the packet to the client
            self.listener.send_datagram(data["response"], (host, port))

            # Then we set another schedule to check again
            logger.debug("<%s> Scheduling to retry in %s "
                         "seconds" % (data["cuuid"], str(self.timeout)))
            self.listener.call_later(self.timeout, self.retransmit, data)
880,135
Processes messages that have been delivered from the listener. Args: msg (string): The raw packet data delivered from the listener. This data will be unserialized and then processed based on the packet's method. host (tuple): The (address, port) tuple of the message's source. Returns: A response that will be sent back to the client via the listener.
def handle_message(self, msg, host):
    response = None

    # Unserialize the packet, and decrypt if the host has encryption enabled
    if host in self.encrypted_hosts:
        msg_data = unserialize_data(msg, self.compression, self.encryption)
    else:
        msg_data = unserialize_data(msg, self.compression)

    logger.debug("Packet received: " + pformat(msg_data))

    # If the message data is blank, return none
    if not msg_data:
        return response

    # For debug purposes, check if the client is registered or not
    if self.is_registered(msg_data["cuuid"], host[0]):
        logger.debug("<%s> Client is currently registered" % msg_data["cuuid"])
    else:
        logger.debug("<%s> Client is not registered" % msg_data["cuuid"])

    if "method" in msg_data:
        if msg_data["method"] == "REGISTER":
            logger.debug("<%s> Register packet received" % msg_data["cuuid"])
            response = self.register(msg_data, host)

        elif msg_data["method"] == "OHAI":
            if not self.discoverable:
                return False
            logger.debug("<%s> Autodiscover packet received" % msg_data["cuuid"])
            response = self.autodiscover(msg_data)

        elif msg_data["method"] == "AUTH":
            logger.debug("<%s> Authentication packet received" % msg_data["cuuid"])
            response = self.auth_server.verify_login(msg_data)
            if response:
                self.registry[host]["authenticated"] = True

    else:
        if self.auth_server:
            if self.registry[host]["authenticated"]:
                response = self.handle_message_registered(msg_data, host)
        else:
            response = self.handle_message_registered(msg_data, host)

    logger.debug("Packet processing completed")

    return response
880,136
Processes messages that have been delivered by a registered client. Args: msg_data (dict): The unserialized packet data delivered from the listener, processed based on the packet's method. host (tuple): The (address, port) tuple of the message's source. Returns: A response that will be sent back to the client via the listener.
def handle_message_registered(self, msg_data, host): response = None if msg_data["method"] == "EVENT": logger.debug("<%s> <euuid:%s> Event message " "received" % (msg_data["cuuid"], msg_data["euuid"])) response = self.event(msg_data["cuuid"], host, msg_data["euuid"], msg_data["event_data"], msg_data["timestamp"], msg_data["priority"]) elif msg_data["method"] == "OK EVENT": logger.debug("<%s> <euuid:%s> Event confirmation message " "received" % (msg_data["cuuid"], msg_data["euuid"])) try: del self.event_uuids[msg_data["euuid"]] except KeyError: logger.warning("<%s> <euuid:%s> Euuid does not exist in event " "buffer. Key was removed before we could process " "it." % (msg_data["cuuid"], msg_data["euuid"])) elif msg_data["method"] == "OK NOTIFY": logger.debug("<%s> <euuid:%s> Ok notify " "received" % (msg_data["cuuid"], msg_data["euuid"])) try: del self.event_uuids[msg_data["euuid"]] except KeyError: logger.warning("<%s> <euuid:%s> Euuid does not exist in event " "buffer. Key was removed before we could process " "it." % (msg_data["cuuid"], msg_data["euuid"])) return response
880,137
This function simply returns the server version number as a response to the client. Args: message (dict): A dictionary of the autodiscover message from the client. Returns: A JSON string of the "OHAI Client" server response with the server's version number. Examples: >>> response '{"method": "OHAI Client", "version": "1.0"}'
def autodiscover(self, message): # Check to see if the client's version is the same as our own. if message["version"] in self.allowed_versions: logger.debug("<%s> Client version matches server " "version." % message["cuuid"]) response = serialize_data({"method": "OHAI Client", "version": self.version, "server_name": self.server_name}, self.compression, encryption=False) else: logger.warning("<%s> Client version %s does not match allowed server " "versions %s" % (message["cuuid"], message["version"], self.version)) response = serialize_data({"method": "BYE REGISTER"}, self.compression, encryption=False) return response
880,138
This function will check to see if a given host with client uuid is currently registered. Args: cuuid (string): The client uuid that wishes to register. host (tuple): The (address, port) tuple of the client that is registering. Returns: Will return True if the client is registered and will return False if it is not.
def is_registered(self, cuuid, host):
    # Check to see if the host with the client uuid exists in the registry
    # table.
    if (cuuid in self.registry) and (self.registry[cuuid]["host"] == host):
        return True
    else:
        return False
880,140
Register the extension with Sphinx. Args: app: The Sphinx application.
def setup(app): for name, (default, rebuild, _) in ref.CONFIG_VALUES.iteritems(): app.add_config_value(name, default, rebuild) app.add_directive('javaimport', ref.JavarefImportDirective) app.add_role('javaref', ref.JavarefRole(app)) app.connect('builder-inited', initialize_env) app.connect('env-purge-doc', ref.purge_imports) app.connect('env-merge-info', ref.merge_imports) app.connect('build-finished', ref.cleanup)
880,205
Purge expired values from the environment. When certain configuration values change, related values in the environment must be cleared. While Sphinx can rebuild documents on configuration changes, it does not notify extensions when this happens. Instead, cache relevant values in the environment in order to detect when they change. Args: app: The Sphinx application.
def validate_env(app): if not hasattr(app.env, 'javalink_config_cache'): app.env.javalink_config_cache = {} for conf_attr, (_, _, env_attr) in ref.CONFIG_VALUES.iteritems(): if not env_attr: continue value = getattr(app.config, conf_attr) cached = app.env.javalink_config_cache.get(conf_attr, value) app.env.javalink_config_cache[conf_attr] = value if value != cached: app.verbose('[javalink] config.%s has changed, clearing related env', conf_attr) delattr(app.env, env_attr)
880,206
Example: short("examined /Users/joe/foo") => "examined ~/foo" Args: path: Path to represent in its short form Returns: (str): Short form, using '~' if applicable
def short(cls, path):
    if not path:
        return path

    path = str(path)
    if cls.paths:
        for p in cls.paths:
            if p:
                path = path.replace(p + "/", "")

    path = path.replace(cls.home, "~")
    return path
880,277
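A hedged usage sketch for short. The Anchored holder class below is hypothetical; it only mirrors the cls.home and cls.paths attributes the method reads.

class Anchored:
    home = "/Users/joe"
    paths = ["/tmp/build"]

Anchored.short = classmethod(short)

Anchored.short("examined /Users/joe/foo")       # -> 'examined ~/foo'
Anchored.short("/tmp/build/artifacts/app.tar")  # -> 'artifacts/app.tar'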
cartesian product of dict whose values are lists Args: d: dictionary to take product of. multiple dictionaries will first be merged by dict_merge kwargs: additional kwargs for convenience Returns: a list of dictionaries with the same keys as d and kwargs
def dict_product(*d, **kwargs):
    d = dict(dict_merge(*d), **kwargs)
    holdout = {k: d[k] for k in d if not isinstance(d[k], list)}
    d = {k: d[k] for k in d if k not in holdout}

    items = d.items()
    if len(items) == 0:
        dicts = [{}]
    else:
        keys, values = zip(*items)
        dicts = [dict_filter_none(dict(zip(keys, v))) for v in product(*values)]

    for d in dicts:
        d.update(holdout)

    return dicts
880,304
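A usage sketch for dict_product, assuming the module's dict_merge and dict_filter_none helpers behave as their names suggest; non-list values are held out and re-attached to every combination. The ordering shown assumes insertion-ordered dicts.

dict_product({'lr': [0.1, 0.01]}, {'depth': [3, 5]}, seed=0)
# -> [{'lr': 0.1, 'depth': 3, 'seed': 0},
#     {'lr': 0.1, 'depth': 5, 'seed': 0},
#     {'lr': 0.01, 'depth': 3, 'seed': 0},
#     {'lr': 0.01, 'depth': 5, 'seed': 0}]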
Indent all new lines Args: s: the string to indent n_spaces: number of spaces to use for indentation initial: whether or not to start with an indent
def indent(s, n_spaces=2, initial=True):
    i = ' '*n_spaces
    t = s.replace('\n', '\n%s' % i)
    if initial:
        t = i + t
    return t
880,308
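Two quick calls showing what indent does with the default and with custom arguments:

indent('line one\nline two')
# -> '  line one\n  line two'
indent('line one\nline two', n_spaces=4, initial=False)
# -> 'line one\n    line two'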
Create a new target representing a task and its parameters Args: task: Task instance to create target for; the task class has to inherit from :class:`ozelot.tasks.TaskBase`. Returns: ozelot.tasks.ORMTarget: a new target instance
def from_task(cls, task):
    target = cls(name=task.get_name(), params=task.get_param_string())
    return target
880,317
Base query for a target. Args: session: database session to query in
def _base_query(self, session):
    return session.query(ORMTargetMarker) \
        .filter(ORMTargetMarker.name == self.name) \
        .filter(ORMTargetMarker.params == self.params)
880,318
Store entities and their attributes Args: df (pandas.DataFrame): data to store (storing appends 'id' and 'type' columns!) attribute_columns (list(str)): list of column labels that define attributes
def store(self, df, attribute_columns): # ID start values depend on currently stored entities/attributes! entity_id_start = models.Entity.get_max_id(self.session) + 1 attribute_id_start = models.Attribute.get_max_id(self.session) + 1 # append ID and type columns df['id'] = range(entity_id_start, entity_id_start + len(df)) df['type'] = self.type # store entities df[['id', 'type']].to_sql(name=models.Entity.__tablename__, con=self.client.engine, if_exists='append', index=False) # store attributes for col in attribute_columns: # ID column of df is the entity ID of the attribute attr_df = df[[col, 'id']].rename(columns={'id': 'entity_id', col: 'value'}) attr_df['name'] = col # add entity ID column, need to respect already existing entities attr_df['id'] = range(attribute_id_start, attribute_id_start + len(df)) attribute_id_start += len(df) # store attr_df.to_sql(name=models.Attribute.__tablename__, con=self.client.engine, if_exists='append', index=False)
880,326
Starts the listen loop. If threading is enabled, then the loop will be started in its own thread. Args: None Returns: None
def listen(self):
    self.listening = True

    if self.threading:
        from threading import Thread
        self.listen_thread = Thread(target=self.listen_loop)
        self.listen_thread.daemon = True
        self.listen_thread.start()

        self.scheduler_thread = Thread(target=self.scheduler)
        self.scheduler_thread.daemon = True
        self.scheduler_thread.start()
    else:
        self.listen_loop()
880,335
Starts the listen loop and executes the receive_datagram method whenever a packet is received. Args: None Returns: None
def listen_loop(self):
    while self.listening:
        try:
            data, address = self.sock.recvfrom(self.bufsize)
            self.receive_datagram(data, address)
            if self.stats_enabled:
                self.stats['bytes_recieved'] += len(data)
        except socket.error as error:
            if error.errno == errno.WSAECONNRESET:
                logger.info("connection reset")
            else:
                raise

    logger.info("Shutting down the listener...")
880,336
Starts the scheduler to check for scheduled calls and execute them at the correct time. Args: sleep_time (float): The amount of time to wait in seconds between each loop iteration. This prevents the scheduler from consuming 100% of the host's CPU. Defaults to 0.2 seconds. Returns: None
def scheduler(self, sleep_time=0.2):
    while self.listening:
        # If we have any scheduled calls, execute them and remove them from
        # our list of scheduled calls.
        if self.scheduled_calls:
            timestamp = time.time()
            self.scheduled_calls[:] = [item for item in self.scheduled_calls
                                       if not self.time_reached(timestamp, item)]
        time.sleep(sleep_time)

    logger.info("Shutting down the call scheduler...")
880,337
Executes when UDP data has been received and sends the packet data to our app to process the request. Args: data (str): The raw serialized packet data received. address (tuple): The address and port of the origin of the received packet. E.g. (address, port). Returns: None
def receive_datagram(self, data, address):
    # If we do not specify an application, just print the data.
    if not self.app:
        logger.debug("Packet received", address, data)
        return False

    # Take the data we've received from the network and send it
    # to our application for processing.
    try:
        response = self.app.handle_message(data, address)
    except Exception as err:
        logger.error("Error processing message from " + str(address) +
                     ":" + str(data))
        logger.error(traceback.format_exc())
        return False

    # If our application generated a response to this message,
    # send it to the original sender.
    if response:
        self.send_datagram(response, address)
880,341
Generate analysis output as html page Args: query_module (module): module to use for querying data for the desired model/pipeline variant, e.g. leonardo.standard.queries
def plots_html_page(query_module): # page template template = jenv.get_template("analysis.html") # container for template context context = dict(extended=config.EXTENDED) # a database client/session to run queries in cl = client.get_client() session = cl.create_session() # general styling seaborn.set_style('whitegrid') # # plot: painting area by decade, with linear regression # decade_df = query_module.decade_query() pix_size = pixels_to_inches((600, 400)) ax = seaborn.lmplot(x='decade', y='area', data=decade_df, size=pix_size[1], aspect=pix_size[0] / pix_size[1], scatter_kws={"s": 30, "alpha": 0.3}) ax.set(xlabel='Decade', ylabel='Area, m^2') context['area_by_decade_svg'] = fig_to_svg(plt.gcf()) plt.close('all') # # plot: painting area by gender, with logistic regression # if config.EXTENDED: gender_df = query_module.gender_query() pix_size = pixels_to_inches((600, 400)) g = seaborn.FacetGrid(gender_df, hue="gender", margin_titles=True, size=pix_size[1], aspect=pix_size[0] / pix_size[1]) bins = np.linspace(0, 5, 30) g.map(plt.hist, "area", bins=bins, lw=0, alpha=0.5, normed=True) g.axes[0, 0].set_xlabel('Area, m^2') g.axes[0, 0].set_ylabel('Percentage of paintings') context['area_by_gender_svg'] = fig_to_svg(plt.gcf()) plt.close('all') # # render template # out_file = path.join(out_dir, "analysis.html") html_content = template.render(**context) with open(out_file, 'w') as f: f.write(html_content) # done, clean up plt.close('all') session.close()
880,372
Convert `value` to bytes, accepts notations such as "4k" to mean 4096 bytes Args: value (str | unicode | int | None): Number of bytes optionally suffixed by a char from UNITS default_unit (str | unicode | None): Default unit to use for unqualified values base (int): Base to use (usually 1024) Returns: (int | None): Deduced bytesize value, if possible
def to_bytesize(value, default_unit=None, base=DEFAULT_BASE):
    if isinstance(value, (int, float)):
        return unitized(value, default_unit, base)

    if value is None:
        return None

    try:
        if value[-1].lower() == "b":
            # Accept notations such as "1mb", as they get used out of habit
            value = value[:-1]

        unit = value[-1:].lower()
        if unit.isdigit():
            unit = default_unit
        else:
            value = value[:-1]

        return unitized(to_number(float, value), unit, base)

    except (IndexError, TypeError, ValueError):
        return None
880,631
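Illustrative calls for to_bytesize. The exact results depend on the module's UNITS, unitized and to_number helpers, which are not shown here, so the values below only reflect the usual base-1024 convention implied by the docstring's "4k" example.

to_bytesize("4k")                  # -> 4096
to_bytesize("1mb")                 # trailing 'b' is tolerated -> 1048576
to_bytesize(10, default_unit="k")  # bare numbers fall back to the default unit -> 10240
to_bytesize("oops")                # unparseable input -> None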
BGEN file reader. Args: filename (str): The name of the BGEN file. sample_filename (str): The name of the sample file (optional). probability_threshold (float): The probability threshold.
def __init__(self, filename, sample_filename=None, chromosome=None, probability_threshold=0.9, cpus=1): # The BGEN reader (parallel or no) if cpus == 1: self.is_parallel = False self._bgen = PyBGEN(filename, prob_t=probability_threshold) else: self.is_parallel = True self._bgen = ParallelPyBGEN(filename, prob_t=probability_threshold, cpus=cpus) # Getting the samples self.samples = self._bgen.samples if self.samples is None: # The BGEN file didn't contain samples, so we read from file if sample_filename is None: raise ValueError("No sample information in BGEN file, " "requires a 'sample_filename'") self._parse_sample_file(sample_filename) # Does the user ask for a chromosome? self.chrom = chromosome
880,774
Get the genotypes from a well formed variant instance. Args: variant (Variant): A Variant instance. Returns: A list of Genotypes instances, each containing a pointer to the variant as well as a vector of encoded genotypes.
def get_variant_genotypes(self, variant): # The chromosome to search for (if a general one is set, that's the one # we need to search for) chrom = variant.chrom.name if self.chrom is not None and chrom == self.chrom: chrom = "NA" # Getting the results results = [] iterator = self._bgen.iter_variants_in_region( CHROM_STR_DECODE.get(chrom, chrom), variant.pos, variant.pos, ) for info, dosage in iterator: if (variant.alleles is None or variant.iterable_alleles_eq([info.a1, info.a2])): results.append(Genotypes( Variant( info.name, CHROM_STR_ENCODE.get(info.chrom, info.chrom), info.pos, [info.a1, info.a2], ), dosage, reference=info.a1, coded=info.a2, multiallelic=True, )) # If there are no results if not results: logging.variant_name_not_found(variant) return results
880,775
Iterates over the genotypes for variants using a list of names. Args: names (list): The list of names for variant extraction.
def iter_variants_by_names(self, names):
    if not self.is_parallel:
        yield from super().iter_variants_by_names(names)
    else:
        for info, dosage in self._bgen.iter_variants_by_names(names):
            yield Genotypes(
                Variant(info.name,
                        CHROM_STR_ENCODE.get(info.chrom, info.chrom),
                        info.pos, [info.a1, info.a2]),
                dosage,
                reference=info.a1,
                coded=info.a2,
                multiallelic=True,
            )
880,779
Get the genotype of a marker using its name. Args: name (str): The name of the marker. Returns: list: A list of Genotypes.
def get_variant_by_name(self, name):
    results = []
    try:
        for info, dosage in self._bgen.get_variant(name):
            results.append(Genotypes(
                Variant(
                    info.name,
                    CHROM_STR_ENCODE.get(info.chrom, info.chrom),
                    info.pos,
                    [info.a1, info.a2],
                ),
                dosage,
                reference=info.a1,
                coded=info.a2,
                multiallelic=False,
            ))
    except ValueError:
        logging.variant_name_not_found(name)

    return results
880,780
Iterates over the genotypes for variants using a list of names. Args: names (list): The list of names for variant extraction.
def iter_variants_by_names(self, names):
    for name in names:
        for result in self.get_variant_by_name(name):
            yield result
880,877
Calculate the average gradient for each shared variable across all towers. Note that this function provides a synchronization point across all towers. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over individual gradients. The inner list is over the gradient calculation for each tower. Returns: List of pairs of (gradient, variable) where the gradient has been averaged across all towers.
def avg_grads(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)

            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)

        # Average over the 'tower' dimension.
        grad = tf.concat(0, grads)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
881,183
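A NumPy analogue of what avg_grads computes, with made-up gradients and string stand-ins for the shared variables (the function itself targets the pre-1.0 TensorFlow API; note the old tf.concat(0, grads) argument order).

import numpy as np

var0, var1 = "w", "b"
tower_grads = [
    [(np.array([1.0, 2.0]), var0), (np.array([0.5]), var1)],  # tower 0
    [(np.array([3.0, 4.0]), var0), (np.array([1.5]), var1)],  # tower 1
]

averaged = [
    (np.mean(np.stack([g for g, _ in grad_and_vars]), axis=0), grad_and_vars[0][1])
    for grad_and_vars in zip(*tower_grads)
]
# averaged -> [(array([2., 3.]), 'w'), (array([1.]), 'b')]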
Process a string of content for include tags. This function assumes there are no blocks in the content. The content is split into segments, with include tags being replaced by Block objects. PARAMETERS: content -- str; content to be converted into a Block. block_map -- BlockMap link_stack -- LinkStack source_path -- str; the filepath of the file from which this content came. RETURNS: list of str; segments that comprise the content.
def process_links(include_match, block_map, link_stack, source_path): leading_whitespace = include_match.group(1) include_path = include_match.group(2) # Optional block name. If match is None, block name was ommitted (default to 'all'). block_name = include_match.group(3) if block_name is not None: block_name = block_name.lstrip(':') else: block_name = ALL_BLOCK_NAME return retrieve_block_from_map( source_path, include_path.strip(), block_name.strip(), leading_whitespace, block_map, link_stack)
881,310
IMPUTE2 file reader. Args: filename (str): The name of the IMPUTE2 file. sample_filename (str): The name of the SAMPLE file. probability_threshold (float): The probability threshold. Note ==== If the sample IDs are not unique, the index is changed to be the sample family ID and individual ID (i.e. fid_iid).
def __init__(self, filename, sample_filename, probability_threshold=0.9): # Reading the samples self.samples = pd.read_csv(sample_filename, sep=" ", skiprows=2, names=["fid", "iid", "missing", "father", "mother", "sex", "plink_geno"], dtype=dict(fid=str, iid=str)) # We want to set the index for the samples try: self.samples = self.samples.set_index("iid", verify_integrity=True) except ValueError: logging.info( "Setting the index as 'fid_iid' because the individual IDs " "are not unique." ) self.samples["fid_iid"] = [ "{fid}_{iid}".format(fid=fid, iid=iid) for fid, iid in zip(self.samples.fid, self.samples.iid) ] self.samples = self.samples.set_index( "fid_iid", verify_integrity=True, ) # The IMPUTE2 file self._impute2_file = get_open_func(filename)(filename, "r") # If we have an index, we read it self.has_index = path.isfile(filename + ".idx") self._impute2_index = None self._index_has_location = False if self.has_index: self._impute2_index = get_index( filename, cols=[0, 1, 2], names=["chrom", "name", "pos"], sep=" ", ) # Checking for duplicated marker iD try: self._impute2_index = self._impute2_index.set_index( "name", verify_integrity=True, ) self._has_duplicated = False except ValueError as e: self._has_duplicated = True # Finding the duplicated markers duplicated = self._impute2_index.name.duplicated(keep=False) duplicated_markers = self._impute2_index.loc[ duplicated, "name" ] duplicated_marker_counts = duplicated_markers.value_counts() # The dictionary that will contain information about the # duplicated markers self._dup_markers = { m: [] for m in duplicated_marker_counts.index } # Logging a warning logging.found_duplicates(duplicated_marker_counts.iteritems()) # Renaming the markers counter = Counter() for i, marker in duplicated_markers.iteritems(): counter[marker] += 1 new_name = "{}:dup{}".format(marker, counter[marker]) self._impute2_index.loc[i, "name"] = new_name # Updating the dictionary containing the duplicated markers self._dup_markers[marker].append(new_name) # Resetting the index self._impute2_index = self._impute2_index.set_index( "name", verify_integrity=True, ) # Checking if we have chrom/pos in the index self._index_has_location = ( "chrom" in self._impute2_index.columns and "pos" in self._impute2_index.columns ) if self._index_has_location: # Setting the multiallelic values self._impute2_index["multiallelic"] = False self._impute2_index.loc[ self._impute2_index.duplicated(["chrom", "pos"], keep=False), "multiallelic" ] = True # Saving the probability threshold self.prob_t = probability_threshold
881,404
Get the genotypes from a well formed variant instance. Args: variant (Variant): A Variant instance. Returns: A list of Genotypes instances, each containing a pointer to the variant as well as a vector of encoded genotypes.
def get_variant_genotypes(self, variant): if not self.has_index: raise NotImplementedError("Not implemented when IMPUTE2 file is " "not indexed (see genipe)") # Find the variant in the index try: impute2_chrom = CHROM_STR_TO_INT[variant.chrom.name] except KeyError: raise ValueError( "Invalid chromosome ('{}') for IMPUTE2.".format(variant.chrom) ) variant_info = self._impute2_index[ (self._impute2_index.chrom == impute2_chrom) & (self._impute2_index.pos == variant.pos) ] if variant_info.shape[0] == 0: logging.variant_not_found(variant) return [] elif variant_info.shape[0] == 1: return self._get_biallelic_variant(variant, variant_info) else: return self._get_multialleic_variant(variant, variant_info)
881,405
Parses the current IMPUTE2 line (a single variant). Args: line (str): An IMPUTE2 line. Returns: Genotypes: The genotype in dosage format. Warning ======= By default, the genotypes object has multiallelic set to False.
def _parse_impute2_line(self, line):
    # Splitting
    row = line.rstrip("\r\n").split(" ")

    # Constructing the probabilities
    prob = np.array(row[5:], dtype=float)
    prob.shape = (prob.shape[0] // 3, 3)

    # Constructing the dosage
    dosage = 2 * prob[:, 2] + prob[:, 1]

    if self.prob_t > 0:
        dosage[~np.any(prob >= self.prob_t, axis=1)] = np.nan

    return Genotypes(
        Variant(row[1], CHROM_STR_ENCODE.get(row[0], row[0]), int(row[2]),
                [row[3], row[4]]),
        dosage,
        reference=row[3],
        coded=row[4],
        multiallelic=False,
    )
881,413
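A small worked example of the probability-to-dosage conversion used in _parse_impute2_line, with made-up genotype probabilities and a 0.9 threshold.

import numpy as np

# One sample per row: P(AA), P(AB), P(BB), where A is reference and B is coded.
prob = np.array([[0.95, 0.05, 0.00],   # confident AA
                 [0.02, 0.96, 0.02],   # confident AB
                 [0.40, 0.35, 0.25]])  # no call above the threshold

dosage = 2 * prob[:, 2] + prob[:, 1]   # expected count of the coded allele
# -> [0.05, 1.0, 0.85]

dosage[~np.any(prob >= 0.9, axis=1)] = np.nan
# -> [0.05, 1.0, nan]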
get ore:aggregates for this resource, optionally retrieving resource payload Args: retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload
def get_related(self): if self.exists and hasattr(self.rdf.triples, 'ore') and hasattr(self.rdf.triples.ore, 'aggregates'): related = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.ore.aggregates ] # return return related else: return []
881,416
get pcdm:hasMember for this resource Args: retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload
def get_members(self, retrieve=False): if self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasMember'): members = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasMember ] # return return members else: return []
881,420
get pcdm:hasFile for this resource Args: retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload
def get_files(self, retrieve=False): if self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasFile'): files = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasFile ] # return return files else: return []
881,421
get pcdm:hasRelatedFile for this resource Args: retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload
def get_associated(self, retrieve=False): if self.exists and hasattr(self.rdf.triples, 'pcdm') and hasattr(self.rdf.triples.pcdm, 'hasRelatedFile'): files = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.pcdm.hasRelatedFile ] # return return files else: return []
881,422
Binary plink file reader. Args: prefix (str): the prefix of the Plink binary files.
def __init__(self, prefix): self.bed = PyPlink(prefix) self.bim = self.bed.get_bim() self.fam = self.bed.get_fam() # Identify all multi-allelics. self.bim["multiallelic"] = False self.bim.loc[ self.bim.duplicated(["chrom", "pos"], keep=False), "multiallelic" ] = True # We want to set the index for the FAM file try: self.fam = self.fam.set_index("iid", verify_integrity=True) except ValueError: logging.info( "Setting the index as 'fid_iid' because the individual IDs " "are not unique." ) self.fam["fid_iid"] = [ "{fid}_{iid}".format(fid=fid, iid=iid) for fid, iid in zip(self.fam.fid, self.fam.iid) ] self.fam = self.fam.set_index("fid_iid", verify_integrity=True)
881,431
Get the genotypes from a well formed variant instance. Args: variant (Variant): A Variant instance. Returns: A list of Genotypes instances, each containing a pointer to the variant as well as a vector of encoded genotypes. Note ==== If the sample IDs are not unique, the index is changed to be the sample family ID and individual ID (i.e. fid_iid).
def get_variant_genotypes(self, variant): # Find the variant in the bim. try: plink_chrom = CHROM_STR_TO_INT[variant.chrom.name] except KeyError: raise ValueError( "Invalid chromosome ('{}') for Plink.".format(variant.chrom) ) info = self.bim.loc[ (self.bim.chrom == plink_chrom) & (self.bim.pos == variant.pos), : ] if info.shape[0] == 0: logging.variant_not_found(variant) return [] elif info.shape[0] == 1: return self._get_biallelic_variant(variant, info) else: return self._get_multialleic_variant(variant, info)
881,432
Get the genotype of a marker using its name. Args: name (str): The name of the marker. Returns: list: A list of Genotypes (only one for PyPlink, see note below). Note ==== From PyPlink version 1.3.2 and onwards, each name is unique in the dataset. Hence, we can use the 'get_geno_marker' function and be sure only one variant is returned.
def get_variant_by_name(self, name): # From 1.3.2 onwards, PyPlink sets unique names. # Getting the genotypes try: geno, i = self.bed.get_geno_marker(name, return_index=True) except ValueError: if name in self.bed.get_duplicated_markers(): # The variant is a duplicated one, so we go through all the # variants with the same name and the :dupx suffix return [ self.get_variant_by_name(dup_name).pop() for dup_name in self.bed.get_duplicated_markers()[name] ] else: # The variant is not in the BIM file, so we return an empty # list logging.variant_name_not_found(name) return [] else: info = self.bim.iloc[i, :] return [Genotypes( Variant(info.name, CHROM_INT_TO_STR[info.chrom], info.pos, [info.a1, info.a2]), self._normalize_missing(geno), reference=info.a2, coded=info.a1, multiallelic=info.multiallelic, )]
881,438
Compute LD between a marker and a list of markers. Args: cur_geno (Genotypes): The genotypes of the marker. other_genotypes (list): A list of genotypes. Returns: numpy.array: An array containing the r or r**2 values between cur_geno and other_genotypes. Note: The genotypes will automatically be normalized using (x - mean) / std.
def compute_ld(cur_geno, other_genotypes, r2=False):
    # Normalizing the current genotypes
    norm_cur = normalize_genotypes(cur_geno)

    # Normalizing and creating the matrix for the other genotypes
    norm_others = np.stack(
        tuple(normalize_genotypes(g) for g in other_genotypes),
        axis=1,
    )

    # Making sure the size is the same
    assert norm_cur.shape[0] == norm_others.shape[0]

    # Getting the number of "samples" per marker (taking into account NaN)
    n = (
        ~np.isnan(norm_cur.reshape(norm_cur.shape[0], 1)) *
        ~np.isnan(norm_others)
    ).sum(axis=0)

    # Computing r (replacing NaN by 0)
    r = pd.Series(
        np.dot(np.nan_to_num(norm_cur), np.nan_to_num(norm_others) / n),
        index=[g.variant.name for g in other_genotypes],
        name="r2" if r2 else "r",
    )

    # Checking no "invalid" values (i.e. < -1 or > 1)
    r.loc[r > 1] = 1
    r.loc[r < -1] = -1

    if r2:
        return r ** 2
    else:
        return r
881,475
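The r computed by compute_ld is just the mean product of the two standardized genotype vectors; here is a tiny hedged check with made-up dosages and no missing values (so n is simply the number of samples).

import numpy as np

x = np.array([0.0, 1.0, 2.0, 1.0, 0.0])   # dosages at marker 1 (made up)
y = np.array([0.0, 1.0, 2.0, 2.0, 0.0])   # dosages at marker 2 (made up)

zx = (x - x.mean()) / x.std()
zy = (y - y.mean()) / y.std()

r = np.dot(zx, zy) / len(x)   # matches compute_ld for a single other marker
r2 = r ** 2                   # squared form, as returned when r2=True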
Normalize the genotypes. Args: genotypes (Genotypes): The genotypes to normalize. Returns: numpy.array: The normalized genotypes.
def normalize_genotypes(genotypes):
    genotypes = genotypes.genotypes
    return (genotypes - np.nanmean(genotypes)) / np.nanstd(genotypes)
881,476
Infer the antibiotics resistance of the given record. Arguments: record (`~Bio.SeqRecord.SeqRecord`): an annotated sequence. Raises: RuntimeError: when there's not exactly one resistance cassette.
def find_resistance(record):
    for feature in record.features:
        labels = set(feature.qualifiers.get("label", []))
        cassettes = labels.intersection(_ANTIBIOTICS)
        if len(cassettes) > 1:
            raise RuntimeError("multiple resistance cassettes detected")
        elif len(cassettes) == 1:
            return _ANTIBIOTICS.get(cassettes.pop())
    raise RuntimeError("could not find the resistance of '{}'".format(record.id))
881,559
Build an index for the given file. Args: fn (str): the name of the file. cols (list): a list of the columns to keep (as int). names (list): the names corresponding to the columns to keep (as str). sep (str): the field separator. Returns: pandas.DataFrame: the index.
def generate_index(fn, cols=None, names=None, sep=" "):
    # Some assertions
    assert cols is not None, "'cols' was not set"
    assert names is not None, "'names' was not set"
    assert len(cols) == len(names)

    # Getting the open function
    bgzip, open_func = get_open_func(fn, return_fmt=True)

    # Reading the required columns
    data = pd.read_csv(fn, sep=sep, engine="c", usecols=cols, names=names,
                       compression="gzip" if bgzip else None)

    # Getting the seek information
    f = open_func(fn, "rb")
    data["seek"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]
    f.close()

    # Saving the index to file
    write_index(get_index_fn(fn), data)

    return data
881,677
Get the opening function. Args: fn (str): the name of the file. return_fmt (bool): if the file format needs to be returned. Returns: tuple: if return_fmt is True, a tuple of two elements: a boolean telling if the format is bgzip, and the opening function; otherwise, just the opening function.
def get_open_func(fn, return_fmt=False):
    # The file might be compressed using bgzip
    bgzip = None
    with open(fn, "rb") as i_file:
        bgzip = i_file.read(3) == b"\x1f\x8b\x08"

    if bgzip and not HAS_BIOPYTHON:
        raise ValueError("needs BioPython to index a bgzip file")

    open_func = open
    if bgzip:
        open_func = BgzfReader

    # Trying to read
    try:
        with open_func(fn, "r") as i_file:
            if bgzip:
                if not i_file.seekable():
                    raise ValueError
            pass
    except ValueError:
        raise ValueError("{}: use bgzip for compression...".format(fn))

    if return_fmt:
        return bgzip, open_func

    return open_func
881,678
Restores the index for a given file. Args: fn (str): the name of the file. cols (list): a list of the columns to keep (as int). names (list): the names corresponding to the columns to keep (as str). sep (str): the field separator. Returns: pandas.DataFrame: the index. If the index doesn't exist for the file, it is first created.
def get_index(fn, cols, names, sep):
    if not has_index(fn):
        # The index doesn't exist, generate it
        return generate_index(fn, cols, names, sep)

    # Retrieving the index
    file_index = read_index(get_index_fn(fn))

    # Checking the names are there
    if len(set(names) - (set(file_index.columns) - {'seek'})) != 0:
        raise ValueError("{}: missing index columns: reindex".format(fn))

    if "seek" not in file_index.columns:
        raise ValueError("{}: invalid index: reindex".format(fn))

    return file_index
881,679
Writes the index to file. Args: fn (str): the name of the file that will contain the index. index (pandas.DataFrame): the index.
def write_index(fn, index):
    with open(fn, "wb") as o_file:
        o_file.write(_CHECK_STRING)
        o_file.write(zlib.compress(bytes(
            index.to_csv(None, index=False, encoding="utf-8"),
            encoding="utf-8",
        )))
881,680
Reads index from file. Args: fn (str): the name of the file containing the index. Returns: pandas.DataFrame: the index of the file. Before reading the index, we check the first couple of bytes to see if it is a valid index file.
def read_index(fn):
    index = None
    with open(fn, "rb") as i_file:
        if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:
            raise ValueError("{}: not a valid index file".format(fn))

        index = pd.read_csv(io.StringIO(
            zlib.decompress(i_file.read()).decode(encoding="utf-8"),
        ))

    return index
881,681
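A minimal roundtrip sketch for write_index/read_index above; the filename is arbitrary and _CHECK_STRING is the module-level marker whose value is not shown in this snippet.

import pandas as pd

index = pd.DataFrame({"chrom": ["1", "1"], "name": ["rs1", "rs2"],
                      "pos": [1000, 2000], "seek": [0, 57]})

write_index("genotypes.impute2.idx", index)   # marker + zlib-compressed CSV
restored = read_index("genotypes.impute2.idx")
assert list(restored.columns) == ["chrom", "name", "pos", "seek"]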
Indexes an IMPUTE2 file. Args: fn (str): The name of the IMPUTE2 file.
def index_impute2(fn):
    logger.info("Indexing {} (IMPUTE2)".format(fn))
    impute2_index(fn, cols=[0, 1, 2], names=["chrom", "name", "pos"], sep=" ")
    logger.info("Index generated")
881,695
Indexes a BGEN file. Args: fn (str): The name of the BGEN file.
def index_bgen(fn, legacy=False):
    logger.info("Indexing {} (BGEN) using 'bgenix'{}".format(
        fn, " (legacy mode)" if legacy else "",
    ))

    command = ["bgenix", "-g", fn, "-index"]
    if legacy:
        command.append("-with-rowid")

    try:
        logger.info("Executing '{}'".format(" ".join(command)))
        subprocess.Popen(command).communicate()
    except FileNotFoundError:
        logger.error("Cannot find 'bgenix', impossible to index {}".format(fn))
        sys.exit(1)

    logger.info("Index generated")
881,696
Returns the default action fluents regardless of the current `state` and `timestep`. Args: state (Sequence[tf.Tensor]): The current state fluents. timestep (tf.Tensor): The current timestep. Returns: Sequence[tf.Tensor]: A tuple of action fluents.
def __call__(self,
             state: Sequence[tf.Tensor],
             timestep: tf.Tensor) -> Sequence[tf.Tensor]:
    return self._default
882,062
Render the simulated state-action `trajectories` for Navigation domain. Args: stats: Performance statistics. trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render.
def render(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array], batch: Optional[int] = None) -> None: non_fluents, initial_state, states, actions, interms, rewards = trajectories non_fluents = dict(non_fluents) states = dict((name, fluent[0]) for name, fluent in states) actions = dict((name, fluent[0]) for name, fluent in actions) rewards = rewards[0] idx = self._compiler.rddl.domain.state_fluent_ordering.index('location/1') start = initial_state[idx][0] g = non_fluents['GOAL/1'] path = states['location/1'] deltas = actions['move/1'] centers = non_fluents['DECELERATION_ZONE_CENTER/2'] decays = non_fluents['DECELERATION_ZONE_DECAY/1'] zones = [(x, y, d) for (x, y), d in zip(centers, decays)] self._ax1 = plt.gca() self._render_state_space() self._render_start_and_goal_positions(start, g) self._render_deceleration_zones(zones) self._render_state_action_trajectory(start, path, deltas) plt.title('Navigation', fontweight='bold') plt.legend(loc='lower right') plt.show()
882,086
Prints the first batch of simulated `trajectories`. Args: trajectories: NonFluents, states, actions, interms and rewards.
def _render_trajectories(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array]) -> None: if self._verbose: non_fluents, initial_state, states, actions, interms, rewards = trajectories shape = states[0][1].shape batch_size, horizon, = shape[0], shape[1] states = [(s[0], s[1][0]) for s in states] interms = [(f[0], f[1][0]) for f in interms] actions = [(a[0], a[1][0]) for a in actions] rewards = np.reshape(rewards, [batch_size, horizon])[0] self._render_batch(non_fluents, states, actions, interms, rewards)
882,129
Prints `non_fluents`, `states`, `actions`, `interms` and `rewards` for given `horizon`. Args: states (Sequence[Tuple[str, np.array]]): A state trajectory. actions (Sequence[Tuple[str, np.array]]): An action trajectory. interms (Sequence[Tuple[str, np.array]]): An interm state trajectory. rewards (np.array): Sequence of rewards (1-dimensional array). horizon (Optional[int]): Number of timesteps.
def _render_batch(self, non_fluents: NonFluents, states: Fluents, actions: Fluents, interms: Fluents, rewards: np.array, horizon: Optional[int] = None) -> None: if horizon is None: horizon = len(states[0][1]) self._render_round_init(horizon, non_fluents) for t in range(horizon): s = [(s[0], s[1][t]) for s in states] f = [(f[0], f[1][t]) for f in interms] a = [(a[0], a[1][t]) for a in actions] r = rewards[t] self._render_timestep(t, s, a, f, r) self._render_round_end(rewards)
882,130
Prints fluents and rewards for the given timestep `t`. Args: t (int): timestep s (Sequence[Tuple[str, np.array]]): State fluents. a (Sequence[Tuple[str, np.array]]): Action fluents. f (Sequence[Tuple[str, np.array]]): Interm state fluents. r (np.float32): Reward.
def _render_timestep(self, t: int, s: Fluents, a: Fluents, f: Fluents, r: np.float32) -> None: print("============================") print("TIME = {}".format(t)) print("============================") fluent_variables = self._compiler.rddl.action_fluent_variables self._render_fluent_timestep('action', a, fluent_variables) fluent_variables = self._compiler.rddl.interm_fluent_variables self._render_fluent_timestep('interms', f, fluent_variables) fluent_variables = self._compiler.rddl.state_fluent_variables self._render_fluent_timestep('states', s, fluent_variables) self._render_reward(r)
882,131
Prints `fluents` of given `fluent_type` as list of instantiated variables with corresponding values. Args: fluent_type (str): Fluent type. fluents (Sequence[Tuple[str, np.array]]): List of pairs (fluent_name, fluent_values). fluent_variables (Sequence[Tuple[str, List[str]]]): List of pairs (fluent_name, args).
def _render_fluent_timestep(self, fluent_type: str, fluents: Sequence[Tuple[str, np.array]], fluent_variables: Sequence[Tuple[str, List[str]]]) -> None: for fluent_pair, variable_list in zip(fluents, fluent_variables): name, fluent = fluent_pair _, variables = variable_list print(name) fluent = fluent.flatten() for variable, value in zip(variables, fluent): print('- {}: {} = {}'.format(fluent_type, variable, value)) print()
882,132
Returns action fluents for the current `state` and `timestep`. Args: state (Sequence[tf.Tensor]): The current state fluents. timestep (tf.Tensor): The current timestep. Returns: Sequence[tf.Tensor]: A tuple of action fluents.
def __call__(self,
             state: Sequence[tf.Tensor],
             timestep: tf.Tensor) -> Sequence[tf.Tensor]:
    raise NotImplementedError
882,136
Renders the simulated `trajectories` for the given `batch`. Args: trajectories: NonFluents, states, actions, interms and rewards. batch: Number of batches to render.
def render(self,
           trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],
           batch: Optional[int] = None) -> None:
    raise NotImplementedError
882,164
Reads genotypes from a pandas DataFrame. Args: dataframe (pandas.DataFrame): The data. map_info (pandas.DataFrame): The mapping information. Note ==== The index of the dataframe should be the sample IDs. The index of the map_info should be the variant name, and there should be columns named chrom and pos.
def __init__(self, dataframe, map_info):
    self.df = dataframe
    self.map_info = map_info
882,256
Get the genotypes for a given variant (by name). Args: name (str): The name of the variant to retrieve the genotypes. Returns: list: A list of Genotypes. This is a list in order to keep the same behaviour as the other functions.
def get_variant_by_name(self, name):
    try:
        geno = self.df.loc[:, name].values
        info = self.map_info.loc[name, :]

    except KeyError:
        # The variant is not in the data, so we return an empty list
        logging.variant_name_not_found(name)
        return []

    else:
        return [Genotypes(
            Variant(info.name, info.chrom, info.pos, [info.a1, info.a2]),
            geno,
            reference=info.a2,
            coded=info.a1,
            multiallelic=False,
        )]
882,258
Hashes HTML block tags. PARAMETERS: text -- str; Markdown text hashes -- dict; a dictionary of all hashes, where keys are hashes and values are their unhashed versions. When HTML block tags are used, all content inside the tags is preserved as-is, without any Markdown processing. See block_tags for a list of block tags.
def hash_blocks(text, hashes):
    def sub(match):
        block = match.group(1)
        hashed = hash_text(block, 'block')
        hashes[hashed] = block
        return '\n\n' + hashed + '\n\n'
    return re_block.sub(sub, text)
882,428
parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef Args: uri (rdflib.term.URIRef,str): input URI Returns: rdflib.term.URIRef
def parse_uri(self, uri=None): # no uri provided, assume root if not uri: return rdflib.term.URIRef(self.root) # string uri provided elif type(uri) == str: # assume "short" uri, expand with repo root if type(uri) == str and not uri.startswith('http'): return rdflib.term.URIRef("%s%s" % (self.root, uri)) # else, assume full uri else: return rdflib.term.URIRef(uri) # already rdflib.term.URIRef elif type(uri) == rdflib.term.URIRef: return uri # unknown input else: raise TypeError('invalid URI input')
882,679
Convenience method for creating a new resource Note: A Resource is instantiated, but is not yet created. Still requires resource.create(). Args: uri (rdflib.term.URIRef, str): uri of resource to create resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create Returns: (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
def create_resource(self, resource_type=None, uri=None): if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]: return resource_type(self, uri) else: raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource")
882,680
Request new transaction from repository, init new Transaction, store in self.txns Args: txn_name (str): human name for transaction Return: (Transaction): returns instance of newly created transaction
def start_txn(self, txn_name=None): # if no name provided, create one if not txn_name: txn_name = uuid.uuid4().hex # request new transaction txn_response = self.api.http_request('POST','%s/fcr:tx' % self.root, data=None, headers=None) # if 201, transaction was created if txn_response.status_code == 201: txn_uri = txn_response.headers['Location'] logger.debug("spawning transaction: %s" % txn_uri) # init new Transaction, and pass Expires header txn = Transaction( self, # pass the repository txn_name, txn_uri, expires = txn_response.headers['Expires']) # append to self self.txns[txn_name] = txn # return return txn
882,682
Retrieves known transaction and adds to self.txns. TODO: Perhaps this should send a keep-alive request as well? Obviously still needed, and would reset timer. Args: txn_name (str): local, human name for transaction txn_uri (str, rdflib.term.URIRef): uri of the transaction. e.g. http://localhost:8080/rest/txn:123456789 Return: (Transaction) local instance of transaction from self.txns[txn_name]
def get_txn(self, txn_name, txn_uri):
    # parse uri
    txn_uri = self.parse_uri(txn_uri)

    # request new transaction
    txn_response = self.api.http_request('GET', txn_uri, data=None, headers=None)

    # if 200, transaction exists
    if txn_response.status_code == 200:
        logger.debug("transaction found: %s" % txn_uri)

        # init new Transaction, and pass Expires header
        txn = Transaction(
            self,  # pass the repository
            txn_name,
            txn_uri,
            expires=None)

        # append to self
        self.txns[txn_name] = txn

        # return
        return txn

    # if 404, transaction does not exist
    elif txn_response.status_code in [404, 410]:
        logger.debug("transaction does not exist: %s" % txn_uri)
        return False

    else:
        raise Exception('HTTP %s, could not retrieve transaction' % txn_response.status_code)
882,683
Keep current transaction alive, updates self.expires Args: None Return: None: sets new self.expires
def keep_alive(self): # keep transaction alive txn_response = self.api.http_request('POST','%sfcr:tx' % self.root, data=None, headers=None) # if 204, transaction kept alive if txn_response.status_code == 204: logger.debug("continuing transaction: %s" % self.root) # update status and timer self.active = True self.expires = txn_response.headers['Expires'] return True # if 410, transaction does not exist elif txn_response.status_code == 410: logger.debug("transaction does not exist: %s" % self.root) self.active = False return False else: raise Exception('HTTP %s, could not continue transaction' % txn_response.status_code)
882,685
Ends transaction by committing, or rolling back, all changes during transaction. Args: close_type (str): expects "commit" or "rollback" Return: (bool)
def _close(self, close_type): # commit transaction txn_response = self.api.http_request('POST','%sfcr:tx/fcr:%s' % (self.root, close_type), data=None, headers=None) # if 204, transaction was closed if txn_response.status_code == 204: logger.debug("%s for transaction: %s, successful" % (close_type, self.root)) # update self.active self.active = False # return return True # if 410 or 404, transaction does not exist elif txn_response.status_code in [404, 410]: logger.debug("transaction does not exist: %s" % self.root) # update self.active self.active = False return False else: raise Exception('HTTP %s, could not commit transaction' % txn_response.status_code)
882,686
parse resource type from self.http_request() Note: uses isinstance() as plugins may extend these base LDP resource type. Args: response (requests.models.Response): response object Returns: [NonRDFSource, BasicContainer, DirectContainer, IndirectContainer]
def parse_resource_type(self, response): # parse 'Link' header links = [ link.split(";")[0].lstrip('<').rstrip('>') for link in response.headers['Link'].split(', ') if link.startswith('<http://www.w3.org/ns/ldp#')] # parse resource type string with self.repo.namespace_manager.compute_qname() ldp_resource_types = [ self.repo.namespace_manager.compute_qname(resource_type)[2] for resource_type in links] logger.debug('Parsed LDP resource types from LINK header: %s' % ldp_resource_types) # with LDP types in hand, select appropriate resource type # NonRDF Source if 'NonRDFSource' in ldp_resource_types: return NonRDFSource # Basic Container elif 'BasicContainer' in ldp_resource_types: return BasicContainer # Direct Container elif 'DirectContainer' in ldp_resource_types: return DirectContainer # Indirect Container elif 'IndirectContainer' in ldp_resource_types: return IndirectContainer else: logger.debug('could not determine resource type from Link header, returning False') return False
882,688
small function to parse RDF payloads from various repository endpoints Args: data (response.data): data from requests response headers (response.headers): headers from requests response Returns: (rdflib.Graph): parsed graph
def parse_rdf_payload(self, data, headers): # handle edge case for content-types not recognized by rdflib parser if headers['Content-Type'].startswith('text/plain'): logger.debug('text/plain Content-Type detected, using application/n-triples for parser') parse_format = 'application/n-triples' else: parse_format = headers['Content-Type'] # clean parse format for rdf parser (see: https://www.w3.org/2008/01/rdf-media-types) if ';charset' in parse_format: parse_format = parse_format.split(';')[0] # parse graph graph = rdflib.Graph().parse( data=data.decode('utf-8'), format=parse_format) # return graph return graph
882,689
Small method to loop through three graphs in self.diffs, identify unique namespace URIs. Then, loop through provided dictionary of prefixes and pin one to another. Args: None: uses self.prefixes and self.diffs Returns: None: sets self.update_namespaces and self.update_prefixes
def _derive_namespaces(self): # iterate through graphs and get unique namespace uris for graph in [self.diffs.overlap, self.diffs.removed, self.diffs.added]: for s,p,o in graph: try: ns_prefix, ns_uri, predicate = graph.compute_qname(p) # predicates self.update_namespaces.add(ns_uri) except: logger.debug('could not parse Object URI: %s' % ns_uri) try: ns_prefix, ns_uri, predicate = graph.compute_qname(o) # objects self.update_namespaces.add(ns_uri) except: logger.debug('could not parse Object URI: %s' % ns_uri) logger.debug(self.update_namespaces) # build unique prefixes dictionary # NOTE: can improve by using self.rdf.uris (reverse lookup of self.rdf.prefixes) for ns_uri in self.update_namespaces: for k in self.prefixes.__dict__: if str(ns_uri) == str(self.prefixes.__dict__[k]): logger.debug('adding prefix %s for uri %s to unique_prefixes' % (k,str(ns_uri))) self.update_prefixes[k] = self.prefixes.__dict__[k]
882,691
Using the three graphs derived from self._diff_graph(), build a sparql update query in the format: PREFIX foo: <http://foo.com> PREFIX bar: <http://bar.com> DELETE {...} INSERT {...} WHERE {...} Args: None: uses variables from self Returns: (str) sparql update query as string
def build_query(self): # derive namespaces to include prefixes in Sparql update query self._derive_namespaces() sparql_query = '' # add prefixes for ns_prefix, ns_uri in self.update_prefixes.items(): sparql_query += "PREFIX %s: <%s>\n" % (ns_prefix, str(ns_uri)) # deletes removed_serialized = self.diffs.removed.serialize(format='nt').decode('utf-8') sparql_query += '\nDELETE {\n%s}\n\n' % removed_serialized # inserts added_serialized = self.diffs.added.serialize(format='nt').decode('utf-8') sparql_query += '\nINSERT {\n%s}\n\n' % added_serialized # where (not yet implemented) sparql_query += 'WHERE {}' # debug # logger.debug(sparql_query) # return query return sparql_query
882,692
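The diff graphs themselves are produced elsewhere, but a hedged sketch of the general technique looks roughly like this, assuming rdflib's graph_diff() and an rdflib version whose serialize() returns bytes, as in the code above.

import rdflib
from rdflib.compare import graph_diff

# two toy graphs standing in for the "before" and "after" state of a resource
old_g, new_g = rdflib.Graph(), rdflib.Graph()
subj = rdflib.URIRef('http://example.org/foo')
title = rdflib.URIRef('http://purl.org/dc/elements/1.1/title')
old_g.add((subj, title, rdflib.Literal('Old title')))
new_g.add((subj, title, rdflib.Literal('New title')))

# overlap / removed / added, analogous to self.diffs above
overlap, removed, added = graph_diff(old_g, new_g)

# assemble the same DELETE/INSERT/WHERE shape that build_query() produces
query = 'DELETE {\n%s}\n\nINSERT {\n%s}\n\nWHERE {}' % (
    removed.serialize(format='nt').decode('utf-8'),
    added.serialize(format='nt').decode('utf-8'))
print(query)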
Handles response from self.create() Args: response (requests.models.Response): response object from self.create() ignore_tombstone (bool): If True, will attempt creation; if a tombstone exists (410), will delete the tombstone and retry auto_refresh (bool): whether or not to refresh resource attributes after successful creation; falls back to repo.default_auto_refresh when None
def _handle_create(self, response, ignore_tombstone, auto_refresh):

        # 201, success, refresh
        if response.status_code == 201:
            # if not specifying uri, capture from response and append to object
            self.uri = self.repo.parse_uri(response.text)
            # creation successful
            if auto_refresh:
                self.refresh()
            elif auto_refresh == None:
                if self.repo.default_auto_refresh:
                    self.refresh()
            # fire resource._post_create hook if exists
            if hasattr(self,'_post_create'):
                self._post_create(auto_refresh=auto_refresh)

        # 404, assumed POST, target location does not exist
        elif response.status_code == 404:
            raise Exception('HTTP 404, for this POST request target location does not exist')

        # 409, conflict, resource likely exists
        elif response.status_code == 409:
            raise Exception('HTTP 409, resource already exists')

        # 410, tombstone present
        elif response.status_code == 410:
            if ignore_tombstone:
                response = self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)
                if response.status_code == 204:
                    logger.debug('tombstone removed, retrying create')
                    self.create()
                else:
                    raise Exception('HTTP %s, Could not remove tombstone for %s' % (response.status_code, self.uri))
            else:
                raise Exception('tombstone for %s detected, aborting' % self.uri)

        # 415, unsupported media type
        elif response.status_code == 415:
            raise Exception('HTTP 415, unsupported media type')

        # unknown status code
        else:
            raise Exception('HTTP %s, unknown error creating resource' % response.status_code)

        # if all goes well, return self
        return self
882,696
Small method to return headers of an OPTIONS request to self.uri Args: None Return: (dict) response headers from OPTIONS request
def options(self):

        # http request
        response = self.repo.api.http_request('OPTIONS', self.uri)
        return response.headers
882,697
Method to copy resource to another location Args: destination (rdflib.term.URIRef, str): URI location to copy the resource to Returns: (rdflib.term.URIRef): URI of the new copy of the resource
def copy(self, destination):

        # set destination header for copy
        destination_uri = self.repo.parse_uri(destination)

        # http request
        response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination':destination_uri.toPython()})

        # handle response
        if response.status_code == 201:
            return destination_uri
        else:
            raise Exception('HTTP %s, could not copy resource %s to %s' % (response.status_code, self.uri, destination_uri))
882,699
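A hypothetical call, assuming `foo` is an already-retrieved resource and that the repository's parse_uri() accepts a path relative to the repository root:

# copy foo to a new location; the destination URIRef is returned on HTTP 201
bar_uri = foo.copy('bar')

# the copy can then be opened like any other resource
bar = foo.repo.get_resource(bar_uri)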
Method to delete resources. Args: remove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource. Returns: (bool)
def delete(self, remove_tombstone=True):

        response = self.repo.api.http_request('DELETE', self.uri)

        # update exists
        if response.status_code == 204:
            # removal successful, updating self
            self._empty_resource_attributes()

        if remove_tombstone:
            self.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)

        return True
882,700
Performs GET request and refreshes RDF information for resource. Args: refresh_binary (bool): if True and the resource is a NonRDFSource, binary attributes are refreshed as well Returns: None
def refresh(self, refresh_binary=True):

        updated_self = self.repo.get_resource(self.uri)

        # if resource type of updated_self != self, raise exception
        if not isinstance(self, type(updated_self)):
            raise Exception('Instantiated %s, but repository reports this resource is %s' % (type(updated_self), type(self)) )

        if updated_self:

            # update attributes
            self.status_code = updated_self.status_code
            self.rdf.data = updated_self.rdf.data
            self.headers = updated_self.headers
            self.exists = updated_self.exists

            # update graph if RDFSource
            if type(self) != NonRDFSource:
                self._parse_graph()

            # empty versions
            self.versions = SimpleNamespace()

            # if NonRDF, set binary attributes
            if type(updated_self) == NonRDFSource and refresh_binary:
                self.binary.refresh(updated_self)

            # fire resource._post_refresh hook if exists
            if hasattr(self,'_post_refresh'):
                self._post_refresh()

            # cleanup
            del(updated_self)

        else:
            logger.debug('resource %s not found, dumping values' % self.uri)
            self._empty_resource_attributes()
882,701
Parse incoming rdf as self.rdf.orig_graph, create copy at self.rdf.graph Args: data (): payload from GET request, expected RDF content in various serialization formats Returns: None
def _build_rdf(self, data=None):

        # recreate rdf data
        self.rdf = SimpleNamespace()
        self.rdf.data = data
        self.rdf.prefixes = SimpleNamespace()
        self.rdf.uris = SimpleNamespace()

        # populate prefixes
        for prefix,uri in self.repo.context.items():
            setattr(self.rdf.prefixes, prefix, rdflib.Namespace(uri))

        # graph
        self._parse_graph()
882,702
use Content-Type from headers to determine parsing method Args: None Return: None: sets self.rdf by parsing data from GET request, or setting blank graph if resource does not yet exist
def _parse_graph(self):

        # if resource exists, parse self.rdf.data
        if self.exists:
            self.rdf.graph = self.repo.api.parse_rdf_payload(self.rdf.data, self.headers)

        # else, create empty graph
        else:
            self.rdf.graph = rdflib.Graph()

        # bind any additional namespaces from repo instance, but do not override
        self.rdf.namespace_manager = rdflib.namespace.NamespaceManager(self.rdf.graph)
        for ns_prefix, ns_uri in self.rdf.prefixes.__dict__.items():
            self.rdf.namespace_manager.bind(ns_prefix, ns_uri, override=False)

        # conversely, add namespaces from parsed graph to self.rdf.prefixes
        for ns_prefix, ns_uri in self.rdf.graph.namespaces():
            setattr(self.rdf.prefixes, ns_prefix, rdflib.Namespace(ns_uri))
            setattr(self.rdf.uris, rdflib.Namespace(ns_uri), ns_prefix)

        # pin old graph to resource, create copy graph for modifications
        self.rdf._orig_graph = copy.deepcopy(self.rdf.graph)

        # parse triples for object-like access
        self.parse_object_like_triples()
882,703
method to parse triples from self.rdf.graph for object-like access Args: None Returns: None: sets self.rdf.triples
def parse_object_like_triples(self):

        # parse triples as object-like attributes in self.rdf.triples
        self.rdf.triples = SimpleNamespace()

        # prepare triples
        for s,p,o in self.rdf.graph:

            # get ns info
            ns_prefix, ns_uri, predicate = self.rdf.graph.compute_qname(p)

            # if prefix as list not yet added, add
            if not hasattr(self.rdf.triples, ns_prefix):
                setattr(self.rdf.triples, ns_prefix, SimpleNamespace())

            # same for predicate
            if not hasattr(getattr(self.rdf.triples, ns_prefix), predicate):
                setattr(getattr(self.rdf.triples, ns_prefix), predicate, [])

            # append object for this prefix
            getattr(getattr(self.rdf.triples, ns_prefix), predicate).append(o)
882,704
small method to empty values if resource is removed or absent Args: None Return: None: empties selected resource attributes
def _empty_resource_attributes(self):

        self.status_code = 404
        self.headers = {}
        self.exists = False

        # build RDF
        # note: _build_rdf() sets self.rdf internally and returns None,
        # so it is called for its side effect rather than assigned
        self._build_rdf()

        # if NonRDF, empty binary data
        if type(self) == NonRDFSource:
            self.binary.empty()
882,707
Method to handle possible values passed for adding, removing, modifying triples. Detects type of input and sets appropriate http://www.w3.org/2001/XMLSchema# datatype Args: object_input (str,int,datetime,): many possible inputs Returns: (rdflib.term.Literal): with appropriate datatype attribute
def _handle_object(self, object_input):

        # if object is string, convert to rdflib.term.Literal with appropriate datatype
        if type(object_input) == str:
            return rdflib.term.Literal(object_input, datatype=rdflib.XSD.string)

        # integer
        elif type(object_input) == int:
            return rdflib.term.Literal(object_input, datatype=rdflib.XSD.int)

        # float
        elif type(object_input) == float:
            return rdflib.term.Literal(object_input, datatype=rdflib.XSD.float)

        # date
        elif type(object_input) == datetime.datetime:
            return rdflib.term.Literal(object_input, datatype=rdflib.XSD.date)

        else:
            return object_input
882,708
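A hedged sketch of the coercion above, assuming `foo` is a retrieved resource exposing _handle_object() as shown; it only illustrates the expected datatype mapping.

import rdflib

# plain Python values become typed rdflib Literals
lit = foo._handle_object(42)
print(lit.datatype)      # http://www.w3.org/2001/XMLSchema#int

lit = foo._handle_object('hello')
print(lit.datatype)      # http://www.w3.org/2001/XMLSchema#string

# rdflib terms such as URIRefs fall through the else branch unchanged
same = foo._handle_object(rdflib.URIRef('http://example.org/bar'))
print(same)              # http://example.org/bar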
add triple by providing p,o, assumes s = subject Args: p (rdflib.term.URIRef): predicate o (): object auto_refresh (bool): whether or not to update object-like self.rdf.triples Returns: None: adds triple to self.rdf.graph
def add_triple(self, p, o, auto_refresh=True):

        self.rdf.graph.add((self.uri, p, self._handle_object(o)))

        # determine if triples refreshed
        self._handle_triple_refresh(auto_refresh)
882,709
Assuming the predicate or object matches a single triple, sets the other for that triple. Args: p (rdflib.term.URIRef): predicate o (): object auto_refresh (bool): whether or not to update object-like self.rdf.triples Returns: None: modifies pre-existing triple in self.rdf.graph
def set_triple(self, p, o, auto_refresh=True):

        self.rdf.graph.set((self.uri, p, self._handle_object(o)))

        # determine if triples refreshed
        self._handle_triple_refresh(auto_refresh)
882,710
remove triple by supplying p,o Args: p (rdflib.term.URIRef): predicate o (): object auto_refresh (bool): whether or not to update object-like self.rdf.triples Returns: None: removes triple from self.rdf.graph
def remove_triple(self, p, o, auto_refresh=True):

        self.rdf.graph.remove((self.uri, p, self._handle_object(o)))

        # determine if triples refreshed
        self._handle_triple_refresh(auto_refresh)
882,711
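A hedged usage sketch tying the three triple helpers together, assuming `foo` is a retrieved resource whose repository context binds a `dc` prefix (the attribute names under rdf.prefixes depend on that context), and that the surrounding library offers an update() call to persist local graph changes (not shown in this excerpt).

# add a new triple; the plain string is coerced to an xsd:string literal by _handle_object()
foo.add_triple(foo.rdf.prefixes.dc.title, 'Foo Resource')

# replace the single matching dc:title triple
foo.set_triple(foo.rdf.prefixes.dc.title, 'Foo Resource, revised')

# remove it again
foo.remove_triple(foo.rdf.prefixes.dc.title, 'Foo Resource, revised')

# persisting the modified graph is assumed to happen via a separate call
# foo.update()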
method to return hierarchical children of this resource Args: as_resources (bool): if True, opens each as the appropriate resource type instead of returning URIs only Returns: (list): list of child URIs, or resource instances if as_resources=True
def children(self, as_resources=False):

        children = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]

        # if as_resources, issue GET requests for children and return
        if as_resources:
            logger.debug('retrieving children as resources')
            children = [ self.repo.get_resource(child) for child in children ]

        return children
882,714
method to return hierarchical parents of this resource Args: as_resources (bool): if True, opens each as the appropriate resource type instead of returning URIs only Returns: (list): list of parent URIs, or resource instances if as_resources=True
def parents(self, as_resources=False):

        parents = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.fedora.hasParent, None))]

        # if as_resources, issue GET requests for parents and return
        if as_resources:
            logger.debug('retrieving parents as resources')
            parents = [ self.repo.get_resource(parent) for parent in parents ]

        return parents
882,715
method to return hierarchical siblings of this resource. Args: as_resources (bool): if True, opens each as the appropriate resource type instead of returning URIs only Returns: (list): list of sibling URIs, or resource instances if as_resources=True
def siblings(self, as_resources=False):

        siblings = set()

        # loop through parents and get children
        for parent in self.parents(as_resources=True):
            for sibling in parent.children(as_resources=as_resources):
                siblings.add(sibling)

        # remove self
        if as_resources:
            siblings.remove(self)
        if not as_resources:
            siblings.remove(self.uri)

        return list(siblings)
882,716
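A hypothetical traversal sketch, assuming `foo` is an already-retrieved container resource:

# URIs only (objects of ldp:contains / fedora:hasParent triples)
child_uris = foo.children()
parent_uris = foo.parents()

# opened as typed resource instances via repo.get_resource()
child_resources = foo.children(as_resources=True)

# everything sharing a parent with foo, excluding foo itself
sibling_uris = foo.siblings()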
method to create a new version of the resource as it currently stands - Note: this will create a version based on the current live instance of the resource, not the local version, which might require self.update() to update. Args: version_label (str): label to be used for version Returns: (ResourceVersion): instance of ResourceVersion, also appended to self.versions
def create_version(self, version_label):

        # create version
        version_response = self.repo.api.http_request('POST', '%s/fcr:versions' % self.uri, data=None, headers={'Slug':version_label})

        # if 201, assume success
        if version_response.status_code == 201:
            logger.debug('version created: %s' % version_response.headers['Location'])

            # affix version
            self._affix_version(version_response.headers['Location'], version_label)
882,718
retrieves all versions of an object, and stores them at self.versions Args: None Returns: None: appends instances
def get_versions(self):

        # get all versions
        versions_response = self.repo.api.http_request('GET', '%s/fcr:versions' % self.uri)

        # parse response
        versions_graph = self.repo.api.parse_rdf_payload(versions_response.content, versions_response.headers)

        # loop through fedora.hasVersion
        for version_uri in versions_graph.objects(self.uri, self.rdf.prefixes.fedora.hasVersion):

            # get label
            version_label = versions_graph.value(version_uri, self.rdf.prefixes.fedora.hasVersionLabel, None).toPython()

            # affix version
            self._affix_version(version_uri, version_label)
882,719
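A hedged end-to-end sketch of the versioning calls, assuming `foo` is an existing resource and that _affix_version() (not shown in this excerpt) exposes each version on self.versions under its label:

# snapshot the current live state under a label
foo.create_version('v1')

# populate foo.versions from <uri>/fcr:versions
foo.get_versions()

# assumed attribute access by label; reverts the repository copy and refreshes foo
foo.versions.v1.revert_to()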
Convenience method to return RDF data for resource, optionally selecting serialization format. Inspired by .dump from Samvera. Args: format (str): expecting serialization formats accepted by rdflib's Graph.serialize(format=)
def dump(self, format='ttl'):

        return self.rdf.graph.serialize(format=format).decode('utf-8')
882,720
method to revert resource to this version by issuing PATCH Args: None Returns: None: sends PATCH request, and refreshes parent resource
def revert_to(self):

        # send patch
        response = self.resource.repo.api.http_request('PATCH', self.uri)

        # if response 204
        if response.status_code == 204:
            logger.debug('reverting to previous version of resource, %s' % self.uri)

            # refresh current resource handle
            self._current_resource.refresh()

        else:
            raise Exception('HTTP %s, could not revert to resource version, %s' % (response.status_code, self.uri))
882,722
method to refresh binary attributes and data Args: updated_self (Resource): resource this binary data attaches to Returns: None: updates attributes
def refresh(self, updated_self):

        logger.debug('refreshing binary attributes')
        self.mimetype = updated_self.binary.mimetype
        self.data = updated_self.binary.data
882,726
Sets Content-Type header based on headers and/or self.binary.mimetype values Implicitly favors Content-Type header if set Args: None Returns: None: sets attributes in self.binary and headers
def _prep_binary_mimetype(self):

        # neither present
        if not self.mimetype and 'Content-Type' not in self.resource.headers.keys():
            raise Exception('to create/update NonRDFSource, mimetype or Content-Type header is required')

        # mimetype, no Content-Type
        elif self.mimetype and 'Content-Type' not in self.resource.headers.keys():
            logger.debug('setting Content-Type header with provided mimetype: %s' % self.mimetype)
            self.resource.headers['Content-Type'] = self.mimetype
882,728
Sets delivery method of either payload or header Favors Content-Location header if set Args: None Returns: None: sets attributes in self.binary and headers
def _prep_binary_content(self):

        # nothing present
        if not self.data and not self.location and 'Content-Location' not in self.resource.headers.keys():
            raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')

        elif 'Content-Location' in self.resource.headers.keys():
            logger.debug('Content-Location header found, using')
            self.delivery = 'header'

        # if Content-Location is not set, look for self.location then self.data
        elif 'Content-Location' not in self.resource.headers.keys():

            # location set, trumps self.data
            if self.location:
                # set appropriate header
                self.resource.headers['Content-Location'] = self.location
                self.delivery = 'header'

            # data attribute is plain text, binary, or file-like object
            elif self.data:

                # if file-like object, set flag for api.http_request
                if isinstance(self.data, io.BufferedIOBase):
                    logger.debug('detected file-like object')
                    self.delivery = 'payload'

                # else, just bytes
                else:
                    logger.debug('detected bytes')
                    self.delivery = 'payload'
882,729
method to return a particular byte range from NonRDF resource's binary data https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html Args: byte_start (int): position of range start byte_end (int): position of range end stream (bool): whether to return a streaming response (default True) Returns: (requests.Response): streamable response
def range(self, byte_start, byte_end, stream=True):

        response = self.resource.repo.api.http_request(
            'GET',
            self.resource.uri,
            data=None,
            headers={
                'Content-Type':self.mimetype,
                'Range':'bytes=%s-%s' % (byte_start, byte_end)
            },
            is_rdf=False,
            stream=stream)

        # expects 206
        if response.status_code == 206:
            return response
        else:
            raise Exception('HTTP %s, but was expecting 206' % response.status_code)
882,730
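A hypothetical sketch of pulling a byte range from a NonRDFSource's binary, assuming `baz` is a retrieved NonRDFSource:

# request bytes 0-1023; an HTTP 206 response is returned as a streaming requests.Response
response = baz.binary.range(0, 1023)

# iterate over the streamed content in small chunks
for chunk in response.iter_content(chunk_size=256):
    print(len(chunk))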
Issues fixity check, return parsed graph Args: None Returns: (dict): ('verdict':(bool): verdict of fixity check, 'premis_graph':(rdflib.Graph): parsed PREMIS graph from check)
def fixity(self, response_format=None):

        # if no response_format, use default
        if not response_format:
            response_format = self.repo.default_serialization

        # issue GET request for fixity check
        response = self.repo.api.http_request('GET', '%s/fcr:fixity' % self.uri)

        # parse
        fixity_graph = self.repo.api.parse_rdf_payload(response.content, response.headers)

        # determine verdict
        for outcome in fixity_graph.objects(None, self.rdf.prefixes.premis.hasEventOutcome):
            if outcome.toPython() == 'SUCCESS':
                verdict = True
            else:
                verdict = False

        return {
            'verdict':verdict,
            'premis_graph':fixity_graph
        }
882,732
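A short hypothetical check built on the dictionary returned above, again assuming `baz` is a retrieved NonRDFSource:

result = baz.fixity()

if result['verdict']:
    print('fixity check passed')
else:
    # the parsed PREMIS graph is available for closer inspection
    print(result['premis_graph'].serialize(format='ttl'))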
Perform a Yelp Phone API Search based on phone number given. Args: phone - Phone number to search by cc - ISO 3166-1 alpha-2 country code. (Optional)
def by_phone(self, phone, cc=None):

        header, content = self._http_request(self.BASE_URL, phone=phone, cc=cc)
        return json.loads(content)
882,782
Perform a Yelp Neighborhood API Search based on a geopoint. Args: lat - geopoint latitude long - geopoint longitude
def by_geopoint(self, lat, long):

        header, content = self._http_request(self.BASE_URL, lat=lat, long=long)
        return json.loads(content)
882,783
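A hypothetical call pattern for the two Yelp search helpers, assuming `client` is an instance of the search class these methods belong to (the class name and its construction are not shown in this excerpt):

# neighborhood lookup for a San Francisco geopoint (illustrative coordinates)
neighborhoods = client.by_geopoint(37.788022, -122.399797)

# phone lookup, optionally scoped by ISO 3166-1 alpha-2 country code
business = client.by_phone('5555555555', cc='US')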