Columns: text_prompt (string, lengths 100 to 17.7k) · code_prompt (string, lengths 7 to 9.86k)
<SYSTEM_TASK:> Set the value of a client cookie. This can only be called while <END_TASK> <USER_TASK:> Description: def cookie_set(self, name, value): """ Set the value of a client cookie. This can only be called while headers can be sent. :param str name: The name of the cookie value to set. :param str value: The value of the cookie to set. """
if not self.headers_active:
    raise RuntimeError('headers have already been ended')
cookie = "{0}={1}; Path=/; HttpOnly".format(name, value)
self.send_header('Set-Cookie', cookie)
<SYSTEM_TASK:> Inspect the Content-Type header to retrieve the charset that the client <END_TASK> <USER_TASK:> Description: def get_content_type_charset(self, default='UTF-8'): """ Inspect the Content-Type header to retrieve the charset that the client has specified. :param str default: The default charset to return if none exists. :return: The charset of the request. :rtype: str """
encoding = default
header = self.headers.get('Content-Type', '')
idx = header.find('charset=')
if idx > 0:
    encoding = (header[idx + 8:].split(' ', 1)[0] or encoding)
return encoding
<SYSTEM_TASK:> Close the web socket connection and stop processing results. If the <END_TASK> <USER_TASK:> Description: def close(self): """ Close the web socket connection and stop processing results. If the connection is still open, a WebSocket close message will be sent to the peer. """
if not self.connected:
    return
self.connected = False
if self.handler.wfile.closed:
    return
if select.select([], [self.handler.wfile], [], 0)[1]:
    with self.lock:
        self.handler.wfile.write(b'\x88\x00')
        self.handler.wfile.flush()
self.on_closed()
<SYSTEM_TASK:> Send a message to the peer over the socket. <END_TASK> <USER_TASK:> Description: def send_message(self, opcode, message): """ Send a message to the peer over the socket. :param int opcode: The opcode for the message to send. :param bytes message: The message data to send. """
if not isinstance(message, bytes):
    message = message.encode('utf-8')
length = len(message)
if not select.select([], [self.handler.wfile], [], 0)[1]:
    self.logger.error('the socket is not ready for writing')
    self.close()
    return
buffer = b''
buffer += struct.pack('B', 0x80 + opcode)
if length <= 125:
    buffer += struct.pack('B', length)
elif 126 <= length <= 65535:
    buffer += struct.pack('>BH', 126, length)
else:
    buffer += struct.pack('>BQ', 127, length)
buffer += message
self._last_sent_opcode = opcode
self.lock.acquire()
try:
    self.handler.wfile.write(buffer)
    self.handler.wfile.flush()
except Exception:
    self.logger.error('an error occurred while sending a message', exc_info=True)
    self.close()
finally:
    self.lock.release()
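The header packing above follows RFC 6455's three length regimes (7-bit, 16-bit, 64-bit). A standalone sanity-check sketch of just the header logic, with no socket I/O (the helper name is hypothetical):

import struct

def ws_frame_header(opcode, length):
    # FIN bit set, no masking (server-to-client frames are sent unmasked).
    header = struct.pack('B', 0x80 + opcode)
    if length <= 125:
        header += struct.pack('B', length)          # 7-bit payload length
    elif length <= 65535:
        header += struct.pack('>BH', 126, length)   # 16-bit extended length
    else:
        header += struct.pack('>BQ', 127, length)   # 64-bit extended length
    return header

assert ws_frame_header(0x1, 5) == b'\x81\x05'       # short text frame
assert ws_frame_header(0x1, 300)[:2] == b'\x81\x7e' # 126 marker, then 16-bit length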
<SYSTEM_TASK:> The primary dispatch function to handle incoming WebSocket messages. <END_TASK> <USER_TASK:> Description: def on_message(self, opcode, message): """ The primary dispatch function to handle incoming WebSocket messages. :param int opcode: The opcode of the message that was received. :param bytes message: The data contained within the message. """
self.logger.debug("processing {0} (opcode: 0x{1:02x}) message".format(self._opcode_names.get(opcode, 'UNKNOWN'), opcode))
if opcode == self._opcode_close:
    self.close()
elif opcode == self._opcode_ping:
    if len(message) > 125:
        self.close()
        return
    self.send_message(self._opcode_pong, message)
elif opcode == self._opcode_pong:
    pass
elif opcode == self._opcode_binary:
    self.on_message_binary(message)
elif opcode == self._opcode_text:
    try:
        message = self._decode_string(message)
    except UnicodeDecodeError:
        self.logger.warning('closing connection due to invalid unicode within a text message')
        self.close()
    else:
        self.on_message_text(message)
elif opcode == self._opcode_continue:
    self.close()
else:
    self.logger.warning("received unknown opcode: {0} (0x{0:02x})".format(opcode))
    self.close()
<SYSTEM_TASK:> Build a serializer object from a MIME Content-Type string. <END_TASK> <USER_TASK:> Description: def from_content_type(cls, content_type): """ Build a serializer object from a MIME Content-Type string. :param str content_type: The Content-Type string to parse. :return: A new serializer instance. :rtype: :py:class:`.Serializer` """
name = content_type
options = {}
if ';' in content_type:
    name, options_str = content_type.split(';', 1)
    for part in options_str.split(';'):
        part = part.strip()
        if '=' in part:
            # split on the first '=' only, in case the value itself contains one
            key, value = part.split('=', 1)
        else:
            key, value = (part, None)
        options[key] = value
# old style compatibility
if name.endswith('+zlib'):
    options['compression'] = 'zlib'
    name = name[:-5]
return cls(name, charset=options.get('charset', 'UTF-8'), compression=options.get('compression'))
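A hypothetical invocation showing the '+zlib' suffix handling; the attribute names here mirror the private ones used by dumps()/loads() below and are an assumption about the constructor:

s = Serializer.from_content_type('binary/message-pack+zlib; charset=UTF-8')
assert s.name == 'binary/message-pack'   # '+zlib' stripped from the name...
assert s._compression == 'zlib'          # ...and recorded as the compression option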
<SYSTEM_TASK:> Serialize a python data type for transmission or storage. <END_TASK> <USER_TASK:> Description: def dumps(self, data): """ Serialize a python data type for transmission or storage. :param data: The python object to serialize. :return: The serialized representation of the object. :rtype: bytes """
data = g_serializer_drivers[self.name]['dumps'](data)
if sys.version_info[0] == 3 and isinstance(data, str):
    data = data.encode(self._charset)
if self._compression == 'zlib':
    data = zlib.compress(data)
assert isinstance(data, bytes)
return data
<SYSTEM_TASK:> Deserialize the data into its original python object. <END_TASK> <USER_TASK:> Description: def loads(self, data): """ Deserialize the data into its original python object. :param bytes data: The serialized object to load. :return: The original python object. """
if not isinstance(data, bytes):
    raise TypeError("loads() argument 1 must be bytes, not {0}".format(type(data).__name__))
if self._compression == 'zlib':
    data = zlib.decompress(data)
if sys.version_info[0] == 3 and self.name.startswith('application/'):
    data = data.decode(self._charset)
data = g_serializer_drivers[self.name]['loads'](data, (self._charset if sys.version_info[0] == 3 else None))
if isinstance(data, list):
    data = tuple(data)
return data
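dumps() and loads() are inverses. A driver-free sketch of the equivalent pipeline for a hypothetical JSON driver (the g_serializer_drivers table itself is not shown in this dataset):

import json
import zlib

payload = {'key': 'value'}

# What dumps() does for a JSON driver with zlib compression:
blob = zlib.compress(json.dumps(payload).encode('UTF-8'))

# ...and what loads() undoes, in reverse order:
restored = json.loads(zlib.decompress(blob).decode('UTF-8'))
assert restored == payload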
<SYSTEM_TASK:> Shutdown the server and stop responding to requests. <END_TASK> <USER_TASK:> Description: def shutdown(self): """Shutdown the server and stop responding to requests."""
self.__should_stop.set()
if self.__server_thread == threading.current_thread():
    self.__is_shutdown.set()
    self.__is_running.clear()
else:
    if self.__wakeup_fd is not None:
        os.write(self.__wakeup_fd.write_fd, b'\x00')
    self.__is_shutdown.wait()
    if self.__wakeup_fd is not None:
        self.__wakeup_fd.close()
        self.__wakeup_fd = None
for server in self.sub_servers:
    server.shutdown()
<SYSTEM_TASK:> Enable or disable requiring authentication on all incoming requests. <END_TASK> <USER_TASK:> Description: def auth_set(self, status): """ Enable or disable requiring authentication on all incoming requests. :param bool status: Whether to enable or disable requiring authentication. """
if not bool(status):
    self.__config['basic_auth'] = None
    self.logger.info('basic authentication has been disabled')
else:
    self.__config['basic_auth'] = {}
    self.logger.info('basic authentication has been enabled')
<SYSTEM_TASK:> Delete the credentials for a specific username if specified or all <END_TASK> <USER_TASK:> Description: def auth_delete_creds(self, username=None): """ Delete the credentials for a specific username if specified or all stored credentials. :param str username: The username of the credentials to delete. """
if not username:
    self.__config['basic_auth'] = {}
    self.logger.info('basic authentication database has been cleared of all entries')
    return
del self.__config['basic_auth'][username]
<SYSTEM_TASK:> Context manager to temporarily change the values of object attributes <END_TASK> <USER_TASK:> Description: def setattr_context(obj, **kwargs): """ Context manager to temporarily change the values of object attributes while executing a function. Example ------- >>> class Foo: pass >>> f = Foo(); f.attr = 'hello' >>> with setattr_context(f, attr='goodbye'): ... print(f.attr) goodbye >>> print(f.attr) hello """
old_kwargs = dict([(key, getattr(obj, key)) for key in kwargs])
for key, val in kwargs.items():
    setattr(obj, key, val)
try:
    yield
finally:
    for key, val in old_kwargs.items():
        setattr(obj, key, val)
<SYSTEM_TASK:> Validate input arrays <END_TASK> <USER_TASK:> Description: def validate_inputs(*arrays, **kwargs): """Validate input arrays This checks that - Arrays are mutually broadcastable - Broadcasted arrays are one-dimensional Optionally, arrays are sorted according to the ``sort_by`` argument. Parameters ---------- *arrays : ndarrays All non-keyword arguments are arrays which will be validated sort_by : array If specified, sort all inputs by the order given in this array. """
arrays = np.broadcast_arrays(*arrays)
sort_by = kwargs.pop('sort_by', None)

if kwargs:
    raise ValueError("unrecognized arguments: {0}".format(kwargs.keys()))

if arrays[0].ndim != 1:
    raise ValueError("Input arrays should be one-dimensional.")

if sort_by is not None:
    isort = np.argsort(sort_by)
    if isort.shape != arrays[0].shape:
        raise ValueError("sort shape must equal array shape.")
    arrays = tuple([a[isort] for a in arrays])
return arrays
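A quick sketch of the sort_by behavior: every broadcast array is reordered by the argsort of the key array.

import numpy as np

t, y = validate_inputs(np.array([3, 1, 2]), np.array([30, 10, 20]),
                       sort_by=np.array([3, 1, 2]))
assert list(t) == [1, 2, 3]      # sorted by the key
assert list(y) == [10, 20, 30]   # companion array reordered consistently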
<SYSTEM_TASK:> Private function to prepare & check variables for smooth utilities <END_TASK> <USER_TASK:> Description: def _prep_smooth(t, y, dy, span, t_out, span_out, period): """Private function to prepare & check variables for smooth utilities"""
# If period is provided, sort by phases. Otherwise sort by t
if period:
    t = t % period
    if t_out is not None:
        t_out = t_out % period

t, y, dy = validate_inputs(t, y, dy, sort_by=t)

if span_out is not None:
    if t_out is None:
        raise ValueError("Must specify t_out when span_out is given")
    if span is not None:
        raise ValueError("Must specify only one of span, span_out")
    span, t_out = np.broadcast_arrays(span_out, t_out)
    indices = np.searchsorted(t, t_out)
elif span is None:
    raise ValueError("Must specify either span_out or span")
else:
    indices = None

return t, y, dy, span, t_out, span_out, indices
<SYSTEM_TASK:> Perform a moving-average smooth of the data <END_TASK> <USER_TASK:> Description: def moving_average_smooth(t, y, dy, span=None, cv=True, t_out=None, span_out=None, period=None): """Perform a moving-average smooth of the data Parameters ---------- t, y, dy : array_like time, value, and error in value of the input data span : array_like the integer spans of the data cv : boolean (default=True) if True, treat the problem as a cross-validation, i.e. don't use each point in the evaluation of its own smoothing. t_out : array_like (optional) the output times for the moving averages span_out : array_like (optional) the spans associated with the output times t_out period : float if provided, then consider the inputs periodic with the given period Returns ------- y_smooth : array_like smoothed y values at each time t (or t_out) """
prep = _prep_smooth(t, y, dy, span, t_out, span_out, period)
t, y, dy, span, t_out, span_out, indices = prep

w = 1. / (dy ** 2)
w, yw = windowed_sum([w, y * w], t=t, span=span, subtract_mid=cv,
                     indices=indices, period=period)

if t_out is None or span_out is not None:
    return yw / w
else:
    i = np.minimum(len(t) - 1, np.searchsorted(t, t_out))
    return yw[i] / w[i]
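A hedged usage sketch on synthetic data (assumes the module's windowed_sum helper is available alongside the functions above; span is interpreted as an integer window size):

import numpy as np

rng = np.random.RandomState(0)
t = np.sort(rng.uniform(0, 10, 100))
y = np.sin(t) + 0.1 * rng.randn(100)
dy = np.full_like(y, 0.1)

# Cross-validated 5-point moving average evaluated at the input times.
y_smooth = moving_average_smooth(t, y, dy, span=5)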
<SYSTEM_TASK:> Perform a linear smooth of the data <END_TASK> <USER_TASK:> Description: def linear_smooth(t, y, dy, span=None, cv=True, t_out=None, span_out=None, period=None): """Perform a linear smooth of the data Parameters ---------- t, y, dy : array_like time, value, and error in value of the input data span : array_like the integer spans of the data cv : boolean (default=True) if True, treat the problem as a cross-validation, i.e. don't use each point in the evaluation of its own smoothing. t_out : array_like (optional) the output times for the moving averages span_out : array_like (optional) the spans associated with the output times t_out period : float if provided, then consider the inputs periodic with the given period Returns ------- y_smooth : array_like smoothed y values at each time t or t_out """
t_input = t

prep = _prep_smooth(t, y, dy, span, t_out, span_out, period)
t, y, dy, span, t_out, span_out, indices = prep
if period:
    t_input = np.asarray(t_input) % period

w = 1. / (dy ** 2)
w, yw, tw, tyw, ttw = windowed_sum([w, y * w, w, y * w, w], t=t,
                                   tpowers=[0, 0, 1, 1, 2],
                                   span=span, indices=indices,
                                   subtract_mid=cv, period=period)

denominator = (w * ttw - tw * tw)
slope = (tyw * w - tw * yw)
intercept = (ttw * yw - tyw * tw)

if np.any(denominator == 0):
    raise ValueError("Zero denominator in linear smooth. This usually "
                     "indicates that the input contains duplicate points.")

if t_out is None:
    return (slope * t_input + intercept) / denominator
elif span_out is not None:
    return (slope * t_out + intercept) / denominator
else:
    i = np.minimum(len(t) - 1, np.searchsorted(t, t_out))
    return (slope[i] * t_out + intercept[i]) / denominator[i]
<SYSTEM_TASK:> Multiple linear interpolations <END_TASK> <USER_TASK:> Description: def multinterp(x, y, xquery, slow=False): """Multiple linear interpolations Parameters ---------- x : array_like, shape=(N,) sorted array of x values y : array_like, shape=(N, M) array of y values corresponding to each x value xquery : array_like, shape=(M,) array of query values slow : boolean, default=False if True, use slow method (used mainly for unit testing) Returns ------- yquery : ndarray, shape=(M,) The interpolated values corresponding to each x query. """
x, y, xquery = map(np.asarray, (x, y, xquery))
assert x.ndim == 1
assert xquery.ndim == 1
assert y.shape == x.shape + xquery.shape

# make sure xmin < xquery < xmax in all cases
xquery = np.clip(xquery, x.min(), x.max())

if slow:
    from scipy.interpolate import interp1d
    return np.array([interp1d(x, y)(xq) for xq, y in zip(xquery, y.T)])
elif len(x) == 3:
    # Most common case: use a faster approach
    yq_lower = y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
    yq_upper = y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1])
    return np.where(xquery < x[1], yq_lower, yq_upper)
else:
    i = np.clip(np.searchsorted(x, xquery, side='right') - 1, 0, len(x) - 2)
    j = np.arange(len(xquery))
    return y[i, j] + ((xquery - x[i]) * (y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i]))
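A small worked example of the shape convention: column m of y is a curve sampled at x, and it is queried at xquery[m].

import numpy as np

x = np.array([0.0, 1.0, 2.0])
y = np.array([[0.0, 0.0],
              [1.0, 10.0],
              [2.0, 20.0]])            # two curves, slopes 1 and 10
yq = multinterp(x, y, np.array([0.5, 1.5]))
assert np.allclose(yq, [0.5, 15.0])    # column 0 at 0.5, column 1 at 1.5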
<SYSTEM_TASK:> Create a consulate.session object, and query for its leader to ensure <END_TASK> <USER_TASK:> Description: def _create_session(self, test_connection=False): """ Create a consulate.session object, and query for its leader to ensure that the connection is made. :param test_connection: call .leader() to ensure that the connection is valid :type test_connection: bool :return consulate.Session instance """
session = consulate.Session(host=self.host, port=self.port)
if test_connection:
    session.status.leader()
return session
<SYSTEM_TASK:> Applies all config values defined in consul's kv store to self.app. <END_TASK> <USER_TASK:> Description: def apply_remote_config(self, namespace=None): """ Applies all config values defined in consul's kv store to self.app. There is no guarantee that these values will not be overwritten later elsewhere. :param namespace: kv namespace/directory. Defaults to DEFAULT_KV_NAMESPACE :return: None """
if namespace is None:
    namespace = "config/{service}/{environment}/".format(
        service=os.environ.get('SERVICE', 'generic_service'),
        environment=os.environ.get('ENVIRONMENT', 'generic_environment'))

for k, v in iteritems(self.session.kv.find(namespace)):
    k = k.replace(namespace, '')
    try:
        self.app.config[k] = json.loads(v)
    except (TypeError, ValueError):
        self.app.logger.warning("Couldn't de-serialize {} to json, using raw value".format(v))
        self.app.config[k] = v

    msg = "Set {k}={v} from consul kv '{ns}'".format(k=k, v=v, ns=namespace)
    self.app.logger.debug(msg)
<SYSTEM_TASK:> register this service with consul <END_TASK> <USER_TASK:> Description: def register_service(self, **kwargs): """ register this service with consul kwargs passed to Consul.agent.service.register """
kwargs.setdefault('name', self.app.name)
self.session.agent.service.register(**kwargs)
<SYSTEM_TASK:> Query the consul DNS server for the service IP and port <END_TASK> <USER_TASK:> Description: def _resolve(self): """ Query the consul DNS server for the service IP and port """
endpoints = {}
r = self.resolver.query(self.service, 'SRV')
for rec in r.response.additional:
    name = rec.name.to_text()
    addr = rec.items[0].address
    endpoints[name] = {'addr': addr}
for rec in r.response.answer[0].items:
    name = '.'.join(rec.target.labels)
    endpoints[name]['port'] = rec.port
return ['http://{ip}:{port}'.format(ip=v['addr'], port=v['port'])
        for v in endpoints.values()]
<SYSTEM_TASK:> Internal routine to pad arrays for periodic models. <END_TASK> <USER_TASK:> Description: def _pad_arrays(t, arrays, indices, span, period): """Internal routine to pad arrays for periodic models."""
N = len(t)
if indices is None:
    indices = np.arange(N)
pad_left = max(0, 0 - np.min(indices - span // 2))
pad_right = max(0, np.max(indices + span - span // 2) - (N - 1))

if pad_left + pad_right > 0:
    Nright, pad_right = divmod(pad_right, N)
    Nleft, pad_left = divmod(pad_left, N)
    t = np.concatenate([t[N - pad_left:] - (Nleft + 1) * period]
                       + [t + i * period for i in range(-Nleft, Nright + 1)]
                       + [t[:pad_right] + (Nright + 1) * period])
    arrays = [np.concatenate([a[N - pad_left:]]
                             + (Nleft + Nright + 1) * [a]
                             + [a[:pad_right]])
              for a in arrays]
    pad_left = pad_left % N
    Nright = pad_right // N  # integer division; '/' would yield a float on Python 3
    pad_right = pad_right % N
    return (t, arrays, slice(pad_left + Nleft * N, pad_left + (Nleft + 1) * N))
else:
    return (t, arrays, slice(None))
<SYSTEM_TASK:> Search all the available I2C devices in the system <END_TASK> <USER_TASK:> Description: def get_i2c_bus_numbers(glober = glob.glob): """Search all the available I2C devices in the system"""
res = []
for device in glober("/dev/i2c-*"):
    # The quantifier must sit inside the group: with ([\d]){1,2} the group
    # only keeps the last digit of two-digit bus numbers.
    r = re.match(r"/dev/i2c-(\d{1,2})", device)
    res.append(int(r.group(1)))
return res
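The regex fix above matters for buses 10 and up; a repeated single-character group only retains the last repetition:

import re

assert re.match(r"/dev/i2c-([\d]){1,2}", "/dev/i2c-12").group(1) == '2'   # old pattern
assert re.match(r"/dev/i2c-(\d{1,2})", "/dev/i2c-12").group(1) == '12'    # fixed pattern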
<SYSTEM_TASK:> Parse the name for led number <END_TASK> <USER_TASK:> Description: def get_led_register_from_name(self, name): """Parse the name for led number :param name: attribute name, like: led_1 """
res = re.match('^led_([0-9]{1,2})$', name)
if res is None:
    raise AttributeError("Unknown attribute: '%s'" % name)
led_num = int(res.group(1))
if led_num < 0 or led_num > 15:
    raise AttributeError("Unknown attribute: '%s'" % name)
return self.calc_led_register(led_num)
<SYSTEM_TASK:> Set PWM value for the specified LED <END_TASK> <USER_TASK:> Description: def set_pwm(self, led_num, value): """Set PWM value for the specified LED :param led_num: LED number (0-15) :param value: the 12 bit value (0-4095) """
self.__check_range('led_number', led_num)
self.__check_range('led_value', value)
register_low = self.calc_led_register(led_num)
self.write(register_low, value_low(value))
self.write(register_low + 1, value_high(value))
<SYSTEM_TASK:> Send the controller to sleep <END_TASK> <USER_TASK:> Description: def sleep(self): """Send the controller to sleep"""
logger.debug("Sleep the controller") self.write(Registers.MODE_1, self.mode_1 | (1 << Mode1.SLEEP))
<SYSTEM_TASK:> Write raw byte value to the specified register <END_TASK> <USER_TASK:> Description: def write(self, reg, value): """Write raw byte value to the specified register :param reg: the register number (0-69, 250-255) :param value: byte value """
# TODO: check reg: 0-69, 250-255
self.__check_range('register_value', value)
logger.debug("Write '%s' to register '%s'" % (value, reg))
self.__bus.write_byte_data(self.__address, reg, value)
<SYSTEM_TASK:> Set the frequency for all PWM output <END_TASK> <USER_TASK:> Description: def set_pwm_frequency(self, value): """Set the frequency for all PWM output :param value: the frequency in Hz """
self.__check_range('pwm_frequency', value)
reg_val = self.calc_pre_scale(value)
logger.debug("Calculated prescale value is %s" % reg_val)
self.sleep()
self.write(Registers.PRE_SCALE, reg_val)
self.wake()
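The MODE_1/PRE_SCALE register names suggest a PCA9685-style controller, whose datasheet defines prescale = round(osc / (4096 · f)) − 1 with a 25 MHz internal oscillator. calc_pre_scale is not included in this dataset; a sketch under that assumption:

def calc_pre_scale(frequency, osc_clock=25000000):
    # PCA9685 datasheet formula: prescale = round(osc / (4096 * update_rate)) - 1
    return int(round(osc_clock / (4096.0 * frequency))) - 1

assert calc_pre_scale(50) == 121   # typical servo update rate
assert calc_pre_scale(200) == 30   # 25e6 / (4096 * 200) = 30.5 -> 31 - 1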
<SYSTEM_TASK:> Check if the color provided by the user is valid. <END_TASK> <USER_TASK:> Description: def check_valid_color(color): """Check if the color provided by the user is valid. If color is invalid the default is returned. """
if color in list(mcolors.CSS4_COLORS.keys()) + ["#4CB391"]:
    logging.info("Nanoplotter: Valid color {}.".format(color))
    return color
else:
    logging.info("Nanoplotter: Invalid color {}, using default.".format(color))
    sys.stderr.write("Invalid color {}, using default.\n".format(color))
    return "#4CB391"
<SYSTEM_TASK:> Check if the specified figure format is valid. <END_TASK> <USER_TASK:> Description: def check_valid_format(figformat): """Check if the specified figure format is valid. If format is invalid the default is returned. Probably installation-dependent """
fig = plt.figure()
if figformat in list(fig.canvas.get_supported_filetypes().keys()):
    logging.info("Nanoplotter: valid output format {}".format(figformat))
    return figformat
else:
    logging.info("Nanoplotter: invalid output format {}".format(figformat))
    sys.stderr.write("Invalid format {}, using default.\n".format(figformat))
    return "png"
<SYSTEM_TASK:> Taking channel information and creating post run channel activity plots. <END_TASK> <USER_TASK:> Description: def spatial_heatmap(array, path, title=None, color="Greens", figformat="png"): """Taking channel information and creating post run channel activity plots."""
logging.info("Nanoplotter: Creating heatmap of reads per channel using {} reads." .format(array.size)) activity_map = Plot( path=path + "." + figformat, title="Number of reads generated per channel") layout = make_layout(maxval=np.amax(array)) valueCounts = pd.value_counts(pd.Series(array)) for entry in valueCounts.keys(): layout.template[np.where(layout.structure == entry)] = valueCounts[entry] plt.figure() ax = sns.heatmap( data=pd.DataFrame(layout.template, index=layout.yticks, columns=layout.xticks), xticklabels="auto", yticklabels="auto", square=True, cbar_kws={"orientation": "horizontal"}, cmap=color, linewidths=0.20) ax.set_title(title or activity_map.title) activity_map.fig = ax.get_figure() activity_map.save(format=figformat) plt.close("all") return [activity_map]
<SYSTEM_TASK:> Check if the data contains reads created within the same `days` timeframe. <END_TASK> <USER_TASK:> Description: def check_valid_time_and_sort(df, timescol, days=5, warning=True): """Check if the data contains reads created within the same `days` timeframe. If not, print a warning and return only the part of the data which is within `days` days. The index is reset twice to also get an "index" column for plotting the cum_yield_reads plot. """
timediff = (df[timescol].max() - df[timescol].min()).days
if timediff < days:
    return df.sort_values(timescol).reset_index(drop=True).reset_index()
else:
    if warning:
        sys.stderr.write("\nWarning: data generated is from more than {} days.\n".format(str(days)))
        sys.stderr.write("Likely this indicates you are combining multiple runs.\n")
        sys.stderr.write("Plots based on time are invalid and therefore truncated to first {} days.\n\n".format(str(days)))
        logging.warning("Time plots truncated to first {} days: invalid timespan: {} days".format(str(days), str(timediff)))
    return df[df[timescol] < timedelta(days=days)] \
        .sort_values(timescol) \
        .reset_index(drop=True) \
        .reset_index()
<SYSTEM_TASK:> Making plots of time vs read length, time vs quality and cumulative yield. <END_TASK> <USER_TASK:> Description: def time_plots(df, path, title=None, color="#4CB391", figformat="png", log_length=False, plot_settings=None): """Making plots of time vs read length, time vs quality and cumulative yield."""
dfs = check_valid_time_and_sort(df, "start_time")
logging.info("Nanoplotter: Creating timeplots using {} reads.".format(len(dfs)))
cumyields = cumulative_yield(dfs=dfs.set_index("start_time"),
                             path=path,
                             figformat=figformat,
                             title=title,
                             color=color)
reads_pores_over_time = plot_over_time(dfs=dfs.set_index("start_time"),
                                       path=path,
                                       figformat=figformat,
                                       title=title,
                                       color=color)
violins = violin_plots_over_time(dfs=dfs,
                                 path=path,
                                 figformat=figformat,
                                 title=title,
                                 log_length=log_length,
                                 plot_settings=plot_settings)
return cumyields + reads_pores_over_time + violins
<SYSTEM_TASK:> Create a violin or boxplot from the received DataFrame. <END_TASK> <USER_TASK:> Description: def violin_or_box_plot(df, y, figformat, path, y_name, title=None, plot="violin", log=False, palette=None): """Create a violin or boxplot from the received DataFrame. The x-axis should be divided based on the 'dataset' column, the y-axis is specified in the arguments """
comp = Plot(path=path + "NanoComp_" + y.replace(' ', '_') + '.' + figformat,
            title="Comparing {}".format(y))
if y == "quals":
    comp.title = "Comparing base call quality scores"
if plot == 'violin':
    logging.info("Nanoplotter: Creating violin plot for {}.".format(y))
    process_violin_and_box(
        ax=sns.violinplot(x="dataset", y=y, data=df, inner=None,
                          cut=0, palette=palette, linewidth=0),
        log=log,
        plot_obj=comp,
        title=title,
        y_name=y_name,
        figformat=figformat,
        ymax=np.amax(df[y]))
elif plot == 'box':
    logging.info("Nanoplotter: Creating box plot for {}.".format(y))
    process_violin_and_box(
        ax=sns.boxplot(x="dataset", y=y, data=df, palette=palette),
        log=log,
        plot_obj=comp,
        title=title,
        y_name=y_name,
        figformat=figformat,
        ymax=np.amax(df[y]))
elif plot == 'ridge':
    logging.info("Nanoplotter: Creating ridges plot for {}.".format(y))
    comp.fig, axes = joypy.joyplot(df,
                                   by="dataset",
                                   column=y,
                                   title=title or comp.title,
                                   x_range=[-0.05, np.amax(df[y])])
    if log:
        xticks = [float(i.get_text()) for i in axes[-1].get_xticklabels()]
        axes[-1].set_xticklabels([10**i for i in xticks])
    axes[-1].set_xticklabels(axes[-1].get_xticklabels(), rotation=30, ha='center')
    comp.save(format=figformat)
else:
    logging.error("Unknown comp plot type {}".format(plot))
    sys.exit("Unknown comp plot type {}".format(plot))
plt.close("all")
return [comp]
<SYSTEM_TASK:> Create barplots based on number of reads and total sum of nucleotides sequenced. <END_TASK> <USER_TASK:> Description: def output_barplot(df, figformat, path, title=None, palette=None): """Create barplots based on number of reads and total sum of nucleotides sequenced."""
logging.info("Nanoplotter: Creating barplots for number of reads and total throughput.") read_count = Plot(path=path + "NanoComp_number_of_reads." + figformat, title="Comparing number of reads") ax = sns.countplot(x="dataset", data=df, palette=palette) ax.set(ylabel='Number of reads', title=title or read_count.title) plt.xticks(rotation=30, ha='center') read_count.fig = ax.get_figure() read_count.save(format=figformat) plt.close("all") throughput_bases = Plot(path=path + "NanoComp_total_throughput." + figformat, title="Comparing throughput in gigabases") if "aligned_lengths" in df: throughput = df.groupby('dataset')['aligned_lengths'].sum() ylabel = 'Total gigabase aligned' else: throughput = df.groupby('dataset')['lengths'].sum() ylabel = 'Total gigabase sequenced' ax = sns.barplot(x=list(throughput.index), y=throughput / 1e9, palette=palette, order=df["dataset"].unique()) ax.set(ylabel=ylabel, title=title or throughput_bases.title) plt.xticks(rotation=30, ha='center') throughput_bases.fig = ax.get_figure() throughput_bases.save(format=figformat) plt.close("all") return read_count, throughput_bases
<SYSTEM_TASK:> Use plotly to create an overlay of length histograms <END_TASK> <USER_TASK:> Description: def overlay_histogram(df, path, palette=None): """ Use plotly to create an overlay of length histograms Return html code, but also save as png Only has 10 colors, which get recycled up to 5 times. """
if palette is None:
    palette = plotly.colors.DEFAULT_PLOTLY_COLORS * 5

hist = Plot(path=path + "NanoComp_OverlayHistogram.html",
            title="Histogram of read lengths")
hist.html, hist.fig = plot_overlay_histogram(df, palette, title=hist.title)
hist.save()

hist_norm = Plot(path=path + "NanoComp_OverlayHistogram_Normalized.html",
                 title="Normalized histogram of read lengths")
hist_norm.html, hist_norm.fig = plot_overlay_histogram(df, palette, title=hist_norm.title, histnorm="probability")
hist_norm.save()

log_hist = Plot(path=path + "NanoComp_OverlayLogHistogram.html",
                title="Histogram of log transformed read lengths")
log_hist.html, log_hist.fig = plot_log_histogram(df, palette, title=log_hist.title)
log_hist.save()

log_hist_norm = Plot(path=path + "NanoComp_OverlayLogHistogram_Normalized.html",
                     title="Normalized histogram of log transformed read lengths")
log_hist_norm.html, log_hist_norm.fig = plot_log_histogram(df, palette, title=log_hist_norm.title, histnorm="probability")
log_hist_norm.save()

return [hist, hist_norm, log_hist, log_hist_norm]
<SYSTEM_TASK:> Plot overlaying histograms with log transformation of length <END_TASK> <USER_TASK:> Description: def plot_log_histogram(df, palette, title, histnorm=""): """ Plot overlaying histograms with log transformation of length Return both html and fig for png """
data = [go.Histogram(x=np.log10(df.loc[df["dataset"] == d, "lengths"]),
                     opacity=0.4,
                     name=d,
                     histnorm=histnorm,
                     marker=dict(color=c))
        for d, c in zip(df["dataset"].unique(), palette)]
xtickvals = [10**i for i in range(10) if not 10**i > 10 * np.amax(df["lengths"])]
layout = go.Layout(barmode='overlay',
                   title=title,
                   xaxis=dict(tickvals=np.log10(xtickvals), ticktext=xtickvals))
html = plotly.offline.plot({"data": data, "layout": layout},
                           output_type="div",
                           show_link=False)
fig = go.Figure({"data": data, "layout": layout})
return html, fig
<SYSTEM_TASK:> Glob for the poor. <END_TASK> <USER_TASK:> Description: def get_file(db_folder, file_name): """Glob for the poor."""
if not os.path.isdir(db_folder):
    return
file_name = file_name.lower().strip()
for cand_name in os.listdir(db_folder):
    if cand_name.lower().strip() == file_name:
        return os.path.join(db_folder, cand_name)
<SYSTEM_TASK:> Parse a cronos database. <END_TASK> <USER_TASK:> Description: def parse(db_folder, out_folder): """ Parse a cronos database. Convert the database located in ``db_folder`` into CSV files in the directory ``out_folder``. """
# The database structure, containing table and column definitions as
# well as other data.
stru_dat = get_file(db_folder, 'CroStru.dat')
# Index file for the database, which contains offsets for each record.
data_tad = get_file(db_folder, 'CroBank.tad')
# Actual data records, can only be decoded using CroBank.tad.
data_dat = get_file(db_folder, 'CroBank.dat')
if None in [stru_dat, data_tad, data_dat]:
    raise CronosException("Not all database files are present.")

meta, tables = parse_structure(stru_dat)

for table in tables:
    # TODO: do we want to export the "FL" table?
    if table['abbr'] == 'FL' and table['name'] == 'Files':
        continue
    with open(make_csv_file_name(meta, table, out_folder), 'w') as fh:
        columns = table.get('columns')
        writer = csv.writer(fh)
        writer.writerow([encode_cell(c['name']) for c in columns])
        for row in parse_data(data_tad, data_dat, table.get('id'), columns):
            writer.writerow([encode_cell(c) for c in row])
<SYSTEM_TASK:> Return the base64 encoding of the figure file and insert in html image tag. <END_TASK> <USER_TASK:> Description: def encode1(self): """Return the base64 encoding of the figure file and insert in html image tag."""
# Use a context manager so the figure file handle is closed after reading.
with open(self.path, 'rb') as fig_file:
    data_uri = b64encode(fig_file.read()).decode('utf-8').replace('\n', '')
return '<img src="data:image/png;base64,{0}">'.format(data_uri)
<SYSTEM_TASK:> Return the base64 encoding of the fig attribute and insert in html image tag. <END_TASK> <USER_TASK:> Description: def encode2(self): """Return the base64 encoding of the fig attribute and insert in html image tag."""
buf = BytesIO()
self.fig.savefig(buf, format='png', bbox_inches='tight', dpi=100)
buf.seek(0)
string = b64encode(buf.read())
return '<img src="data:image/png;base64,{0}">'.format(urlquote(string))
<SYSTEM_TASK:> Read the spectra from the files generated by Quanty and store them <END_TASK> <USER_TASK:> Description: def loadFromDisk(self, calculation): """ Read the spectra from the files generated by Quanty and store them as a list of spectrum objects. """
suffixes = {
    'Isotropic': 'iso',
    'Circular Dichroism (R-L)': 'cd',
    'Right Polarized (R)': 'r',
    'Left Polarized (L)': 'l',
    'Linear Dichroism (V-H)': 'ld',
    'Vertical Polarized (V)': 'v',
    'Horizontal Polarized (H)': 'h',
}

self.raw = list()
for spectrumName in self.toPlot:
    suffix = suffixes[spectrumName]
    path = '{}_{}.spec'.format(calculation.baseName, suffix)

    data = np.loadtxt(path, skiprows=5)
    rows, columns = data.shape

    if calculation.experiment in ['XAS', 'XPS', 'XES']:
        xMin = calculation.xMin
        xMax = calculation.xMax
        xNPoints = calculation.xNPoints
        if calculation.experiment == 'XES':
            x = np.linspace(xMin, xMax, xNPoints + 1)
            x = x[::-1]
            y = data[:, 2]
            y = y / np.abs(y.max())
        else:
            x = np.linspace(xMin, xMax, xNPoints + 1)
            y = data[:, 2::2].flatten()

        spectrum = Spectrum1D(x, y)

        spectrum.name = spectrumName
        if len(suffix) > 2:
            spectrum.shortName = suffix.title()
        else:
            spectrum.shortName = suffix.upper()

        if calculation.experiment in ['XAS', ]:
            spectrum.xLabel = 'Absorption Energy (eV)'
        elif calculation.experiment in ['XPS', ]:
            spectrum.xLabel = 'Binding Energy (eV)'
        elif calculation.experiment in ['XES', ]:
            spectrum.xLabel = 'Emission Energy (eV)'
        spectrum.yLabel = 'Intensity (a.u.)'

        self.broadenings = {'gaussian': (calculation.xGaussian, ), }
    else:
        xMin = calculation.xMin
        xMax = calculation.xMax
        xNPoints = calculation.xNPoints
        yMin = calculation.yMin
        yMax = calculation.yMax
        yNPoints = calculation.yNPoints

        x = np.linspace(xMin, xMax, xNPoints + 1)
        y = np.linspace(yMin, yMax, yNPoints + 1)
        z = data[:, 2::2]

        spectrum = Spectrum2D(x, y, z)

        spectrum.name = spectrumName
        if len(suffix) > 2:
            spectrum.shortName = suffix.title()
        else:
            spectrum.shortName = suffix.upper()

        spectrum.xLabel = 'Incident Energy (eV)'
        spectrum.yLabel = 'Energy Transfer (eV)'

        self.broadenings = {'gaussian': (calculation.xGaussian, calculation.yGaussian), }

    self.raw.append(spectrum)

# Process the spectra once they were read from disk.
self.process()
<SYSTEM_TASK:> Update the selection to contain only the result specified by <END_TASK> <USER_TASK:> Description: def updateResultsView(self, index): """ Update the selection to contain only the result specified by the index. This should be the last index of the model. Finally update the context menu. The selectionChanged signal is used to trigger the update of the Quanty dock widget and result details dialog. :param index: Index of the last item of the model. :type index: QModelIndex """
flags = (QItemSelectionModel.Clear | QItemSelectionModel.Rows | QItemSelectionModel.Select)
self.resultsView.selectionModel().select(index, flags)
self.resultsView.resizeColumnsToContents()
self.resultsView.setFocus()
<SYSTEM_TASK:> Updating the plotting widget should not require any information <END_TASK> <USER_TASK:> Description: def updatePlotWidget(self): """Updating the plotting widget should not require any information about the current state of the widget."""
pw = self.getPlotWidget()
pw.reset()

results = self.resultsModel.getCheckedItems()
for result in results:
    if isinstance(result, ExperimentalData):
        spectrum = result.spectra['Expt']
        spectrum.legend = '{}-{}'.format(result.index, 'Expt')
        spectrum.xLabel = 'X'
        spectrum.yLabel = 'Y'
        spectrum.plot(plotWidget=pw)
    else:
        if len(results) > 1 and result.experiment in ['RIXS', ]:
            continue
        for spectrum in result.spectra.processed:
            spectrum.legend = '{}-{}'.format(result.index, spectrum.shortName)
            if spectrum.name in result.spectra.toPlotChecked:
                spectrum.plot(plotWidget=pw)
<SYSTEM_TASK:> Return the row of the child. <END_TASK> <USER_TASK:> Description: def row(self): """Return the row of the child."""
if self.parent is not None:
    children = self.parent.getChildren()
    # The index method of the list object.
    return children.index(self)
else:
    return 0
<SYSTEM_TASK:> Return the index of the parent for a given index of the <END_TASK> <USER_TASK:> Description: def parent(self, index): """Return the index of the parent for a given index of the child. Unfortunately, the name of the method has to be parent, even though a more verbose name like parentIndex, would avoid confusion about what parent actually is - an index or an item. """
childItem = self.item(index)
parentItem = childItem.parent

if parentItem == self.rootItem:
    parentIndex = QModelIndex()
else:
    parentIndex = self.createIndex(parentItem.row(), 0, parentItem)
return parentIndex
<SYSTEM_TASK:> Set the role data for the item at index to value. <END_TASK> <USER_TASK:> Description: def setData(self, index, value, role): """Set the role data for the item at index to value."""
if not index.isValid():
    return False

item = self.item(index)
column = index.column()

if role == Qt.EditRole:
    items = list()
    items.append(item)
    if self.sync:
        parentIndex = self.parent(index)
        # Iterate over the siblings of the parent index.
        for sibling in self.siblings(parentIndex):
            siblingNode = self.item(sibling)
            for child in siblingNode.children:
                if child.getItemData(0) == item.getItemData(0):
                    items.append(child)
    for item in items:
        columnData = str(item.getItemData(column))
        if columnData and columnData != value:
            try:
                item.setItemData(column, float(value))
            except ValueError:
                return False
        else:
            return False
elif role == Qt.CheckStateRole:
    item.setCheckState(value)
    if value == Qt.Unchecked or value == Qt.Checked:
        state = value
        self.itemCheckStateChanged.emit(index, state)

self.dataChanged.emit(index, index)
return True
<SYSTEM_TASK:> Return the active flags for the given index. Add editable <END_TASK> <USER_TASK:> Description: def flags(self, index): """Return the active flags for the given index. Add editable flag to items other than the first column. """
activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable)

item = self.item(index)
column = index.column()
if column > 0 and not item.childCount():
    activeFlags = activeFlags | Qt.ItemIsEditable

return activeFlags
<SYSTEM_TASK:> Return the data contained in the model. <END_TASK> <USER_TASK:> Description: def _getModelData(self, modelData, parentItem=None): """Return the data contained in the model."""
if parentItem is None:
    parentItem = self.rootItem

for item in parentItem.getChildren():
    key = item.getItemData(0)
    if item.childCount():
        modelData[key] = odict()
        self._getModelData(modelData[key], item)
    else:
        if isinstance(item.getItemData(2), float):
            modelData[key] = [item.getItemData(1), item.getItemData(2)]
        else:
            modelData[key] = item.getItemData(1)
<SYSTEM_TASK:> Convolve an array with a kernel using FFT. <END_TASK> <USER_TASK:> Description: def convolve_fft(array, kernel): """ Convolve an array with a kernel using FFT. Implementation based on the convolve_fft function from astropy. https://github.com/astropy/astropy/blob/master/astropy/convolution/convolve.py """
array = np.asarray(array, dtype=complex)   # np.complex was removed in recent NumPy
kernel = np.asarray(kernel, dtype=complex)

if array.ndim != kernel.ndim:
    raise ValueError("Image and kernel must have same number of dimensions")

array_shape = array.shape
kernel_shape = kernel.shape
new_shape = np.array(array_shape) + np.array(kernel_shape)

array_slices = []
kernel_slices = []
for (new_dimsize, array_dimsize, kernel_dimsize) in zip(new_shape, array_shape, kernel_shape):
    center = new_dimsize - (new_dimsize + 1) // 2
    array_slices += [slice(center - array_dimsize // 2,
                           center + (array_dimsize + 1) // 2)]
    kernel_slices += [slice(center - kernel_dimsize // 2,
                            center + (kernel_dimsize + 1) // 2)]

array_slices = tuple(array_slices)
kernel_slices = tuple(kernel_slices)

if not np.all(new_shape == array_shape):
    big_array = np.zeros(new_shape, dtype=complex)
    big_array[array_slices] = array
else:
    big_array = array

if not np.all(new_shape == kernel_shape):
    big_kernel = np.zeros(new_shape, dtype=complex)
    big_kernel[kernel_slices] = kernel
else:
    big_kernel = kernel

array_fft = np.fft.fftn(big_array)
kernel_fft = np.fft.fftn(np.fft.ifftshift(big_kernel))

rifft = np.fft.ifftn(array_fft * kernel_fft)

return rifft[array_slices].real
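Because of the zero padding and centering, the result matches a direct zero-padded 'same'-mode convolution for odd-length kernels; a quick consistency check in 1-D:

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
k = np.array([1.0, 1.0, 1.0])
# Both sides give [3, 6, 9, 7]: each point summed with its neighbors, zeros at the edges.
assert np.allclose(convolve_fft(a, k), np.convolve(a, k, mode='same'))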
<SYSTEM_TASK:> Method executed when the event 'set' is triggered. <END_TASK> <USER_TASK:> Description: def __validate(self, target, value, oldvalue, initiator): """ Method executed when the event 'set' is triggered. :param target: Object triggered :param value: New value :param oldvalue: Previous value :param initiator: Column modified :return: :raise ValidateError: """
if value == oldvalue:
    return value

if self.allow_null and value is None:
    return value

if self.check_value(value):
    return value
else:
    if self.throw_exception:
        if self.message:
            self.message = self.message.format(
                field=self.field,
                new_value=value,
                old_value=oldvalue,
                key=initiator.key)
            raise ValidateError(self.message)
        else:
            raise ValidateError('Value %s from column %s is not valid' % (value, initiator.key))
    return oldvalue
<SYSTEM_TASK:> Create an SQLAlchemy event listening the 'set' in a particular column. <END_TASK> <USER_TASK:> Description: def __create_event(self): """ Create an SQLAlchemy event listening the 'set' in a particular column. :rtype : object """
if not event.contains(self.field, 'set', self.__validate):
    event.listen(self.field, 'set', self.__validate, retval=True)
<SYSTEM_TASK:> Remove the listener to stop the validation <END_TASK> <USER_TASK:> Description: def stop(self): """ Remove the listener to stop the validation """
if event.contains(self.field, 'set', self.__validate):
    event.remove(self.field, 'set', self.__validate)
<SYSTEM_TASK:> Restart the listener <END_TASK> <USER_TASK:> Description: def start(self): """ Restart the listener """
if not event.contains(self.field, 'set', self.__validate):
    self.__create_event()
<SYSTEM_TASK:> Translate a nucleotide sequence into an amino acid sequence. <END_TASK> <USER_TASK:> Description: def nt2aa(ntseq): """Translate a nucleotide sequence into an amino acid sequence. Parameters ---------- ntseq : str Nucleotide sequence composed of A, C, G, or T (uppercase or lowercase) Returns ------- aaseq : str Amino acid sequence Example -------- >>> nt2aa('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC') 'CAWSVAPDRGGYTF' """
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
aa_dict = 'KQE*TPASRRG*ILVLNHDYTPASSRGCILVFKQE*TPASRRGWMLVLNHDYTPASSRGCILVF'

return ''.join([aa_dict[nt2num[ntseq[i]] + 4 * nt2num[ntseq[i + 1]] + 16 * nt2num[ntseq[i + 2]]]
                for i in range(0, len(ntseq), 3) if i + 2 < len(ntseq)])
<SYSTEM_TASK:> Represent nucleotide sequence by sequence of codon symbols. <END_TASK> <USER_TASK:> Description: def nt2codon_rep(ntseq): """Represent nucleotide sequence by sequence of codon symbols. 'Translates' the nucleotide sequence into a symbolic representation of 'amino acids' where each codon gets its own unique character symbol. These characters should be reserved only for representing the 64 individual codons --- note that this means it is important that this function matches the corresponding function in the preprocess script and that any custom alphabet does not use these symbols. Defining symbols for each individual codon allows for Pgen computation of inframe nucleotide sequences. Parameters ---------- ntseq : str A Nucleotide sequence (normally a CDR3 nucleotide sequence) to be 'translated' into the codon - symbol representation. Can be either uppercase or lowercase, but only composed of A, C, G, or T. Returns ------- codon_rep : str The codon - symbolic representation of ntseq. Note that if len(ntseq) == 3L --> len(codon_rep) == L Example -------- >>> nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC') '\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f' """
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'a': 0, 'c': 1, 'g': 2, 't': 3}
# Use single characters not in use to represent each individual codon ---
# this function is called in constructing the codon dictionary.
codon_rep = '\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf'

# nt2num must be defined (it was commented out here), otherwise the lookup below raises NameError.
return ''.join([codon_rep[nt2num[ntseq[i]] + 4 * nt2num[ntseq[i + 1]] + 16 * nt2num[ntseq[i + 2]]]
                for i in range(0, len(ntseq), 3) if i + 2 < len(ntseq)])
<SYSTEM_TASK:> Cut genomic sequence from the right. <END_TASK> <USER_TASK:> Description: def cutR_seq(seq, cutR, max_palindrome): """Cut genomic sequence from the right. Parameters ---------- seq : str Nucleotide sequence to be cut from the right cutR : int cutR - max_palindrome = how many nucleotides to cut from the right. Negative cutR implies complementary palindromic insertions. max_palindrome : int Length of the maximum palindromic insertion. Returns ------- seq : str Nucleotide sequence after being cut from the right Examples -------- >>> cutR_seq('TGCGCCAGCAGTGAGTC', 0, 4) 'TGCGCCAGCAGTGAGTCGACT' >>> cutR_seq('TGCGCCAGCAGTGAGTC', 8, 4) 'TGCGCCAGCAGTG' """
complement_dict = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}  # can include lower case if wanted
if cutR < max_palindrome:
    # reverse complement palindrome insertions
    seq = seq + ''.join([complement_dict[nt] for nt in seq[cutR - max_palindrome:]][::-1])
else:
    # deletions
    seq = seq[:len(seq) - cutR + max_palindrome]
return seq
<SYSTEM_TASK:> Cut genomic sequence from the left. <END_TASK> <USER_TASK:> Description: def cutL_seq(seq, cutL, max_palindrome): """Cut genomic sequence from the left. Parameters ---------- seq : str Nucleotide sequence to be cut from the right cutL : int cutL - max_palindrome = how many nucleotides to cut from the left. Negative cutL implies complementary palindromic insertions. max_palindrome : int Length of the maximum palindromic insertion. Returns ------- seq : str Nucleotide sequence after being cut from the left Examples -------- >>> cutL_seq('TGAACACTGAAGCTTTCTTT', 8, 4) 'CACTGAAGCTTTCTTT' >>> cutL_seq('TGAACACTGAAGCTTTCTTT', 0, 4) 'TTCATGAACACTGAAGCTTTCTTT' """
complement_dict = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}  # can include lower case if wanted
if cutL < max_palindrome:
    # reverse complement palindrome insertions
    seq = ''.join([complement_dict[nt] for nt in seq[:max_palindrome - cutL]][::-1]) + seq
else:
    # deletions
    seq = seq[cutL - max_palindrome:]
return seq
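cutL_seq mirrors cutR_seq: a negative effective cut (here cutL - max_palindrome = -2) prepends that many palindromic nucleotides. A quick check using the function above:

# 'TG' is the reverse complement of the first two nucleotides, 'CA'.
assert cutL_seq('CACT', 2, 4) == 'TGCACT'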
<SYSTEM_TASK:> Generate the sub_codons_left dictionary of codon prefixes. <END_TASK> <USER_TASK:> Description: def generate_sub_codons_left(codons_dict): """Generate the sub_codons_left dictionary of codon prefixes. Parameters ---------- codons_dict : dict Dictionary, keyed by the allowed 'amino acid' symbols with the values being lists of codons corresponding to the symbol. Returns ------- sub_codons_left : dict Dictionary of the 1 and 2 nucleotide prefixes (read from 5') for each codon in an 'amino acid' grouping """
sub_codons_left = {}
for aa in codons_dict.keys():
    sub_codons_left[aa] = list(set([x[0] for x in codons_dict[aa]] + [x[:2] for x in codons_dict[aa]]))
return sub_codons_left
<SYSTEM_TASK:> Calculate the steady state dist of a 4 state markov transition matrix. <END_TASK> <USER_TASK:> Description: def calc_steady_state_dist(R): """Calculate the steady state dist of a 4 state markov transition matrix. Parameters ---------- R : ndarray Markov transition matrix Returns ------- p_ss : ndarray Steady state probability distribution """
# Calc steady state distribution for a dinucleotide bias matrix
w, v = np.linalg.eig(R)
for i in range(4):
    if np.abs(w[i] - 1) < 1e-8:
        return np.real(v[:, i] / np.sum(v[:, i]))
return -1
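Since np.linalg.eig returns right eigenvectors, this expects R in the column-stochastic orientation (R[i, j] = P(next = i | previous = j)); the unit eigenvector, normalized to sum 1, is the steady state. A degenerate but easy check:

import numpy as np

R = np.full((4, 4), 0.25)          # uniform transitions: every nucleotide equally likely
p_ss = calc_steady_state_dist(R)
assert np.allclose(p_ss, 0.25)     # uniform steady state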
<SYSTEM_TASK:> Generate a random insertion nucleotide sequence of length ins_len. <END_TASK> <USER_TASK:> Description: def rnd_ins_seq(ins_len, C_R, CP_first_nt): """Generate a random insertion nucleotide sequence of length ins_len. Draws the sequence identity (for a set length) from the distribution defined by the dinucleotide markov model of transition matrix R. Parameters ---------- ins_len : int Length of nucleotide sequence to be inserted. C_R : ndarray (4, 4) array of the cumulative transition probabilities defined by the Markov transition matrix R CP_first_nt : ndarray (4,) array of the cumulative probabilities for the first inserted nucleotide Returns ------- seq : str Randomly generated insertion sequence of length ins_len. Examples -------- >>> rnd_ins_seq(7, CP_generative_model['C_Rvd'], CP_generative_model['C_first_nt_bias_insVD']) 'GATGGAC' >>> rnd_ins_seq(7, CP_generative_model['C_Rvd'], CP_generative_model['C_first_nt_bias_insVD']) 'ACCCCCG' >>> rnd_ins_seq(3, CP_generative_model['C_Rvd'], CP_generative_model['C_first_nt_bias_insVD']) 'GCC' """
nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
num2nt = 'ACGT'

if ins_len == 0:
    return ''

seq = num2nt[CP_first_nt.searchsorted(np.random.random())]
ins_len -= 1

while ins_len > 0:
    seq += num2nt[C_R[nt2num[seq[-1]], :].searchsorted(np.random.random())]
    ins_len -= 1

return seq
<SYSTEM_TASK:> Returns all the class attributes. <END_TASK> <USER_TASK:> Description: def getFields(self): """ Returns all the class attributes. @rtype: dict @return: A dictionary containing all the class attributes. """
d = {}
for i in self._attrsList:
    key = i
    value = getattr(self, i)
    d[key] = value
return d
<SYSTEM_TASK:> Lerp. Linear interpolation from self to a <END_TASK> <USER_TASK:> Description: def lerp(self, a, t): """ Lerp. Linear interpolation from self to a"""
return self.plus(a.minus(self).times(t))
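lerp computes self + (a - self) * t. The vector type with plus/minus/times is supplied by the surrounding CSG library; the minimal stand-in below exists only to exercise the formula:

# Hypothetical 3-component vector, just to demonstrate lerp(); the real
# library provides its own Vector class with these methods.
class Vec:
    def __init__(self, x, y, z): self.x, self.y, self.z = x, y, z
    def plus(self, a): return Vec(self.x + a.x, self.y + a.y, self.z + a.z)
    def minus(self, a): return Vec(self.x - a.x, self.y - a.y, self.z - a.z)
    def times(self, t): return Vec(self.x * t, self.y * t, self.z * t)
    def lerp(self, a, t): return self.plus(a.minus(self).times(t))

p = Vec(0, 0, 0).lerp(Vec(10, 0, 0), 0.25)
assert (p.x, p.y, p.z) == (2.5, 0, 0)   # a quarter of the way along the segment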
<SYSTEM_TASK:> Create a new vertex between this vertex and `other` by linearly <END_TASK> <USER_TASK:> Description: def interpolate(self, other, t): """ Create a new vertex between this vertex and `other` by linearly interpolating all properties using a parameter of `t`. Subclasses should override this to interpolate additional properties. """
return Vertex(self.pos.lerp(other.pos, t),
              self.normal.lerp(other.normal, t))
<SYSTEM_TASK:> Split `polygon` by this plane if needed, then put the polygon or polygon <END_TASK> <USER_TASK:> Description: def splitPolygon(self, polygon, coplanarFront, coplanarBack, front, back): """ Split `polygon` by this plane if needed, then put the polygon or polygon fragments in the appropriate lists. Coplanar polygons go into either `coplanarFront` or `coplanarBack` depending on their orientation with respect to this plane. Polygons in front or in back of this plane go into either `front` or `back` """
COPLANAR = 0  # all the vertices are within EPSILON distance from plane
FRONT = 1     # all the vertices are in front of the plane
BACK = 2      # all the vertices are at the back of the plane
SPANNING = 3  # some vertices are in front, some in the back

# Classify each point as well as the entire polygon into one of the above
# four classes.
polygonType = 0
vertexLocs = []

numVertices = len(polygon.vertices)
for i in range(numVertices):
    t = self.normal.dot(polygon.vertices[i].pos) - self.w
    loc = -1
    if t < -Plane.EPSILON:
        loc = BACK
    elif t > Plane.EPSILON:
        loc = FRONT
    else:
        loc = COPLANAR
    polygonType |= loc
    vertexLocs.append(loc)

# Put the polygon in the correct list, splitting it when necessary.
if polygonType == COPLANAR:
    normalDotPlaneNormal = self.normal.dot(polygon.plane.normal)
    if normalDotPlaneNormal > 0:
        coplanarFront.append(polygon)
    else:
        coplanarBack.append(polygon)
elif polygonType == FRONT:
    front.append(polygon)
elif polygonType == BACK:
    back.append(polygon)
elif polygonType == SPANNING:
    f = []
    b = []
    for i in range(numVertices):
        j = (i + 1) % numVertices
        ti = vertexLocs[i]
        tj = vertexLocs[j]
        vi = polygon.vertices[i]
        vj = polygon.vertices[j]
        if ti != BACK:
            f.append(vi)
        if ti != FRONT:
            if ti != BACK:
                b.append(vi.clone())
            else:
                b.append(vi)
        if (ti | tj) == SPANNING:
            # interpolation weight at the intersection point
            t = (self.w - self.normal.dot(vi.pos)) / self.normal.dot(vj.pos.minus(vi.pos))
            # intersection point on the plane
            v = vi.interpolate(vj, t)
            f.append(v)
            b.append(v.clone())
    if len(f) >= 3:
        front.append(Polygon(f, polygon.shared))
    if len(b) >= 3:
        back.append(Polygon(b, polygon.shared))
<SYSTEM_TASK:> Convert solid space to empty space and empty space to solid space. <END_TASK> <USER_TASK:> Description: def invert(self): """ Convert solid space to empty space and empty space to solid space. """
for poly in self.polygons:
    poly.flip()
self.plane.flip()
if self.front:
    self.front.invert()
if self.back:
    self.back.invert()
self.front, self.back = self.back, self.front
<SYSTEM_TASK:> Recursively remove all polygons in `polygons` that are inside this BSP <END_TASK> <USER_TASK:> Description: def clipPolygons(self, polygons): """ Recursively remove all polygons in `polygons` that are inside this BSP tree. """
if not self.plane:
    return polygons[:]

front = []
back = []
for poly in polygons:
    self.plane.splitPolygon(poly, front, back, front, back)

if self.front:
    front = self.front.clipPolygons(front)

if self.back:
    back = self.back.clipPolygons(back)
else:
    back = []

front.extend(back)
return front
<SYSTEM_TASK:> Remove all polygons in this BSP tree that are inside the other BSP tree <END_TASK> <USER_TASK:> Description: def clipTo(self, bsp): """ Remove all polygons in this BSP tree that are inside the other BSP tree `bsp`. """
self.polygons = bsp.clipPolygons(self.polygons)
if self.front:
    self.front.clipTo(bsp)
if self.back:
    self.back.clipTo(bsp)
<SYSTEM_TASK:> Return a list of all polygons in this BSP tree. <END_TASK> <USER_TASK:> Description: def allPolygons(self): """ Return a list of all polygons in this BSP tree. """
polygons = self.polygons[:]
if self.front:
    polygons.extend(self.front.allPolygons())
if self.back:
    polygons.extend(self.back.allPolygons())
return polygons
<SYSTEM_TASK:> Returns the rate from the default currency to `currency`. <END_TASK> <USER_TASK:> Description: def get_rate(currency): """Returns the rate from the default currency to `currency`."""
source = get_rate_source()
try:
    return Rate.objects.get(source=source, currency=currency).value
except Rate.DoesNotExist:
    raise CurrencyConversionException(
        "Rate for %s in %s does not exist. "
        "Please run python manage.py update_rates" % (currency, source.name))
<SYSTEM_TASK:> Convert 'amount' from 'currency_from' to 'currency_to' <END_TASK> <USER_TASK:> Description: def base_convert_money(amount, currency_from, currency_to): """ Convert 'amount' from 'currency_from' to 'currency_to' """
source = get_rate_source()

# Get rate for currency_from.
if source.base_currency != currency_from:
    rate_from = get_rate(currency_from)
else:
    # If currency from is the same as base currency its rate is 1.
    rate_from = Decimal(1)

# Get rate for currency_to.
rate_to = get_rate(currency_to)

if isinstance(amount, float):
    amount = Decimal(amount).quantize(Decimal('.000001'))

# After finishing the operation, quantize down final amount to two points.
return ((amount / rate_from) * rate_to).quantize(Decimal("1.00"))
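The arithmetic is a two-hop rate-table conversion: divide by the source rate to reach the base currency, then multiply by the target rate. A standalone sketch with illustrative rates (base currency USD, both rate values hypothetical):

from decimal import Decimal

rates = {'EUR': Decimal('0.9'), 'GBP': Decimal('0.8')}   # units per 1 USD

def convert(amount, cur_from, cur_to):
    rate_from = rates.get(cur_from, Decimal(1))   # the base currency has rate 1
    rate_to = rates.get(cur_to, Decimal(1))
    return ((amount / rate_from) * rate_to).quantize(Decimal('1.00'))

# 90 EUR -> 100 USD -> 80 GBP
assert convert(Decimal('90'), 'EUR', 'GBP') == Decimal('80.00')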
<SYSTEM_TASK:> Convert 'amount' from 'currency_from' to 'currency_to' and return a Money <END_TASK> <USER_TASK:> Description: def convert_money(amount, currency_from, currency_to): """ Convert 'amount' from 'currency_from' to 'currency_to' and return a Money instance of the converted amount. """
new_amount = base_convert_money(amount, currency_from, currency_to)
return moneyed.Money(new_amount, currency_to)
<SYSTEM_TASK:> Format a datetime object with Java SimpleDateFormat's-like string. <END_TASK> <USER_TASK:> Description: def format_date(format_string=None, datetime_obj=None): """ Format a datetime object with Java SimpleDateFormat's-like string. If datetime_obj is not given - use current datetime. If format_string is not given - return number of millisecond since epoch. :param format_string: :param datetime_obj: :return: :rtype string """
datetime_obj = datetime_obj or datetime.now()
if format_string is None:
    seconds = int(datetime_obj.strftime("%s"))
    milliseconds = datetime_obj.microsecond // 1000
    return str(seconds * 1000 + milliseconds)
else:
    formatter = SimpleDateFormat(format_string)
    return formatter.format_datetime(datetime_obj)
<SYSTEM_TASK:> Tries to determine if a buffer is empty. <END_TASK> <USER_TASK:> Description: def allZero(buffer): """ Tries to determine if a buffer is empty. @type buffer: str @param buffer: Buffer to test if it is empty. @rtype: bool @return: C{True} if the given buffer is empty, i.e. full of zeros, C{False} otherwise. """
allZero = True
for byte in buffer:
    if byte != "\x00":
        allZero = False
        break
return allZero
<SYSTEM_TASK:> Reads as many bytes as indicated by the size parameter at the specified offset. <END_TASK> <USER_TASK:> Description: def readAt(self, offset, size): """ Reads as many bytes as indicated by the size parameter at the specified offset. @type offset: int @param offset: Offset of the value to be read. @type size: int @param size: This parameter indicates how many bytes are going to be read from a given offset. @rtype: str @return: A packed string containing the read data. """
if offset > self.length:
    if self.log:
        # was a Python 2 print statement referencing an undefined 'nroBytes';
        # the requested byte count is the 'size' argument
        print("Warning: Trying to read: %d bytes - only %d bytes left" % (size, self.length - self.offset))
    offset = self.length - self.offset

tmpOff = self.tell()
self.setOffset(offset)
r = self.read(size)
self.setOffset(tmpOff)
return r
<SYSTEM_TASK:> Send a notification to channels <END_TASK> <USER_TASK:> Description: def send(self, message, channel_name=None, fail_silently=False, options=None): # type: (Text, Optional[str], bool, Optional[SendOptions]) -> None """Send a notification to channels :param message: A message to send :param channel_name: Name of a configured channel; all channels are used when omitted :param fail_silently: Suppress delivery errors when True :param options: Extra backend-specific send options """
if channel_name is None:
    channels = self.settings["CHANNELS"]
else:
    try:
        channels = {
            "__selected__": self.settings["CHANNELS"][channel_name]
        }
    except KeyError:
        raise Exception("channel does not exist: %s" % channel_name)

for _, config in channels.items():
    if "_backend" not in config:
        raise ImproperlyConfigured(
            "Specify the backend class in the channel configuration")

    backend = self._load_backend(config["_backend"])  # type: Any
    config = deepcopy(config)
    del config["_backend"]

    channel = backend(**config)
    channel.send(message, fail_silently=fail_silently, options=options)
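A hedged configuration sketch of the CHANNELS settings shape this loop expects; the backend dotted path and extra keys below are placeholders, not a real backend shipped with the library:

settings = {
    "CHANNELS": {
        "ops": {
            "_backend": "myproject.channels.SlackChannel",           # hypothetical backend class
            "webhook_url": "https://hooks.slack.com/services/XXX",   # placeholder
        },
    },
}
# notifier.send("deploy finished", channel_name="ops")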
<SYSTEM_TASK:> Prepares and sends an HTTP request. Returns the HTTPResponse object. <END_TASK> <USER_TASK:> Description: def request(self, method, path, params=None, headers=None, cookies=None, data=None, json=None, allow_redirects=None, timeout=None): """ Prepares and sends an HTTP request. Returns the HTTPResponse object. :param method: str :param path: str :return: response :rtype: HTTPResponse """
headers = headers or {}
timeout = timeout if timeout is not None else self._timeout
allow_redirects = allow_redirects if allow_redirects is not None else self._allow_redirects

if self._keep_alive and self.__session is None:
    self.__session = requests.Session()

if self.__session is not None and not self._use_cookies:
    self.__session.cookies.clear()

address = self._bake_address(path)

# Merge per-request headers on top of the client-wide defaults.
req_headers = copy.deepcopy(self._additional_headers)
req_headers.update(headers)

response = http.request(method, address, session=self.__session, params=params, headers=req_headers,
                        cookies=cookies, data=data, json=json, allow_redirects=allow_redirects, timeout=timeout)
if self._auto_assert_ok:
    response.assert_ok()
return response
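A minimal usage sketch, assuming a client instance whose _bake_address can resolve relative paths against a configured base URL:

response = client.request("GET", "/status", params={"verbose": 1}, timeout=30)
response.assert_ok()  # the same check request() applies when _auto_assert_ok is set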
<SYSTEM_TASK:> Read anchor position and functionality from file. <END_TASK> <USER_TASK:> Description: def load_genomic_CDR3_anchor_pos_and_functionality(anchor_pos_file_name): """Read anchor position and functionality from file. Parameters ---------- anchor_pos_file_name : str File name for the functionality and position of a conserved residue that defines the CDR3 region for each V or J germline sequence. Returns ------- anchor_pos_and_functionality : dict Residue anchor position and functionality for each gene/allele. """
anchor_pos_and_functionality = {}
with open(anchor_pos_file_name, 'r') as anchor_pos_file:
    first_line = True
    for line in anchor_pos_file:
        if first_line:
            # Skip the header line.
            first_line = False
            continue
        split_line = [x.strip() for x in line.split(',')]
        anchor_pos_and_functionality[split_line[0]] = [int(split_line[1]), split_line[2].strip('()')]
return anchor_pos_and_functionality
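A hedged sketch of the expected input, with made-up gene names and positions; the real anchor files ship with the package:

# anchors.csv -- the first (header) line is skipped by the parser:
#   gene,anchor_pos,function
#   TRBV1*01,88,(F)
#   TRBV2*01,87,P
anchors = load_genomic_CDR3_anchor_pos_and_functionality("anchors.csv")
# -> {'TRBV1*01': [88, 'F'], 'TRBV2*01': [87, 'P']}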
<SYSTEM_TASK:> Add palindromic inserted nucleotides to germline V sequences. <END_TASK> <USER_TASK:> Description: def generate_cutV_genomic_CDR3_segs(self): """Add palindromic inserted nucleotides to germline V sequences. The maximum number of palindromic insertions are appended to the germline V segments so that delV can index directly for number of nucleotides to delete from a segment. Sets the attribute cutV_genomic_CDR3_segs. """
max_palindrome = self.max_delV_palindrome

self.cutV_genomic_CDR3_segs = []
for CDR3_V_seg in [x[1] for x in self.genV]:
    if len(CDR3_V_seg) < max_palindrome:
        self.cutV_genomic_CDR3_segs += [cutR_seq(CDR3_V_seg, 0, len(CDR3_V_seg))]
    else:
        self.cutV_genomic_CDR3_segs += [cutR_seq(CDR3_V_seg, 0, max_palindrome)]
<SYSTEM_TASK:> Add palindromic inserted nucleotides to germline J sequences. <END_TASK> <USER_TASK:> Description: def generate_cutJ_genomic_CDR3_segs(self): """Add palindromic inserted nucleotides to germline J sequences. The maximum number of palindromic insertions are appended to the germline J segments so that delJ can index directly for number of nucleotides to delete from a segment. Sets the attribute cutJ_genomic_CDR3_segs. """
max_palindrome = self.max_delJ_palindrome

self.cutJ_genomic_CDR3_segs = []
for CDR3_J_seg in [x[1] for x in self.genJ]:
    if len(CDR3_J_seg) < max_palindrome:
        self.cutJ_genomic_CDR3_segs += [cutL_seq(CDR3_J_seg, 0, len(CDR3_J_seg))]
    else:
        self.cutJ_genomic_CDR3_segs += [cutL_seq(CDR3_J_seg, 0, max_palindrome)]
<SYSTEM_TASK:> Add palindromic inserted nucleotides to germline D sequences. <END_TASK> <USER_TASK:> Description: def generate_cutD_genomic_CDR3_segs(self): """Add palindromic inserted nucleotides to germline D sequences. The maximum number of palindromic insertions are appended to the germline D segments so that delDl and delDr can index directly for number of nucleotides to delete from a segment. Sets the attribute cutD_genomic_CDR3_segs. """
max_palindrome_L = self.max_delDl_palindrome
max_palindrome_R = self.max_delDr_palindrome

self.cutD_genomic_CDR3_segs = []
for CDR3_D_seg in [x[1] for x in self.genD]:
    if len(CDR3_D_seg) < min(max_palindrome_L, max_palindrome_R):
        self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, len(CDR3_D_seg)), 0, len(CDR3_D_seg))]
    else:
        self.cutD_genomic_CDR3_segs += [cutR_seq(cutL_seq(CDR3_D_seg, 0, max_palindrome_L), 0, max_palindrome_R)]
<SYSTEM_TASK:> This has to be a module-level function <END_TASK> <USER_TASK:> Description: def spawn_worker(params): """ This has to be a module-level function. :type params: Params """
setup_logging(params)
log.info("Adding worker: idx=%s\tconcurrency=%s\tresults=%s", params.worker_index, params.concurrency, params.report)

worker = Worker(params)
worker.start()
worker.join()
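A hedged usage sketch: a likely reason for the module-level requirement is that multiprocessing must be able to pickle the target when it starts a new process:

from multiprocessing import Process

# params is assumed to be a prepared Params instance
p = Process(target=spawn_worker, args=(params,))
p.start()
p.join()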
<SYSTEM_TASK:> Check for MZ signature. <END_TASK> <USER_TASK:> Description: def hasMZSignature(self, rd): """ Check for MZ signature. @type rd: L{ReadData} @param rd: A L{ReadData} object. @rtype: bool @return: True if the given L{ReadData} stream has the MZ signature. Otherwise, False. """
rd.setOffset(0)
sign = rd.read(2)
if sign == "MZ":
    return True
return False
<SYSTEM_TASK:> Check for PE signature. <END_TASK> <USER_TASK:> Description: def hasPESignature(self, rd): """ Check for PE signature. @type rd: L{ReadData} @param rd: A L{ReadData} object. @rtype: bool @return: True if the given L{ReadData} stream has the PE signature. Otherwise, False. """
rd.setOffset(0)
e_lfanew_offset = unpack("<L", rd.readAt(0x3c, 4))[0]
sign = rd.readAt(e_lfanew_offset, 2)
if sign == "PE":
    return True
return False
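A sketch tying the two checks together, assuming a PE instance and a ReadData wrapper over the raw file bytes (both hypothetical here):

rd = utils.ReadData(open("sample.exe", "rb").read())
if pe.hasMZSignature(rd) and pe.hasPESignature(rd):
    print "Looks like a PE file."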
<SYSTEM_TASK:> Performs validations over some fields of the PE structure to determine if the loaded file has a valid PE format. <END_TASK> <USER_TASK:> Description: def validate(self): """ Performs validations over some fields of the PE structure to determine if the loaded file has a valid PE format. @raise PEException: If an invalid value is found into the PE instance. """
# Ange Albertini (@angie4771) can kill me for this! :)
if self.dosHeader.e_magic.value != consts.MZ_SIGNATURE:
    raise excep.PEException("Invalid MZ signature. Found %d instead of %d." % (self.dosHeader.e_magic.value, consts.MZ_SIGNATURE))

if self.dosHeader.e_lfanew.value > len(self):
    raise excep.PEException("Invalid e_lfanew value. Probably not a PE file.")

if self.ntHeaders.signature.value != consts.PE_SIGNATURE:
    raise excep.PEException("Invalid PE signature. Found %d instead of %d." % (self.ntHeaders.signature.value, consts.PE_SIGNATURE))

if self.ntHeaders.optionalHeader.numberOfRvaAndSizes.value > 0x10:
    print excep.PEWarning("Suspicious value for NumberOfRvaAndSizes: %d." % self.ntHeaders.optionalHeader.numberOfRvaAndSizes.value)
<SYSTEM_TASK:> Returns the data between the last section header and the beginning of data from the first section. <END_TASK> <USER_TASK:> Description: def _getPaddingDataToSectionOffset(self): """ Returns the data between the last section header and the beginning of data from the first section. @rtype: str @return: Data between the last section header and the beginning of the first section. """
start = self._getPaddingToSectionOffset()
end = self.sectionHeaders[0].pointerToRawData.value
return self._data[start:end]
<SYSTEM_TASK:> Returns the digital signature within a digital signed PE file. <END_TASK> <USER_TASK:> Description: def _getSignature(self, readDataInstance, dataDirectoryInstance): """ Returns the digital signature within a digital signed PE file. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} instance containing a PE file data. @type dataDirectoryInstance: L{DataDirectory} @param dataDirectoryInstance: A L{DataDirectory} object containing the information about directories. @rtype: str @return: A string with the digital signature. @raise InstanceErrorException: If the C{readDataInstance} or the C{dataDirectoryInstance} were not specified. """
signature = "" if readDataInstance is not None and dataDirectoryInstance is not None: securityDirectory = dataDirectoryInstance[consts.SECURITY_DIRECTORY] if(securityDirectory.rva.value and securityDirectory.size.value): readDataInstance.setOffset(self.getOffsetFromRva(securityDirectory.rva.value)) signature = readDataInstance.read(securityDirectory.size.value) else: raise excep.InstanceErrorException("ReadData instance or DataDirectory instance not specified.") return signature
<SYSTEM_TASK:> Returns the overlay data from the PE file. <END_TASK> <USER_TASK:> Description: def _getOverlay(self, readDataInstance, sectionHdrsInstance): """ Returns the overlay data from the PE file. @type readDataInstance: L{ReadData} @param readDataInstance: A L{ReadData} instance containing the PE file data. @type sectionHdrsInstance: L{SectionHeaders} @param sectionHdrsInstance: A L{SectionHeaders} instance containing the information about the sections present in the PE file. @rtype: str @return: A string with the overlay data from the PE file. @raise InstanceErrorException: If the C{readDataInstance} or the C{sectionHdrsInstance} were not specified. """
if readDataInstance is not None and sectionHdrsInstance is not None:
    # Adjust the offset in readDataInstance to PointerToRawData + SizeOfRawData of the last section.
    try:
        offset = sectionHdrsInstance[-1].pointerToRawData.value + sectionHdrsInstance[-1].sizeOfRawData.value
        readDataInstance.setOffset(offset)
    except excep.WrongOffsetValueException:
        if self._verbose:
            print "It seems that the file has no overlay data."
else:
    raise excep.InstanceErrorException("ReadData instance or SectionHeaders instance not specified.")
return readDataInstance.data[readDataInstance.offset:]
<SYSTEM_TASK:> Converts an RVA to an offset. <END_TASK> <USER_TASK:> Description: def getOffsetFromRva(self, rva): """ Converts an RVA to an offset. @type rva: int @param rva: The RVA to be converted. @rtype: int @return: An integer value representing an offset in the PE file. """
offset = -1
s = self.getSectionByRva(rva)
if s != -1:
    offset = (rva - self.sectionHeaders[s].virtualAddress.value) + self.sectionHeaders[s].pointerToRawData.value
else:
    # The RVA does not fall inside any section; assume a 1:1 mapping (header data).
    offset = rva
return offset
<SYSTEM_TASK:> Converts an offset to an RVA. <END_TASK> <USER_TASK:> Description: def getRvaFromOffset(self, offset): """ Converts an offset to an RVA. @type offset: int @param offset: The offset value to be converted to RVA. @rtype: int @return: The RVA obtained from the given offset. """
rva = -1
s = self.getSectionByOffset(offset)
if s != -1:  # Section indexes start at 0, so a plain truthiness check would miss the first section.
    rva = (offset - self.sectionHeaders[s].pointerToRawData.value) + self.sectionHeaders[s].virtualAddress.value
return rva
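A worked example of the two mappings with hypothetical numbers, for a section whose virtualAddress is 0x1000 and pointerToRawData is 0x400:

rva = 0x1010
offset = (rva - 0x1000) + 0x400       # getOffsetFromRva -> 0x410
rva_back = (offset - 0x400) + 0x1000  # getRvaFromOffset -> 0x1010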
<SYSTEM_TASK:> Given an offset in the file, tries to determine the section this offset belongs to. <END_TASK> <USER_TASK:> Description: def getSectionByOffset(self, offset): """ Given an offset in the file, tries to determine the section the given offset belongs to. @type offset: int @param offset: Offset value. @rtype: int @return: An index, starting at 0, that represents the section the given offset belongs to. """
index = -1
for i in range(len(self.sectionHeaders)):
    if offset < self.sectionHeaders[i].pointerToRawData.value + self.sectionHeaders[i].sizeOfRawData.value:
        index = i
        break
return index
<SYSTEM_TASK:> Given a string representing a section name, tries to find the section index. <END_TASK> <USER_TASK:> Description: def getSectionIndexByName(self, name): """ Given a string representing a section name, tries to find the section index. @type name: str @param name: A section name. @rtype: int @return: The index, starting at 0, of the section. """
index = -1
if name:
    for i in range(len(self.sectionHeaders)):
        if self.sectionHeaders[i].name.value.find(name) >= 0:
            index = i
            break
return index
<SYSTEM_TASK:> Given an RVA in the file, tries to determine the section this RVA belongs to. <END_TASK> <USER_TASK:> Description: def getSectionByRva(self, rva): """ Given an RVA in the file, tries to determine the section this RVA belongs to. @type rva: int @param rva: RVA value. @rtype: int @return: An index, starting at 0, that represents the section the given RVA belongs to. """
index = -1
if rva < self.sectionHeaders[0].virtualAddress.value:
    return index

for i in range(len(self.sectionHeaders)):
    fa = self.ntHeaders.optionalHeader.fileAlignment.value
    prd = self.sectionHeaders[i].pointerToRawData.value
    srd = self.sectionHeaders[i].sizeOfRawData.value
    if len(str(self)) - self._adjustFileAlignment(prd, fa) < srd:
        # The raw data claimed by the section runs past the end of the file:
        # fall back to the virtual size.
        size = self.sectionHeaders[i].misc.value
    else:
        size = max(srd, self.sectionHeaders[i].misc.value)
    if self.sectionHeaders[i].virtualAddress.value <= rva < self.sectionHeaders[i].virtualAddress.value + size:
        index = i
        break
return index
<SYSTEM_TASK:> Returns the offset to the end of the last section header present in the PE file. <END_TASK> <USER_TASK:> Description: def _getPaddingToSectionOffset(self): """ Returns the offset to the end of the last section header present in the PE file. @rtype: int @return: The offset where the end of the last section header resides in the PE file. """
return len(str(self.dosHeader) + str(self.dosStub) + str(self.ntHeaders) + str(self.sectionHeaders))
<SYSTEM_TASK:> Parse all the directories in the PE file. <END_TASK> <USER_TASK:> Description: def fullLoad(self): """Parse all the directories in the PE file."""
self._parseDirectories(self.ntHeaders.optionalHeader.dataDirectory, self.PE_TYPE)
<SYSTEM_TASK:> Fixes the necessary fields in the PE file instance in order to create a valid PE32. i.e. SizeOfImage. <END_TASK> <USER_TASK:> Description: def _fixPe(self): """ Fixes the necessary fields in the PE file instance in order to create a valid PE32. i.e. SizeOfImage. """
sizeOfImage = 0
for sh in self.sectionHeaders:
    sizeOfImage += sh.misc.value
self.ntHeaders.optionalHeader.sizeOfImage.value = self._sectionAlignment(sizeOfImage + 0x1000)
<SYSTEM_TASK:> Gets binary data at a given RVA. <END_TASK> <USER_TASK:> Description: def getDataAtRva(self, rva, size): """ Gets binary data at a given RVA. @type rva: int @param rva: The RVA to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given RVA. """
return self.getDataAtOffset(self.getOffsetFromRva(rva), size)
<SYSTEM_TASK:> Gets binary data at a given offset. <END_TASK> <USER_TASK:> Description: def getDataAtOffset(self, offset, size): """ Gets binary data at a given offset. @type offset: int @param offset: The offset to get the data from. @type size: int @param size: The size of the data to be obtained. @rtype: str @return: The data obtained at the given offset. """
data = str(self)
return data[offset:offset + size]
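For instance, reading the two signature bytes at the start of the image (hypothetical pe instance):

print pe.getDataAtOffset(0, 2)  # "MZ" for a well-formed file
print pe.getDataAtRva(0, 2)     # same bytes: below the first section, RVAs map 1:1 to offsets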
<SYSTEM_TASK:> Parses the delay imports directory. <END_TASK> <USER_TASK:> Description: def _parseDelayImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the delay imports directory. @type rva: int @param rva: The RVA where the delay imports directory starts. @type size: int @param size: The size of the delay imports directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: str @return: The delay imports directory data. """
return self.getDataAtRva(rva, size)
<SYSTEM_TASK:> Parses the bound import directory. <END_TASK> <USER_TASK:> Description: def _parseBoundImportDirectory(self, rva, size, magic = consts.PE32): """ Parses the bound import directory. @type rva: int @param rva: The RVA where the bound import directory starts. @type size: int @param size: The size of the bound import directory. @type magic: int @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}. @rtype: L{ImageBoundImportDescriptor} @return: A new L{ImageBoundImportDescriptor} object. """
data = self.getDataAtRva(rva, size)
rd = utils.ReadData(data)

boundImportDirectory = directories.ImageBoundImportDescriptor.parse(rd)

# Parse the name of every bound import (the last entry appears to be the null terminator, hence the -1).
for i in range(len(boundImportDirectory) - 1):
    if hasattr(boundImportDirectory[i], "forwarderRefsList"):
        if boundImportDirectory[i].forwarderRefsList:
            for forwarderRefEntry in boundImportDirectory[i].forwarderRefsList:
                offset = forwarderRefEntry.offsetModuleName.value
                forwarderRefEntry.moduleName = self.readStringAtRva(offset + rva)

    offset = boundImportDirectory[i].offsetModuleName.value
    boundImportDirectory[i].moduleName = self.readStringAtRva(offset + rva)

return boundImportDirectory