Dataset columns:
  _id               string, length 2 to 7
  title             string, length 1 to 88
  partition         string, 3 distinct values
  text              string, length 31 to 13.1k
  language          string, 1 distinct value
  meta_information  dict
q3100
header
train
def header(headers, width=WIDTH, align=ALIGN, style=STYLE, add_hr=True): """Returns a formatted row of column header strings Parameters ---------- headers : list of strings A list of n strings, the column headers width : int The width of each column (Default: 11) style : string or tuple, optional A formatting style (see STYLES) Returns ------- headerstr : string
python
{ "resource": "" }
q3101
row
train
def row(values, width=WIDTH, format_spec=FMT, align=ALIGN, style=STYLE): """Returns a formatted row of data Parameters ---------- values : array_like An iterable array of data (numbers or strings), each value is printed in a separate column width : int The width of each column (Default: 11) format_spec : string The precision format string used to format numbers in the values array (Default: '5g') align : string The alignment to use ('left', 'center', or 'right'). (Default: 'right') style : namedtuple, optional A line formatting style Returns ------- rowstr : string A string consisting of the full row of data to
python
{ "resource": "" }
q3102
top
train
def top(n, width=WIDTH, style=STYLE): """Prints the top row of a table"""
python
{ "resource": "" }
q3103
banner
train
def banner(message, width=30, style='banner', out=sys.stdout): """Prints a banner message Parameters ---------- message : string The message to print in the banner width : int The minimum width of the banner (Default: 30) style : string A line formatting style (Default: 'banner') out :
python
{ "resource": "" }
q3104
dataframe
train
def dataframe(df, **kwargs): """Print table with data from the given pandas DataFrame Parameters ---------- df : DataFrame
python
{ "resource": "" }
q3105
MacrosPlugin.on_config
train
def on_config(self, config): "Fetch the variables and functions" #print("Here is the config:", config) # fetch variables from YAML file: self._variables = config.get(YAML_SUBSET)
python
{ "resource": "" }
q3106
MacrosPlugin.on_page_markdown
train
def on_page_markdown(self, markdown, page, config, site_navigation=None, **kwargs): "Provide a hook for defining functions from an external module" # the site_navigation argument has been made optional # (deleted in post 1.0 mkdocs, but maintained here # for backward compatibility) if not self.variables: return markdown else:
python
{ "resource": "" }
q3107
load_variables
train
def load_variables(variables, config): """ Add the template functions, via the python module located in the same directory as the Yaml config file. The python module must contain the following hook: declare_variables(variables, macro): variables['a'] = 5 @macro def bar(x): .... @macro def baz(x): .... """ def macro(v, name=''): """ Registers a variable as a macro in the template, i.e. in the variables dictionary: macro(myfunc) Optionally, you can assign a different name: macro(myfunc, 'funcname') You can also use it as a decorator: @macro def foo(a): return a ** 2 More info: https://stackoverflow.com/questions/6036082/call-a-python-function-from-jinja2 """ name = name or v.__name__ variables[name] = v return v # determine the package name, from the filename: python_module = config.get('python_module') or DEFAULT_MODULE_NAME
python
{ "resource": "" }
q3108
b64decode
train
def b64decode(s, altchars=None, validate=False): """Decode bytes encoded with the standard Base64 alphabet. Argument ``s`` is a :term:`bytes-like object` or ASCII string to decode. Optional ``altchars`` must be a :term:`bytes-like object` or ASCII string of length 2 which specifies the alternative alphabet used instead of the '+' and '/' characters. If ``validate`` is ``False`` (the default), characters that are neither in the normal base-64 alphabet nor the alternative alphabet are discarded prior to the padding check. If ``validate`` is ``True``, these non-alphabet characters in the input result in a :exc:`binascii.Error`. The result is returned as a :class:`bytes` object. A :exc:`binascii.Error` is raised if ``s`` is incorrectly padded. """ if version_info < (3, 0) or validate: if validate and len(s) % 4 != 0: raise BinAsciiError('Incorrect padding') s = _get_bytes(s) if altchars is not None: altchars = _get_bytes(altchars) assert len(altchars) == 2, repr(altchars) if version_info < (3, 0): map = maketrans(altchars,
python
{ "resource": "" }
q3109
b64encode
train
def b64encode(s, altchars=None): """Encode bytes using the standard Base64 alphabet. Argument ``s`` is a :term:`bytes-like object` to encode. Optional ``altchars`` must be a byte string of length 2 which specifies an alternative alphabet for the '+' and '/' characters. This allows an application to e.g. generate url or filesystem safe Base64 strings. The result is returned as a :class:`bytes` object. """ if altchars is not None:
python
{ "resource": "" }
q3110
get_song_urls
train
def get_song_urls(song_input): """ Gather all urls, titles for a search query from youtube """ YOUTUBECLASS = 'spf-prefetch' html = requests.get("https://www.youtube.com/results", params={'search_query': song_input}) soup = BeautifulSoup(html.text, 'html.parser')
python
{ "resource": "" }
q3111
download_song
train
def download_song(song_url, song_title): """ Download a song using youtube url and song title """ outtmpl = song_title + '.%(ext)s' ydl_opts = { 'format': 'bestaudio/best', 'outtmpl': outtmpl, 'postprocessors': [ {'key': 'FFmpegExtractAudio','preferredcodec': 'mp3',
python
{ "resource": "" }
q3112
add_album_art
train
def add_album_art(file_name, album_art): """ Add album_art in .mp3's tags """ img = requests.get(album_art, stream=True) # Gets album art from url img = img.raw audio = EasyMP3(file_name, ID3=ID3) try: audio.add_tags() except _util.error: pass audio.tags.add( APIC( encoding=3, # UTF-8 mime='image/png',
python
{ "resource": "" }
q3113
add_metadata
train
def add_metadata(file_name, title, artist, album): """ Add title, artist and album tags to an mp3 file """ tags = EasyMP3(file_name) if title: tags["title"] = title if artist:
python
{ "resource": "" }
q3114
revert_metadata
train
def revert_metadata(files): """ Removes all tags from the given mp3 files """ for file_path in files:
python
{ "resource": "" }
q3115
UserVaultManager.get_user_vault_instance_or_none
train
def get_user_vault_instance_or_none(self, user): """Returns a vault_id string or None""" qset = self.filter(user=user) if not qset: return None
python
{ "resource": "" }
q3116
UserVaultManager.charge
train
def charge(self, user, vault_id=None): """If vault_id is not passed, this will assume that there is only one instance of user and vault_id in the
python
{ "resource": "" }
q3117
UserCCDetailsForm.save
train
def save(self, prepend_vault_id=''): """ Adds or updates a users CC to the vault. @prepend_vault_id: any string to prepend all vault id's with in case the same braintree account is used by multiple projects/apps. """ assert self.is_valid() cc_details_map = { # cc details 'number': self.cleaned_data['cc_number'], 'cardholder_name': self.cleaned_data['name'], 'expiration_date': '%s/%s' %\ (self.cleaned_data['expiration_month'], self.cleaned_data['expiration_year']), 'cvv': self.cleaned_data['cvv'], 'billing_address': { 'postal_code': self.cleaned_data['zip_code'], } } if self.__user_vault: try: # get customer info, its credit card and then update that credit card response = Customer.find(self.__user_vault.vault_id) cc_info = response.credit_cards[0] return CreditCard.update(cc_info.token, params=cc_details_map) except Exception, e: logging.error('Was not able to get customer from vault. %s' % e)
python
{ "resource": "" }
q3118
Events.resolve_incident
train
def resolve_incident(self, incident_key, description=None, details=None): """ Causes the referenced incident to enter resolved state. Send a resolve event when the problem that caused the initial trigger has been fixed.
python
{ "resource": "" }
q3119
clean_response
train
def clean_response(response): '''Recurse through dictionary and replace any keys "self" with "self_"''' if type(response) is list: for elem in response: clean_response(elem) elif type(response) is dict: for key, val in response.items(): if key ==
python
{ "resource": "" }
q3120
PagerDuty.acknowledge_incident
train
def acknowledge_incident(self, service_key, incident_key, description=None, details=None): """ Causes the referenced incident to enter the acknowledged state. Send an acknowledge event when someone is presently working on the incident.
python
{ "resource": "" }
q3121
PagerDuty.trigger_incident
train
def trigger_incident(self, service_key, description, incident_key=None, details=None, client=None, client_url=None, contexts=None): """ Report a new or ongoing problem. When PagerDuty receives a trigger, it will either open a new incident, or add a new log entry to an existing incident.
python
{ "resource": "" }
q3122
try_fix_dataset
train
def try_fix_dataset(dataset): """Transpose the image data if it's in PIL format.""" if isinstance(dataset, numpy.ndarray): if len(dataset.shape) == 3: # NumPy 3D if dataset.shape[-1] == 3: return dataset.transpose((2, 0, 1)) elif len(dataset.shape) == 4: # NumPy 4D if dataset.shape[-1] == 3: return dataset.transpose((0, 3, 1, 2)) # Otherwise couldn't fix it. return dataset # List of
python
{ "resource": "" }
q3123
get_image
train
def get_image(dataset): """Convert the NumPy array to two nested lists with r,g,b tuples.""" dim, nrow, ncol = dataset.shape uint8_dataset = dataset.astype('uint8') if not (uint8_dataset == dataset).all(): message = ( "\nYour image was cast to a `uint8` (`<img>.astype(uint8)`), " "but some information was lost.\nPlease check your gif and " "convert to uint8 beforehand if the gif looks wrong." ) warnings.warn(message) image = [[
python
{ "resource": "" }
q3124
get_colors
train
def get_colors(image): """Return a Counter containing each color and how often it appears. """ colors = Counter(pixel for row in image for pixel in row) if len(colors) > 256: msg = ( "The maximum number of distinct colors in a GIF is
python
{ "resource": "" }
q3125
_get_global_color_table
train
def _get_global_color_table(colors): """Return a color table sorted in descending order of count. """ global_color_table = b''.join(c[0] for c in colors.most_common()) full_table_size = 2**(1+int(get_color_table_size(len(colors)), 2)) repeats = 3 *
python
{ "resource": "" }
q3126
_get_image_data
train
def _get_image_data(image, colors): """Performs the LZW compression as described by Matthew Flickinger. This isn't fast, but it works. http://www.matthewflickinger.com/lab/whatsinagif/lzw_image_data.asp """ lzw_code_size, coded_bits = _lzw_encode(image, colors) coded_bytes = ''.join( '{{:0{}b}}'.format(nbits).format(val) for val, nbits in coded_bits) coded_bytes = '0' * ((8 - len(coded_bytes)) % 8) + coded_bytes coded_data = list( reversed([ int(coded_bytes[8*i:8*(i+1)], 2) for i in range(len(coded_bytes) // 8) ]) ) output = [struct.pack('<B',
python
{ "resource": "" }
q3127
write_gif
train
def write_gif(dataset, filename, fps=10): """Write a NumPy array to GIF 89a format. Or write a list of NumPy arrays to an animation (GIF 89a format). - Positional arguments:: :param dataset: A NumPy array or list of arrays with shape rgb x rows x cols and integer values in [0, 255]. :param filename: The output file that will contain the GIF image. :param fps: The (integer) frames/second of the animation (default 10). :type dataset: a NumPy array or list of NumPy arrays. :return: None - Example: a minimal array, with one red pixel, would look like this:: import numpy as np one_red_pixel = np.array([[[255]], [[0]], [[0]]]) write_gif(one_red_pixel, 'red_pixel.gif') .. raises:: ValueError """ try:
python
{ "resource": "" }
q3128
good_sequences_to_track
train
def good_sequences_to_track(flow, motion_threshold=1.0): """Get list of good frames to do tracking in. Looking at the optical flow, this function chooses a span of frames that fulfill certain criteria. These include * not being too short or too long * not too low or too high mean flow magnitude * a low max value (avoids motion blur) Currently, the cost function for a sequence is hard coded. Sorry about that. Parameters ------------- flow : ndarray The optical flow magnitude motion_threshold : float The maximum amount of motion to consider for sequence endpoints. Returns ------------ sequences : list Sorted list of (a, b, score) elements (highest score first) of sequences where a sequence consists of frames with frame indices in the span [a, b]. """ endpoints = [] in_low = False for i, val in enumerate(flow): if val < motion_threshold: if not in_low: endpoints.append(i) in_low = True else: if in_low: endpoints.append(i-1) # Previous was last in a low spot in_low = False def mean_score_func(m): mu = 15 sigma = 8 top_val = normpdf(mu, mu, sigma) return normpdf(m, mu, sigma) / top_val def max_score_func(m): mu = 40 sigma = 8 if m <= mu: return 1. else: top_val = normpdf(mu, mu, sigma) return normpdf(m, mu, sigma) / top_val def length_score_func(l): mu = 30
python
{ "resource": "" }
q3129
AutoCalibrator.initialize
train
def initialize(self, gyro_rate, slices=None, skip_estimation=False): """Prepare calibrator for calibration This method does three things: 1. Create slices from the video stream, if not already provided 2. Estimate time offset 3. Estimate rotation between camera and gyroscope Parameters ------------------ gyro_rate : float Estimated gyroscope sample rate slices : list of Slice, optional Slices to use for optimization skip_estimation : bool Do not estimate initial time offset and rotation. Raises -------------------- InitializationError If the initialization fails """ self.params['user']['gyro_rate'] = gyro_rate for p in ('gbias_x', 'gbias_y', 'gbias_z'): self.params['initialized'][p] = 0.0 if slices is not None: self.slices = slices if self.slices is None: self.slices = videoslice.Slice.from_stream_randomly(self.video) logger.debug("Number of slices: {:d}".format(len(self.slices))) if len(self.slices) < 2:
python
{ "resource": "" }
q3130
AutoCalibrator.video_time_to_gyro_sample
train
def video_time_to_gyro_sample(self, t): """Convert video time to gyroscope sample index and interpolation factor Parameters ------------------- t : float Video timestamp Returns --------------------
python
{ "resource": "" }
q3131
AutoCalibrator.parameter
train
def parameter(self): """Return the current best value of a parameter""" D = {}
python
{ "resource": "" }
q3132
AutoCalibrator.print_params
train
def print_params(self): """Print the current best set of parameters""" print("Parameters") print("--------------------") for
python
{ "resource": "" }
q3133
AtanCameraModel.from_hdf
train
def from_hdf(cls, filename): """Load camera model params from a HDF5 file The HDF5 file should contain the following datasets: wc : (2,) float with distortion center lgamma : float distortion parameter readout : float readout value size : (2,) int image size fps : float frame rate K : (3, 3) float camera matrix
python
{ "resource": "" }
q3134
AtanCameraModel.invert
train
def invert(self, points): """Invert the distortion Parameters ------------------ points : ndarray Input image points Returns ----------------- ndarray Undistorted points """ X = points if not points.ndim == 1 else points.reshape((points.size, 1))
python
{ "resource": "" }
q3135
Kinect.purge_bad_timestamp_files
train
def purge_bad_timestamp_files(file_list): "Given a list of image files, find bad frames, remove them and modify file_list" MAX_INITIAL_BAD_FRAMES = 15 bad_ts = Kinect.detect_bad_timestamps(Kinect.timestamps_from_file_list(file_list)) # Trivial case if not bad_ts: return file_list
python
{ "resource": "" }
q3136
Kinect.depth_file_for_nir_file
train
def depth_file_for_nir_file(video_filename, depth_file_list): """Returns the corresponding depth filename given a NIR filename""" (root, filename) = os.path.split(video_filename) needle_ts = int(filename.split('-')[2].split('.')[0]) haystack_ts_list = np.array(Kinect.timestamps_from_file_list(depth_file_list))
python
{ "resource": "" }
q3137
Kinect.find_nir_file_with_missing_depth
train
def find_nir_file_with_missing_depth(video_file_list, depth_file_list): "Remove all files without a matching counterpart. Returns new lists of files" new_video_list = [] new_depth_list = [] for fname in video_file_list: try: depth_file = Kinect.depth_file_for_nir_file(fname, depth_file_list) new_video_list.append(fname) new_depth_list.append(depth_file) except IndexError: # Missing file
python
{ "resource": "" }
q3138
fill_sampling
train
def fill_sampling(slice_list, N): """Given a list of slices, draw N samples such that each slice contributes as much as possible Parameters -------------------------- slice_list : list of Slice List of slices N : int Number of samples to draw """ A = [len(s.inliers) for s in slice_list] N_max = np.sum(A) if N > N_max: raise ValueError("Tried to draw {:d} samples from a pool of only {:d} items".format(N, N_max)) samples_from = np.zeros((len(A),), dtype='int') # Number of samples to draw from each group remaining = N while remaining > 0: remaining_groups = np.flatnonzero(samples_from - np.array(A)) if remaining < len(remaining_groups): np.random.shuffle(remaining_groups) for g in remaining_groups[:remaining]: samples_from[g] += 1 else: # Give each group the allowed number of samples. Constrain to their max size. to_each = max(1, int(remaining / len(remaining_groups))) samples_from = np.min(np.vstack((samples_from + to_each, A)), axis=0)
python
{ "resource": "" }
q3139
Slice.estimate_rotation
train
def estimate_rotation(self, camera, ransac_threshold=7.0): """Estimate the rotation between first and last frame It uses RANSAC where the error metric is the reprojection error of the points from the last frame to the first frame. Parameters ----------------- camera : CameraModel Camera model ransac_threshold : float Distance threshold (in pixels) for a reprojected point to count as an inlier """ if self.axis is None: x = self.points[:, 0, :].T y = self.points[:, -1, :].T inlier_ratio = 0.5 R, t, dist, idx = rotations.estimate_rotation_procrustes_ransac(x, y, camera, ransac_threshold, inlier_ratio=inlier_ratio,
python
{ "resource": "" }
q3140
Slice.from_stream_randomly
train
def from_stream_randomly(video_stream, step_bounds=(5, 15), length_bounds=(2, 15), max_start=None, min_distance=10, min_slice_points=10): """Create slices from a video stream using random sampling Parameters ----------------- video_stream : VideoStream A video stream step_bounds : tuple Range bounds (inclusive) of possible step lengths length_bounds : tuple Range bounds (inclusive) of possible slice lengths max_start : int Maximum frame number to start from min_distance : float Minimum (initial) distance between tracked points min_slice_points : int Minimum number of points to keep a slice Returns ------------------- list of Slice List of slices """ new_step = lambda: int(np.random.uniform(low=step_bounds[0], high=step_bounds[1])) new_length = lambda: int(np.random.uniform(low=length_bounds[0], high=length_bounds[1]))
python
{ "resource": "" }
q3141
estimate_rotation_procrustes_ransac
train
def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False): """Calculate rotation between two sets of image coordinates using RANSAC. The inlier criterion is the reprojection error of y into image 1. Parameters ------------------------- x : array 2xN image coordinates in image 1 y : array 2xN image coordinates in image 2 camera : Camera model threshold : float pixel distance threshold to accept as inlier do_translation : bool Try to estimate the translation as well Returns ------------------------ R : array 3x3 The rotation that best fulfills X = RY t : array 3x1 translation if do_translation is True residual : array pixel distances ||x - xhat|| where xhat ~ KRY (and lens distortion) inliers : array Indices of the points (in X and Y) that are RANSAC inliers """ assert x.shape == y.shape assert x.shape[0] == 2 X = camera.unproject(x) Y = camera.unproject(y) data = np.vstack((X, Y, x)) assert data.shape[0] == 8 model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation) def eval_func(model, data): Y = data[3:6].reshape(3,-1) x = data[6:].reshape(2,-1) R, t = model
python
{ "resource": "" }
q3142
RANSAC
train
def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False): """Apply RANSAC. This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then it will be recalculated using the points in the consensus set. Parameters ------------ model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector) eval_func: Takes a model parameter (Lx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model
python
{ "resource": "" }
q3143
IMU.integrate
train
def integrate(self, pose_correction=np.eye(3), uniform=True): """Integrate angular velocity measurements to rotations. Parameters ------------- pose_correction : (3,3) ndarray, optional Rotation matrix that describes the relative pose between the IMU and something else (e.g. camera). uniform : bool If True (default), assume uniform sample rate. This will use a faster integration method. Returns ------------- rotations : (4, N) ndarray Rotations as unit quaternions with scalar as first element. """ if uniform: dt = float(self.timestamps[1]-self.timestamps[0]) # Must be python float for fastintegrate to work return fastintegrate.integrate_gyro_quaternion_uniform(self.gyro_data_corrected, dt) else: N = len(self.timestamps) integrated = np.zeros((4, N)) integrated[:,0] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation) # Iterate over all for i in range(1, len(self.timestamps)): w = pose_correction.dot(self.gyro_data[:, i]) # Change
python
{ "resource": "" }
q3144
GyroStream.from_csv
train
def from_csv(cls, filename): """Create gyro stream from CSV data Load data from a CSV file. The data must be formatted with three values per line: (x, y, z) where x, y, z is the measured angular velocity (in rad/s) of the specified
python
{ "resource": "" }
q3145
GyroStream.from_data
train
def from_data(cls, data): """Create gyroscope stream from data array Parameters ------------------- data : (N, 3) ndarray Data array of angular velocities (rad/s)
python
{ "resource": "" }
q3146
GyroStream.integrate
train
def integrate(self, dt): """Integrate gyro measurements to orientation using a uniform sample rate. Parameters ------------------- dt : float Sample distance in seconds Returns
python
{ "resource": "" }
q3147
gaussian_kernel
train
def gaussian_kernel(gstd): """Generate an odd-sized, truncated Gaussian kernel The generated filter kernel has a cutoff at $3\sigma$ and is normalized to sum to 1 Parameters ------------- gstd : float Standard deviation of filter Returns ------------- g : ndarray
python
{ "resource": "" }
q3148
subsample
train
def subsample(time_series, downsample_factor): """Subsample with Gaussian prefilter The prefilter will have the filter size $\sigma_g=.5*ssfactor$ Parameters -------------- time_series : ndarray Input signal downsample_factor : float Downsampling factor Returns -------------- ts_out : ndarray The downsampled signal """ Ns = np.int(np.floor(np.size(time_series)/downsample_factor)) g = gaussian_kernel(0.5*downsample_factor) ts_blur
python
{ "resource": "" }
q3149
upsample
train
def upsample(time_series, scaling_factor): """Upsample using linear interpolation The function uses replication of the value at edges Parameters -------------- time_series : ndarray Input signal scaling_factor : float The factor to upsample with Returns -------------- ts_out : ndarray The upsampled signal """ Ns0 = np.size(time_series) Ns = np.int(np.floor(np.size(time_series)*scaling_factor)) ts_out = np.zeros((Ns,1), dtype='float64') for k in
python
{ "resource": "" }
q3150
find_shift_pyr
train
def find_shift_pyr(ts1, ts2, nlevels): """ Find shift that best aligns two time series The shift that aligns the timeseries ts1 with ts2. This is sought using zero mean normalized cross correlation (ZNCC) in a coarse to fine search with an octave pyramid on nlevels levels. Parameters ---------------- ts1 : list_like The first timeseries ts2 : list_like The second timeseries nlevels : int Number of levels in pyramid Returns ---------------- ts1_shift : float How many samples to shift ts1 to align with ts2
python
{ "resource": "" }
q3151
to_rot_matrix
train
def to_rot_matrix(r): "Convert combined axis angle vector to rotation matrix" theta = np.linalg.norm(r) v = r/theta
python
{ "resource": "" }
q3152
add_pass_thru
train
def add_pass_thru(pass_thrus): """ Decorator adds explicit pass-through visit and depart methods """ def meth(self, node): pass def dec(cls): for element_name in pass_thrus: for meth_prefix in ('visit_', 'depart_'):
python
{ "resource": "" }
q3153
IndentLevel.write
train
def write(self): """ Add ``self.contents`` with current ``prefix`` and ``first_prefix`` Add processed ``self.contents`` to ``self.base``. The first line has ``first_prefix`` prepended, further lines have ``prefix`` prepended. Empty (all whitespace) lines get written as bare carriage returns, to avoid ugly extra whitespace. """ string = ''.join(self.content) lines = string.splitlines(True) if len(lines) == 0: return
python
{ "resource": "" }
q3154
Wallet.identifier
train
def identifier(self): """Get the identifier for this node. Extended keys can be identified by the Hash160 (RIPEMD160 after SHA256) of the public key's `key`. This corresponds exactly to the data used in traditional Bitcoin addresses. It is not advised to represent this data in base58 format though, as it may be interpreted as an address that
python
{ "resource": "" }
q3155
Wallet.get_child
train
def get_child(self, child_number, is_prime=None, as_private=True): """Derive a child key. :param child_number: The number of the child key to compute :type child_number: int :param is_prime: If True, the child is calculated via private derivation. If False, then public derivation is used. If None, then it is figured out from the value of child_number. :type is_prime: bool, defaults to None :param as_private: If False, strips the private key from the result. Defaults to True. If there is no private key present, this is ignored. :type as_private: bool Positive child_numbers (>= 0, < 2,147,483,648) produce publicly derived children. (prime=False) Negative numbers (> -2,147,483,648, < 0) use private derivation. (prime=True) NOTE: Python can't do -0, so if you want the privately derived 0th child you need to manually set is_prime=True. NOTE: negative numbered children are provided as a convenience because nobody wants to remember the above numbers. Negative numbers are considered 'prime children', which is described in the BIP32 spec as a leading 1 in a 32 bit unsigned int. This derivation is fully described at https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#child-key-derivation-functions # nopep8 """ boundary = 0x80000000 # Note: If this boundary check gets removed, then children above # the boundary should use private (prime) derivation. if abs(child_number) >= boundary: raise ValueError("Invalid child number %s" % child_number) # If is_prime isn't set, then we can infer it from the child_number if is_prime is None: # Prime children are either < 0 or > 0x80000000 if child_number < 0: child_number = abs(child_number) is_prime = True else: is_prime =
python
{ "resource": "" }
q3156
Wallet.crack_private_key
train
def crack_private_key(self, child_private_key): """Crack the parent private key given a child private key. BIP32 has a vulnerability/feature that allows you to recover the master private key if you're given a master public key and any of its publicly-derived child private keys. This is a pretty serious security vulnerability that looks as innocuous as this: >>> w = Wallet.new_random_wallet() >>> child = w.get_child(0, is_prime=False) >>> w_pub = w.public_copy() >>> assert w_pub.private_key is None >>> master_public_key = w_pub.serialize_b58(private=False) >>> # Now you put master_public_key on your website >>> # and give somebody a private key >>> public_master = Wallet.deserialize(master_public_key) >>> cracked_private_master = public_master.crack_private_key(child) >>> assert w == cracked_private_master # :( Implementation details from http://bitcoinmagazine.com/8396/deterministic-wallets-advantages-flaw/ # nopep8 """ if self.private_key: raise AssertionError("You already know the private key") if child_private_key.parent_fingerprint != self.fingerprint: raise ValueError("This is not a valid child") if child_private_key.child_number >= 0x80000000: raise ValueError( "Cannot crack private keys from private derivation") # Duplicate the public child derivation child_number_hex =
python
{ "resource": "" }
q3157
_pypi_push
train
def _pypi_push(dist): """Push created package to PyPI. Requires the following defined environment variables: - TWINE_USERNAME: The PyPI username to upload this package under - TWINE_PASSWORD: The password to the user's account Args: dist (str): The distribution to push. Must be a valid directory; shell globs are NOT allowed. """ # Register all distributions and wheels with PyPI. We have to list the dist # directory and register each file individually because `twine` doesn't # handle globs. for filename in os.listdir(dist): full_path =
python
{ "resource": "" }
q3158
deploy
train
def deploy(target): """Deploys the package and documentation. Proceeds in the following steps: 1. Ensures proper environment variables are set and checks that we are on Circle CI 2. Tags the repository with the new version 3. Creates a standard distribution and a wheel 4. Updates version.py to have the proper version 5. Commits the ChangeLog, AUTHORS, and version.py file 6. Pushes to PyPI 7. Pushes the tags and newly committed files Raises: `EnvironmentError`: - Not running on CircleCI - `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables are missing - Attempting to deploy to production from a branch that isn't master """ # Ensure proper environment if not os.getenv(CIRCLECI_ENV_VAR): # pragma: no cover raise EnvironmentError('Must be on CircleCI to run this script') current_branch = os.getenv('CIRCLE_BRANCH') if (target == 'PROD') and (current_branch != 'master'): raise EnvironmentError( f'Refusing to deploy to production from branch {current_branch!r}. ' f'Production deploys can only be made from master.') if target in ('PROD', 'TEST'): pypi_username = os.getenv(f'{target}_PYPI_USERNAME') pypi_password = os.getenv(f'{target}_PYPI_PASSWORD') else: raise ValueError(f"Deploy target must be 'PROD' or 'TEST', got {target!r}.") if not (pypi_username and pypi_password): # pragma: no cover raise EnvironmentError( f"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' " f"environment variables. These are required to push to PyPI.") # Twine requires these environment variables to be set. Subprocesses will # inherit these when we invoke them, so no need to pass them on the command # line. We want to avoid that in case something's logging each command run. os.environ['TWINE_USERNAME'] =
python
{ "resource": "" }
q3159
_get_triplet
train
def _get_triplet(dd): """Return a triplet from a dialogue dictionary. :param dd: Dialogue dictionary. :type dd: Dict[str, str] :return: (query, response, error response) :rtype: (str, str |
python
{ "resource": "" }
q3160
_load
train
def _load(content_or_fp): """Parse a YAML file or string and check the spec version. """ try: data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader) except Exception as e: raise type(e)('Malformed yaml file:\n%r' % format_exc()) try: ver = data['spec'] except: raise ValueError('The file does not specify a spec version') try: ver = tuple(map(int, (ver.split(".")))) except: raise ValueError("Invalid spec version format. Expect 'X.Y'" " (X and
python
{ "resource": "" }
q3161
parse_resource
train
def parse_resource(name): """Parse a resource file """ with closing(pkg_resources.resource_stream(__name__, name)) as fp:
python
{ "resource": "" }
q3162
update_component
train
def update_component(name, comp, component_dict): """Get a component from a component dict. """ for dia in component_dict.get('dialogues', ()): try: comp.add_dialogue(*_get_pair(dia)) except Exception as e: msg = 'In device %s, malformed dialogue %s\n%r' raise Exception(msg % (name, dia, e)) for prop_name, prop_dict in component_dict.get('properties', {}).items(): try: getter = (_get_pair(prop_dict['getter']) if 'getter' in prop_dict else None) setter = (_get_triplet(prop_dict['setter'])
python
{ "resource": "" }
q3163
get_bases
train
def get_bases(definition_dict, loader): """Collect dependencies. """ bases = definition_dict.get('bases', ()) if bases: bases = (loader.get_comp_dict(required_version=SPEC_VERSION_TUPLE[0],
python
{ "resource": "" }
q3164
get_channel
train
def get_channel(device, ch_name, channel_dict, loader, resource_dict): """Get a channel from a channels dictionary. :param ch_name: name of the channel :param channel_dict: channel dictionary :rtype: Channels """ channel_dict = get_bases(channel_dict,
python
{ "resource": "" }
q3165
get_device
train
def get_device(name, device_dict, loader, resource_dict): """Get a device from a device dictionary. :param name: name of the device :param device_dict: device dictionary :rtype: Device """ device = Device(name, device_dict.get('delimiter', ';').encode('utf-8')) device_dict = get_bases(device_dict, loader) err = device_dict.get('error', {}) device.add_error_handler(err) for itype, eom_dict in device_dict.get('eom', {}).items(): device.add_eom(itype, *_get_pair(eom_dict))
python
{ "resource": "" }
q3166
get_devices
train
def get_devices(filename, bundled): """Get a Devices object from a file. :param filename: full path of the file to parse or name of the resource. :param bundled: boolean indicating if it is a bundled resource. :rtype: Devices """ loader = Loader(filename, bundled) data = loader.data devices = Devices() # Iterate through the resources and generate each individual device # on demand. for resource_name, resource_dict in data.get('resources', {}).items(): device_name = resource_dict['device'] dd = loader.get_device_dict(device_name,
python
{ "resource": "" }
q3167
ChannelProperty.init_value
train
def init_value(self, string_value): """Create an empty defaultdict holding the default value. """
python
{ "resource": "" }
q3168
ChannelProperty.set_value
train
def set_value(self, string_value): """Set the current value for a channel. """
python
{ "resource": "" }
q3169
Channels.add_dialogue
train
def add_dialogue(self, query, response): """Add dialogue to channel. :param query: query string :param response: response string
python
{ "resource": "" }
q3170
Channels.add_property
train
def add_property(self, name, default_value, getter_pair, setter_triplet, specs): """Add property to channel :param name: property name :param default_value: default value as string :param getter_pair: (query, response) :param setter_triplet: (query, response, error) :param specs: specification of the Property """ self._properties[name] = ChannelProperty(self, name, default_value, specs)
python
{ "resource": "" }
q3171
Channels.match
train
def match(self, query): """Try to find a match for a query in the channel commands. """ if not self.can_select: ch_id = self._device._properties['selected_channel'].get_value() if ch_id in self._ids: self._selected = ch_id else: return response = self._match_dialog(query, self._dialogues['__default__']) if response is not None: return response response = self._match_getters(query, self._getters['__default__']) if response is not None: return response else: for ch_id in self._ids: self._selected
python
{ "resource": "" }
q3172
Channels._match_setters
train
def _match_setters(self, query): """Try to find a match """ q = query.decode('utf-8') for name, parser, response, error_response in self._setters: try: parsed = parser(q) logger.debug('Found response in setter of %s' % name) except ValueError: continue try: if isinstance(parsed, dict) and 'ch_id' in parsed: self._selected = parsed['ch_id']
python
{ "resource": "" }
q3173
Session.get_session_class
train
def get_session_class(cls, interface_type, resource_class): """Return the session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str :return: Session """
python
{ "resource": "" }
q3174
Session.register
train
def register(cls, interface_type, resource_class): """Register a session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str """ def _internal(python_class): if (interface_type, resource_class) in cls._session_classes:
python
{ "resource": "" }
q3175
to_bytes
train
def to_bytes(val): """Takes a text message and returns a tuple """ if val is NoResponse: return val val
python
{ "resource": "" }
q3176
Property.validate_value
train
def validate_value(self, string_value): """Validate that a value match the Property specs. """ specs = self.specs if 'type' in specs: value = specs['type'](string_value)
python
{ "resource": "" }
q3177
Component._match_dialog
train
def _match_dialog(self, query, dialogues=None): """Tries to match in dialogues :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
python
{ "resource": "" }
q3178
Component._match_getters
train
def _match_getters(self, query, getters=None): """Tries to match in getters :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if getters is None: getters = self._getters if query in getters: name, response = getters[query]
python
{ "resource": "" }
q3179
Component._match_setters
train
def _match_setters(self, query): """Tries to match in setters :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ q = query.decode('utf-8') for name, parser, response, error_response in self._setters: try: value = parser(q) logger.debug('Found response in setter of %s' % name) except ValueError:
python
{ "resource": "" }
q3180
Device.add_error_handler
train
def add_error_handler(self, error_input): """Add error handler to the device """ if isinstance(error_input, dict): error_response = error_input.get('response', {}) cerr = error_response.get('command_error', NoResponse) qerr = error_response.get('query_error', NoResponse) response_dict = {'command_error': cerr, 'query_error': qerr} register_list = error_input.get('status_register', []) for register_dict in register_list:
python
{ "resource": "" }
q3181
Device.add_eom
train
def add_eom(self, type_class, query_termination, response_termination): """Add default end of message for a given interface type and resource class. :param type_class: interface type and resource class as strings joined by space :param query_termination: end of message used in queries. :param response_termination: end of message used in responses. """ interface_type,
python
{ "resource": "" }
q3182
Device.write
train
def write(self, data): """Write data into the device input buffer. :param data: single element byte :type data: bytes """ logger.debug('Writing into device input buffer: %r' % data) if not isinstance(data, bytes): raise TypeError('data must be an instance of bytes') if len(data) != 1: msg = 'data must have a length of 1, not %d' raise ValueError(msg % len(data)) self._input_buffer.extend(data)
python
{ "resource": "" }
q3183
Device.read
train
def read(self): """Return a single byte from the output buffer """ if self._output_buffer:
python
{ "resource": "" }
q3184
Device._match
train
def _match(self, query): """Tries to match in dialogues, getters and setters and subcomponents :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ response = self._match_dialog(query) if response is not None: return response response = self._match_getters(query) if response is not None: return response response = self._match_registers(query) if response is not None: return response
python
{ "resource": "" }
q3185
Device._match_registers
train
def _match_registers(self, query): """Tries to match in status registers :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if query in self._status_registers: register
python
{ "resource": "" }
q3186
Device._match_errors_queues
train
def _match_errors_queues(self, query): """Tries to match in error queues :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None """ if query in self._error_queues:
python
{ "resource": "" }
q3187
Devices.add_device
train
def add_device(self, resource_name, device): """Bind device to resource name """ if device.resource_name is not None:
python
{ "resource": "" }
q3188
SequenceCursorPagination.get_ordering
train
def get_ordering(self, *args, **kwargs): """Take whatever the expected ordering is and then first order by QuerySet.""" result = super(SequenceCursorPagination, self).get_ordering(*args, **kwargs) # Because paginate_queryset sets self.ordering after reading it...we # need to only modify it sometimes. (This allows re-use of
python
{ "resource": "" }
q3189
SequenceCursorPagination.decode_cursor
train
def decode_cursor(self, request): """ Given a request with a cursor, return a `Cursor` instance. Differs from the standard CursorPagination to handle a tuple in the position field. """ # Determine if we have a cursor, and if so then decode it. encoded = request.query_params.get(self.cursor_query_param) if encoded is None: return None try: querystring = b64decode(encoded.encode('ascii')).decode('ascii') tokens = urlparse.parse_qs(querystring, keep_blank_values=True) offset = tokens.get('o', ['0'])[0] offset = _positive_int(offset, cutoff=self.offset_cutoff) reverse =
python
{ "resource": "" }
q3190
multiply_iterables
train
def multiply_iterables(it1, it2): """ Element-wise multiplication of two iterables. """ assert len(it1) == len(it2),\ "Can not
python
{ "resource": "" }
q3191
ComparatorMixin._generate_comparator
train
def _generate_comparator(cls, field_names): """ Construct a comparator function based on the field names. The comparator returns the first non-zero comparison value. Inputs: field_names (iterable of strings): The field names to sort on. Returns: A comparator function. """ # Ensure that field names is a list and not a tuple. field_names = list(field_names) # For fields that start with a '-', reverse the ordering of the # comparison. reverses = [1] * len(field_names) for i, field_name in enumerate(field_names): if field_name[0] == '-': reverses[i] = -1 field_names[i] = field_name[1:] field_names = [f.replace(LOOKUP_SEP, '.') for f in field_names] def comparator(i1, i2): # Get a tuple of values for comparison. v1 = attrgetter(*field_names)(i1) v2 = attrgetter(*field_names)(i2) # If there's only one arg supplied, attrgetter returns a single
python
{ "resource": "" }
q3192
QuerySetSequence._filter_or_exclude_querysets
train
def _filter_or_exclude_querysets(self, negate, **kwargs): """ Similar to QuerySet._filter_or_exclude, but run over the QuerySets in the QuerySetSequence instead of over each QuerySet's fields. """ # Ensure negate is a boolean. negate = bool(negate) for kwarg, value in kwargs.items(): parts = kwarg.split(LOOKUP_SEP) # Ensure this is being used to filter QuerySets. if parts[0] != '#': raise ValueError("Keyword '%s' is not a valid keyword to filter over, " "it must begin with '#'." % kwarg) # Don't allow __ multiple times. if len(parts) > 2: raise ValueError("Keyword '%s' must not contain multiple " "lookup separators." % kwarg) # The actual lookup is the second part. try: lookup = parts[1] except IndexError: lookup = 'exact' # Math operators that all have the same logic. LOOKUP_TO_OPERATOR = { 'exact': eq, 'iexact': eq, 'gt': gt, 'gte': ge, 'lt': lt, 'lte': le, } try: operator = LOOKUP_TO_OPERATOR[lookup] # These expect integers, this matches the logic in # IntegerField.get_prep_value(). (Essentially treat the '#' # field as an IntegerField.) if value is not None: value = int(value) self._queryset_idxs = filter(lambda i: operator(i, value) != negate, self._queryset_idxs) continue except KeyError: # It wasn't one of the above operators, keep trying. pass # Some of these seem to get handled as bytes. if lookup in ('contains', 'icontains'): value = six.text_type(value) self._queryset_idxs = filter(lambda i: (value in six.text_type(i)) != negate, self._queryset_idxs)
python
{ "resource": "" }
q3193
RollingCache.decode
train
def decode(self, name, as_map_key=False): """Always returns the name""" if is_cache_key(name) and
python
{ "resource": "" }
q3194
RollingCache.encode
train
def encode(self, name, as_map_key=False): """Returns the name the first time and the key after that""" if name in self.key_to_value:
python
{ "resource": "" }
q3195
read_chunk
train
def read_chunk(stream): """Ignore whitespace outside of strings. If we hit a string, read it in its entirety. """ chunk = stream.read(1) while chunk in SKIP: chunk = stream.read(1) if chunk == "\"": chunk += stream.read(1) while not
python
{ "resource": "" }
q3196
yield_json
train
def yield_json(stream): """Uses array and object delimiter counts for balancing. """ buff = u"" arr_count = 0 obj_count = 0 while True: buff += read_chunk(stream) # If we finish parsing all objs or arrays, yield a finished JSON # entity. if buff.endswith('{'): obj_count += 1 if buff.endswith('['): arr_count += 1 if buff.endswith(']'): arr_count -= 1
python
{ "resource": "" }
q3197
Marshaler.are_stringable_keys
train
def are_stringable_keys(self, m): """Test whether the keys within a map are stringable - a simple map, that can be optimized and whose keys can be cached """ for x in m.keys():
python
{ "resource": "" }
q3198
Marshaler.marshal_top
train
def marshal_top(self, obj, cache=None): """Given a complete object that needs to be marshaled into Transit data, and optionally a cache, dispatch accordingly, and flush the data directly into the IO stream. """ if not cache: cache = RollingCache() handler = self.handlers[obj] tag = handler.tag(obj) if tag: if len(tag) == 1:
python
{ "resource": "" }
q3199
Marshaler.dispatch_map
train
def dispatch_map(self, rep, as_map_key, cache): """Used to determine and dispatch the writing of a map - a simple map with strings as keys, or a complex map, whose keys are also compound types. """ if
python
{ "resource": "" }