Dataset columns: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, values 52.6k–1.16M).
Perform a Yelp Neighborhood API Search based on a location specifier. Args: location - textual location specifier of form: "address, city, state or zip, optional country" cc - ISO 3166-1 alpha-2 country code. (Optional)
def by_location(self, location, cc=None):
    header, content = self._http_request(self.BASE_URL, location=location, cc=cc)
    return json.loads(content)
882,784
Initialize Profile object with Riminder client. Args: client: Riminder client instance <Riminder object> Returns Profile instance object.
def __init__(self, client):
    self.client = client
    self.stage = ProfileStage(self.client)
    self.document = ProfileDocument(self.client)
    self.parsing = ProfileParsing(self.client)
    self.scoring = ProfileScoring(self.client)
    self.rating = ProfileRating(self.client)
    self.revealing = ProfileRevealing(self.client)
    self.json = ProfileJson(self.client)
883,509
Retrieve the profile information associated with a profile id or reference. Args: source_id: <string> source id profile_id: <string> profile id (optional) profile_reference: <string> profile reference (optional) Returns profile information
def get(self, source_id=None, profile_id=None, profile_reference=None):
    query_params = {}
    query_params["source_id"] = _validate_source_id(source_id)
    if profile_id:
        query_params["profile_id"] = _validate_profile_id(profile_id)
    if profile_reference:
        query_params["profile_reference"] = _validate_profile_reference(profile_reference)
    response = self.client.get('profile', query_params)
    return response.json()
883,513
Retrieve the interpretability information. Args: source_id: <string> source id profile_id: <string> profile id (optional) profile_reference: <string> profile reference (optional) filter_id: <string> filter id (optional) filter_reference: <string> filter reference (optional) Returns interpretability information
def get(self, source_id=None, profile_id=None, profile_reference=None, filter_id=None, filter_reference=None):
    query_params = {}
    query_params["source_id"] = _validate_source_id(source_id)
    if profile_id:
        query_params["profile_id"] = _validate_profile_id(profile_id)
    if profile_reference:
        query_params["profile_reference"] = _validate_profile_reference(profile_reference)
    if filter_id:
        query_params["filter_id"] = _validate_filter_id(filter_id)
    if filter_reference:
        query_params["filter_reference"] = _validate_filter_reference(filter_reference)
    response = self.client.get('profile/revealing', query_params)
    return response
883,515
md5sums a file, returning the hex digest Parameters: - f filename string
def md5sum(self, f):
    m = hashlib.md5()
    # open in binary mode: hashing in text mode would break on binary files
    with open(f, 'rb') as fh:
        while True:
            chunk = fh.read(BUF_SIZE)
            if not chunk:
                break
            m.update(chunk)
    return m.hexdigest()
883,560
streaming item iterator with low overhead duplicate file detection Parameters: - compare compare function between files (defaults to md5sum)
def iterdupes(self, compare=None, filt=None):
    if not compare:
        compare = self.md5sum
    seen_siz = {}  # store size -> first seen filename
    seen_sum = {}  # store chksum -> first seen filename
    size_func = lambda x: os.stat(x).st_size
    for (fsize, f) in self.iteritems(want_dirs=False, func=size_func, filt=filt):
        if fsize not in seen_siz:
            # state 1: no previous size collisions
            seen_siz[fsize] = f
            continue
        else:
            if seen_siz[fsize]:
                # state 2: defined key => str (initial, unscanned path)
                chksum = compare(seen_siz[fsize])
                if chksum in seen_sum:
                    yield (chksum, seen_siz[fsize])
                else:
                    seen_sum[chksum] = seen_siz[fsize]
                seen_siz[fsize] = None
            # state 3: defined key => None (already scanned path, no-op)
            chksum = compare(f)
            if chksum in seen_sum:
                # if it's a dupe, check if the first one was ever yielded, then yield
                if seen_sum[chksum]:
                    yield (chksum, seen_sum[chksum])
                    seen_sum[chksum] = None
                yield (chksum, f)
            else:
                # if not, set the initial filename
                seen_sum[chksum] = f
883,562
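A minimal usage sketch for iterdupes; the wrapper class name (Walker) and directory path are hypothetical stand-ins for whatever object provides the iteritems/md5sum methods above.

# Hypothetical usage: report duplicate files found under a directory tree.
walker = Walker('/tmp/photos')  # assumed wrapper exposing iterdupes()
for checksum, path in walker.iterdupes():
    # each yielded pair names a file whose md5 collides with another file
    print(checksum, path)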
Returns sampled action fluents for the current `state` and `timestep`. Args: state (Sequence[tf.Tensor]): The current state fluents. timestep (tf.Tensor): The current timestep. Returns: Sequence[tf.Tensor]: A tuple of action fluents.
def __call__(self, state: Sequence[tf.Tensor], timestep: tf.Tensor) -> Sequence[tf.Tensor]:
    action, _, _ = self._sample_actions(state)
    return action
883,649
Returns sampled action fluents and tensors related to the sampling. Args: state (Sequence[tf.Tensor]): A list of state fluents. Returns: Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with action fluents, an integer tensor for the number of samples, and a boolean tensor for checking all action preconditions.
def _sample_actions(self, state: Sequence[tf.Tensor]) -> Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]:
    default = self.compiler.compile_default_action(self.batch_size)
    bound_constraints = self.compiler.compile_action_bound_constraints(state)
    action = self._sample_action(bound_constraints, default)
    n, action, checking = self._check_preconditions(state, action, bound_constraints, default)
    return action, n, checking
883,650
Returns the dimensions of an image tensor. Args: images: 4-D Tensor of shape [batch, height, width, channels] dynamic_shape: Whether the input image has undetermined shape. If set to `True`, shape information will be retrieved at run time. Defaults to `False`. Returns: list of integers [batch, height, width, channels]
def _ImageDimensions(images, dynamic_shape=False):
    # A simple abstraction to provide names for each dimension. This abstraction
    # should make it simpler to switch dimensions in the future (e.g. if we ever
    # want to switch height and width.)
    if dynamic_shape:
        return array_ops.unpack(array_ops.shape(images))
    else:
        return images.get_shape().as_list()
884,024
Assert that we are working with properly shaped image. Args: image: 3-D Tensor of shape [height, width, channels] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if image.shape is not a [3] vector.
def _Check3DImage(image, require_static=True):
    try:
        image_shape = image.get_shape().with_rank(3)
    except ValueError:
        raise ValueError("'image' must be three-dimensional.")
    if require_static and not image_shape.is_fully_defined():
        raise ValueError("'image' must be fully defined.")
    if any(x == 0 for x in image_shape):
        raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape)
884,025
Load configuration for the service from self.config_file. Returns: dict: the parsed configuration. Raises: EventifyConfigError: if the configuration file does not exist.
def load_config(self):
    logger.debug('loading config file: %s', self.config_file)
    if os.path.exists(self.config_file):
        with open(self.config_file) as file_handle:
            return json.load(file_handle)
    else:
        logger.error('configuration file is required for eventify')
        logger.error('unable to load configuration for service')
        raise EventifyConfigError(
            'Configuration is required! Missing: %s' % self.config_file
        )
884,042
Query a URL with the given parameters. Parameters: url -> URL to query params -> dictionary with parameter values Returns: HTTP response code and headers; if an exception occurred, header fields are None
def _query(self, url=None, params=""):
    if url is None:
        raise NoUrlError("No URL was provided.")
    # return values
    headers = {'location': None, 'title': None}
    headerdata = urllib.urlencode(params)
    try:
        request = urllib2.Request(url, headerdata)
        response = urllib2.urlopen(request)
        # return numeric HTTP status code unless JSONP was requested
        if 'jsonp' in params:
            status = response.read()
        else:
            status = response.getcode()
        info = response.info()
        try:
            headers['location'] = info['Content-Location']
        except KeyError:
            pass
        try:
            headers['title'] = info['X-Instapaper-Title']
        except KeyError:
            pass
        return (status, headers)
    except urllib2.HTTPError as exception:
        # handle API not returning JSONP response on 403
        if 'jsonp' in params:
            return ('%s({"status":%d})' % (params['jsonp'], exception.code), headers)
        else:
            return (exception.code, headers)
    except IOError as exception:
        return (exception.code, headers)
884,468
Format an error message for missing positional arguments. Args: name: The function name. sig: The function's signature. num_params: The number of function parameters. Returns: str: A formatted error message.
def _format_parameter_error_message(name: str, sig: Signature, num_params: int) -> str:
    if num_params == 0:
        plural = 's'
        missing = 2
        arguments = "'slack' and 'event'"
    else:
        plural = ''
        missing = 1
        arguments = "'event'"
    return (f"{name}{sig} missing {missing} required positional "
            f"argument{plural}: {arguments}")
885,789
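A short illustration of how the helper above might be driven from inspect.signature; the handler function here is hypothetical.

import inspect

def handler():  # hypothetical event handler missing both expected parameters
    pass

sig = inspect.signature(handler)
num_params = len(sig.parameters)
if num_params < 2:
    # yields: "handler() missing 2 required positional arguments: 'slack' and 'event'"
    raise TypeError(_format_parameter_error_message('handler', sig, num_params))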
Propagate 'debug' wrapper into inner function calls if needed. Args: node (ast.AST): node statement to surround.
def visit_Call(self, node):
    if self.depth == 0:
        return node
    if self.ignore_exceptions is None:
        ignore_exceptions = ast.Name("None", ast.Load())
    else:
        ignore_exceptions = ast.List(self.ignore_exceptions, ast.Load())
    catch_exception_type = self.catch_exception if self.catch_exception else "None"
    catch_exception = ast.Name(catch_exception_type, ast.Load())
    depth = ast.Num(self.depth - 1 if self.depth > 0 else -1)
    debug_node_name = ast.Name("debug", ast.Load())
    call_extra_parameters = [] if IS_PYTHON_3 else [None, None]
    node.func = ast.Call(debug_node_name,
                         [node.func, ignore_exceptions, catch_exception, depth],
                         [], *call_extra_parameters)
    return node
886,154
Surround node statement with a try/except block to catch errors. This method is called for every node of the parsed code, and only changes statement lines. Args: node (ast.AST): node statement to surround.
def generic_visit(self, node):
    if (isinstance(node, ast.stmt)
            and not isinstance(node, ast.FunctionDef)):
        new_node = self.wrap_with_try(node)
        # handling try except statement
        if isinstance(node, self.ast_try_except):
            self.try_except_handler(node)
            return new_node
        # Run recursively on all sub nodes
        super(ErrorsCatchTransformer, self).generic_visit(node)
        return new_node
    # Run recursively on all sub nodes
    return super(ErrorsCatchTransformer, self).generic_visit(node)
886,155
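Both visitors above plug into the standard ast.NodeTransformer machinery. A minimal sketch of how such a transformer is typically applied, using only the standard library; the transformer instance is assumed to be constructed elsewhere with whatever arguments the class requires.

import ast

source = "print(1 + 1)"
tree = ast.parse(source)
tree = transformer.visit(tree)     # e.g. an ErrorsCatchTransformer instance
ast.fix_missing_locations(tree)    # new nodes need line/col info before compile
code = compile(tree, filename='<ast>', mode='exec')
exec(code)                         # runs the instrumented version of the source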
Logs a message to the console, with optional level parameter Args: - msg (str): message to send to console - level (int): log level; 0 for info, 1 for error (default = 0)
def log(msg, level=0):
    red = '\033[91m'
    endc = '\033[0m'
    # configure the logging module
    cfg = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'stdout': {
                'format': '[%(levelname)s]: %(asctime)s - %(message)s',
                'datefmt': '%x %X'
            },
            'stderr': {
                'format': red + '[%(levelname)s]: %(asctime)s - %(message)s' + endc,
                'datefmt': '%x %X'
            }
        },
        'handlers': {
            'stdout': {
                'class': 'logging.StreamHandler',
                'level': 'DEBUG',
                'formatter': 'stdout'
            },
            'stderr': {
                'class': 'logging.StreamHandler',
                'level': 'ERROR',
                'formatter': 'stderr'
            }
        },
        'loggers': {
            'info': {
                'handlers': ['stdout'],
                'level': 'INFO',
                'propagate': True
            },
            'error': {
                'handlers': ['stderr'],
                'level': 'ERROR',
                'propagate': False
            }
        }
    }
    dictConfig(cfg)
    lg = 'info' if level == 0 else 'error'
    lvl = 20 if level == 0 else 40
    logger = logging.getLogger(lg)
    logger.log(lvl, msg)
886,969
Connect to the assembly specified by the url. Args: - url (str): The url of the onshape item
def __init__(self, url):
    # Accept either a url OR uri
    if isinstance(url, Uri):
        self.uri = url
    else:
        self.uri = Uri(url)
886,973
Insert a part into this assembly. Args: - part (onshapepy.part.Part) A Part instance that will be inserted. Returns: - requests.Response: Onshape response data
def insert(self, part):
    params = {k: str(v) for k, v in part.params.items()}
    res = c.create_assembly_instance(self.uri.as_dict(), part.uri.as_dict(), params)
    return res
886,974
Creates a new instance of the Service. Args: executable_path: Path to the AndroidDriver. port: Port the service is running on. env: Environment variables. service_args: List of args to pass to the androiddriver service.
def __init__(self, executable_path: _PATH = 'default',
             port: Union[int, str] = 5037,
             env: Dict = None,
             service_args: Union[list, tuple] = None) -> None:
    self._service_args = service_args or []
    super(Service, self).__init__(executable_path, port=port, env=env)
886,999
Check whether the given function is a lambda function. .. testsetup:: from proso.func import is_lambda .. testcode:: def not_lambda_fun(): return 1 lambda_fun = lambda: 1 print( is_lambda(not_lambda_fun), is_lambda(lambda_fun) ) .. testoutput:: False True Args: fun (function) Returns: bool: True if the given function is a lambda function, False otherwise
def is_lambda(fun):
    return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__
887,074
Init a sonic visualiser environment structure based on the attributes of the main audio file Args: samplerate(int): media sample rate (Hz) nframes(int): number of samples wavpath(str): Full path to the wav file used in the current environment
def __init__(self, samplerate, nframes, wavpath):
    imp = minidom.getDOMImplementation()
    dt = imp.createDocumentType('sonic-visualiser', None, None)
    self.doc = doc = imp.createDocument(None, 'sv', dt)
    root = doc.documentElement
    self.__dname = dict()
    self.data = root.appendChild(doc.createElement('data'))
    self.display = root.appendChild(doc.createElement('display'))
    window = self.display.appendChild(doc.createElement('window'))
    self.defwidth = 900
    window.setAttribute('width', str(self.defwidth))
    window.setAttribute('height', str(856))
    self.selections = root.appendChild(doc.createElement('selections'))
    self.nbdata = 0
    # self.nchannels = nchannels
    self.samplerate = samplerate
    self.nframes = nframes
    self.__setMainWaveModel(wavpath)
887,331
Init a sonic visualiser environment structure based on the analysis of the main audio file. The audio file has to be encoded in wave. Args: wavpath(str): the full path to the wavfile
def init_from_wave_file(wavpath):
    try:
        samplerate, data = SW.read(wavpath)
        nframes = data.shape[0]
    except:
        # scipy cannot handle 24 bit wav files
        # and wave cannot handle 32 bit wav files
        try:
            w = wave.open(wavpath)
            samplerate = w.getframerate()
            nframes = w.getnframes()
        except:
            raise Exception('Cannot decode wavefile ' + wavpath)
    return SVEnv(samplerate, nframes, wavpath)
887,332
Add a continuous annotation layer. Args: x (float iterable): temporal indices of the dataset y (float iterable): values of the dataset Kwargs: view (<DOM Element: view>): environment view used to display the annotations; if set to None, a new view is created Returns: <DOM Element: view>: the view used to store the annotations
def add_continuous_annotations(self, x, y, colourName='Purple', colour='#c832ff',
                               name='', view=None, vscale=None, presentationName=None):
    model = self.data.appendChild(self.doc.createElement('model'))
    imodel = self.nbdata
    for atname, atval in [('id', imodel + 1),
                          ('dataset', imodel),
                          ('name', name),
                          ('sampleRate', self.samplerate),
                          ('start', int(min(x) * self.samplerate)),
                          ('end', int(max(x) * self.samplerate)),
                          ('type', 'sparse'),
                          ('dimensions', '2'),
                          ('resolution', '1'),
                          ('notifyOnAdd', 'true'),
                          ('minimum', min(y)),
                          ('maximum', max(y)),
                          ('units', '')]:
        model.setAttribute(atname, str(atval))
    dataset = self.data.appendChild(SVDataset2D(self.doc, str(imodel), self.samplerate))
    dataset.set_data_from_iterable(map(int, np.array(x) * self.samplerate), y)
    self.nbdata += 2
    # add layers
    valruler = self.__add_time_ruler()
    vallayer = self.__add_val_layer(imodel + 1)
    vallayer.setAttribute('colourName', colourName)
    vallayer.setAttribute('colour', colour)
    if presentationName:
        vallayer.setAttribute('presentationName', presentationName)
    vallayer.setAttribute('verticalScale', '0')
    if vscale is None:
        vallayer.setAttribute('scaleMinimum', str(min(y)))
        vallayer.setAttribute('scaleMaximum', str(max(y)))
    else:
        vallayer.setAttribute('scaleMinimum', str(vscale[0]))
        vallayer.setAttribute('scaleMaximum', str(vscale[1]))
    if view is None:
        view = self.__add_view()
    self.__add_layer_reference(view, valruler)
    self.__add_layer_reference(view, vallayer)
    return view
887,335
Add a labelled interval annotation layer. Args: temp_idx (float iterable): The temporal indices of intervals durations (float iterable): intervals durations labels (string iterable): interval labels values (int iterable): interval numeric values; if set to None, values are set to 0 Kwargs: view (<DOM Element: view>): environment view used to display the annotations; if set to None, a new view is created
def add_interval_annotations(self, temp_idx, durations, labels, values=None,
                             colourName='Purple', colour='#c832ff', name='',
                             view=None, presentationName=None):
    model = self.data.appendChild(self.doc.createElement('model'))
    imodel = self.nbdata
    for atname, atval in [('id', imodel + 1),
                          ('dataset', imodel),
                          ('name', name),
                          ('sampleRate', self.samplerate),
                          ('type', 'sparse'),
                          ('dimensions', '3'),
                          ('subtype', 'region'),
                          ('resolution', '1'),
                          ('notifyOnAdd', 'true'),
                          ('units', ''),
                          ('valueQuantization', '0')]:
        model.setAttribute(atname, str(atval))
    dataset = self.data.appendChild(SVDataset3D(self.doc, str(imodel), self.samplerate))
    if values is None:
        values = [0] * len(temp_idx)
    dataset.set_data_from_iterable(map(int, np.array(temp_idx) * self.samplerate),
                                   values,
                                   map(int, np.array(durations) * self.samplerate),
                                   labels)
    self.nbdata += 2
    valruler = self.__add_time_ruler()
    vallayer = self.__add_region_layer(imodel + 1, name)
    vallayer.setAttribute('colourName', colourName)
    vallayer.setAttribute('colour', colour)
    if presentationName:
        vallayer.setAttribute('presentationName', presentationName)
    if view is None:
        view = self.__add_view()
    self.__add_layer_reference(view, valruler)
    self.__add_layer_reference(view, vallayer)
    return view
887,336
Save the environment to a sv file to be used with sonic visualiser Args: outfname(str): full path to the file storing the environment
def save(self, outfname):
    f = BZ2File(outfname, 'w')
    self.doc.writexml(f, addindent='  ', newl='\n')
    f.close()
887,337
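A hedged end-to-end sketch tying the SVEnv pieces above together; the wav path and annotation values are invented, and init_from_wave_file is assumed to be exposed as a constructor-style helper as shown earlier.

# Hypothetical session: build an environment from a wav file, attach one
# continuous annotation layer, and write a bzip2-compressed .sv file.
env = init_from_wave_file('speech.wav')
view = env.add_continuous_annotations([0.0, 1.0, 2.0], [0.1, 0.5, 0.2], name='f0')
env.save('speech.sv')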
Provides the same functionality as ItemManager.filter_all_reachable_leaves(), but for multiple filters at the same time. Args: identifier_filters: list of identifier filters language (str): language used for further filtering (some objects for different languages share the same item) Returns: list: list of lists of item ids
def filter_all_reachable_leaves_many(self, identifier_filters, language, forbidden_identifiers=None):
    for i, identifier_filter in enumerate(identifier_filters):
        if len(identifier_filter) == 1 and not isinstance(identifier_filter[0], list):
            identifier_filters[i] = [identifier_filter]
    item_identifiers = [
        identifier[1:] if identifier.startswith('-') else identifier
        for identifier_filter in identifier_filters
        for identifier in set(flatten(identifier_filter))
    ]
    if forbidden_identifiers is None:
        forbidden_identifiers = []
    for identifier in forbidden_identifiers:
        item_identifiers.append(identifier)
    translated = self.translate_identifiers(item_identifiers, language)
    forbidden_item_ids = {translated[identifier] for identifier in forbidden_identifiers}
    leaves = self.get_leaves({translated[i] for i in item_identifiers},
                             language=language, forbidden_item_ids=forbidden_item_ids)
    result = []
    for identifier_filter in identifier_filters:
        if len(identifier_filter) == 0:
            result.append(self.get_all_available_leaves(language=language, forbidden_item_ids=forbidden_item_ids))
            continue
        filter_result = None
        filter_neg_result = set()
        for inner_filter in identifier_filter:
            inner_result = None
            inner_neg_result = None
            if len(inner_filter) == 0:
                raise Exception('Empty nested filters are not allowed.')
            for identifier in inner_filter:
                if inner_neg_result is not None:
                    raise Exception('Nested filters can not contain multiple statements.')
                if identifier.startswith('-'):
                    inner_neg_result = set(leaves[translated[identifier[1:]]])
                else:
                    if inner_result is None:
                        inner_result = set()
                    inner_result |= set(leaves[translated[identifier]])
            if inner_result is not None:
                if filter_result is None:
                    filter_result = inner_result
                else:
                    filter_result &= inner_result
            if inner_neg_result is not None:
                # accumulate negative results; the original `!=` here was a no-op typo
                filter_neg_result |= inner_neg_result
        result.append(sorted(list(filter_result - filter_neg_result)))
    return result
887,461
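To make the filter semantics concrete, a hypothetical call (the identifiers are invented): identifiers within an inner list are OR-ed, inner lists are AND-ed together, and a leading '-' subtracts those leaves from the result.

# Leaves reachable from ('maps' OR 'flags') AND NOT 'europe',
# assuming item_manager is an ItemManager instance.
item_manager.filter_all_reachable_leaves_many(
    [[['maps', 'flags'], ['-europe']]],
    language='en',
)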
Get a subgraph of items reachable from the given set of items through the 'child' relation. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: dict: item id -> list of items (child items), root items are referenced by None key
def get_children_graph(self, item_ids=None, language=None, forbidden_item_ids=None):
    if forbidden_item_ids is None:
        forbidden_item_ids = set()

    def _children(item_ids):
        if item_ids is None:
            items = Item.objects.filter(active=True).prefetch_related('children')
        else:
            item_ids = [ii for iis in item_ids.values() for ii in iis]
            items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('children')
        return {
            item.id: sorted([
                _item.id for _item in item.children.all()
                if _item.active and _item.id not in forbidden_item_ids
            ])
            for item in items if item.id not in forbidden_item_ids
        }

    if item_ids is None:
        return self._reachable_graph(None, _children, language=language)
    else:
        graph = self.get_children_graph(None, language, forbidden_item_ids=forbidden_item_ids)
        return self._subset_graph(graph, set(item_ids) - set(forbidden_item_ids))
887,463
Get a subgraph of items reachable from the given set of items through the 'parent' relation. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: dict: item id -> list of items (parent items), root items are referenced by None key
def get_parents_graph(self, item_ids, language=None):
    def _parents(item_ids):
        if item_ids is None:
            items = Item.objects.filter(active=True).prefetch_related('parents')
        else:
            item_ids = [ii for iis in item_ids.values() for ii in iis]
            items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents')
        return {item.id: sorted([_item.id for _item in item.parents.all()]) for item in items}

    if item_ids is None:
        return self._reachable_graph(None, _parents, language=language)
    else:
        graph = self.get_parents_graph(None, language)
        return self._subset_graph(graph, item_ids)
887,465
Get a subgraph of items reachable from the given set of items through any relation. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: dict: item id -> list of items (related items), root items are referenced by None key
def get_graph(self, item_ids, language=None):
    def _related(item_ids):
        if item_ids is None:
            items = Item.objects.filter(active=True).prefetch_related('parents', 'children')
        else:
            item_ids = [ii for iis in item_ids.values() for ii in iis]
            items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents', 'children')
        return {
            item.id: sorted([
                _item.id
                for rel in [item.parents.all(), item.children.all()]
                for _item in rel
            ])
            for item in items
        }

    if item_ids is None:
        return self._reachable_graph(None, _related, language=language)
    else:
        graph = self.get_graph(None, language)
        return self._subset_graph(graph, item_ids)
887,466
Translate a list of item ids to JSON objects which reference them. Args: item_ids (list[int]): item ids language (str): language used for further filtering (some objects for different languages share the same item) is_nested (function): mapping from item ids to booleans, where the boolean value indicates whether the item is nested Returns: dict: item id -> JSON object
def translate_item_ids(self, item_ids, language, is_nested=None):
    if is_nested is None:
        def is_nested_fun(x):
            return True
    elif isinstance(is_nested, bool):
        def is_nested_fun(x):
            return is_nested
    else:
        is_nested_fun = is_nested
    all_item_type_ids = ItemType.objects.get_all_item_type_ids()
    grouped = proso.list.group_by(item_ids, by=lambda item_id: all_item_type_ids[item_id])
    result = {}
    for item_type_id, items in grouped.items():
        with timeit('translating item type {}'.format(item_type_id)):
            item_type = ItemType.objects.get_all_types()[item_type_id]
            model = ItemType.objects.get_model(item_type_id)
            kwargs = {'{}__in'.format(item_type['foreign_key']): items}
            if 'language' in item_type:
                kwargs[item_type['language']] = language
            if any([not is_nested_fun(item_id) for item_id in items]) and hasattr(model.objects, 'prepare_related'):
                objs = model.objects.prepare_related()
            elif hasattr(model.objects, 'prepare'):
                objs = model.objects.prepare()
            else:
                objs = model.objects
            for obj in objs.filter(**kwargs):
                item_id = getattr(obj, item_type['foreign_key'])
                result[item_id] = obj.to_json(nested=is_nested_fun(item_id))
    return result
887,470
Get mapping of items to their reachable leaves. Leaves having inactive relations to other items are omitted. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: dict: item id -> list of items (reachable leaves)
def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
    forbidden_item_ids = set() if forbidden_item_ids is None else set(forbidden_item_ids)
    children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)
    counts = self.get_children_counts(active=None)
    if item_ids is None:
        item_ids = set(children.keys())  # not leaves

    def _get_leaves(item_id):
        leaves = set()

        def __search(item_ids):
            result = set(flatten([children.get(item_id, []) for item_id in item_ids]))
            new_leaves = {item_id for item_id in result if item_id not in children.keys()}
            leaves.update(new_leaves)
            return result - new_leaves

        fixed_point(
            is_zero=lambda to_visit: len(to_visit) == 0,
            minus=lambda to_visit, visited: to_visit - visited,
            plus=lambda visited_x, visited_y: visited_x | visited_y,
            f=__search,
            x={item_id}
        )
        leaves = {leaf for leaf in leaves if counts[leaf] == 0}
        if len(leaves) > 0:
            return leaves
        if counts[item_id] == 0 and item_id not in forbidden_item_ids:
            return {item_id}
        return set()

    return {item_id: _get_leaves(item_id) for item_id in item_ids}
887,471
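get_leaves drives its search through a generic fixed_point helper. One plausible implementation, inferred only from how it is called here (the real proso helper may differ):

def fixed_point(is_zero, minus, plus, f, x):
    # repeatedly expand the frontier with f until nothing new remains
    visited = x
    to_visit = x
    while not is_zero(to_visit):
        to_visit = minus(f(to_visit), visited)
        visited = plus(visited, to_visit)
    return visited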
Get all leaves reachable from the given set of items. Leaves having inactive relations to other items are omitted. Args: item_ids (list): items which are taken as roots for the reachability language (str): if specified, filter out items which are not available in the given language Returns: set: leaf items which are reachable from the given set of items
def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
    return sorted(set(flatten(
        self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values()
    )))
887,472
Get all items with outgoing edges from the given subgraph, drop all their parent relations, and then add parents according to the given subgraph. Args: parent_subgraph (dict): item id -> list of parents (item ids) invisible_edges (list|set): set of (from, to) tuples specifying invisible edges
def override_parent_subgraph(self, parent_subgraph, invisible_edges=None):
    with transaction.atomic():
        if invisible_edges is None:
            invisible_edges = set()
        children = list(parent_subgraph.keys())
        all_old_relations = dict(proso.list.group_by(
            list(ItemRelation.objects.filter(child_id__in=children)),
            by=lambda relation: relation.child_id
        ))
        to_delete = set()
        for child_id, parents in parent_subgraph.items():
            old_relations = {
                relation.parent_id: relation
                for relation in all_old_relations.get(child_id, [])
            }
            for parent_id in parents:
                if parent_id not in old_relations:
                    ItemRelation.objects.create(
                        parent_id=parent_id,
                        child_id=child_id,
                        visible=(child_id, parent_id) not in invisible_edges
                    )
                elif old_relations[parent_id].visible != ((child_id, parent_id) not in invisible_edges):
                    old_relations[parent_id].visible = (child_id, parent_id) not in invisible_edges
                    old_relations[parent_id].save()
            to_delete |= {old_relations[parent_id].pk for parent_id in set(old_relations.keys()) - set(parents)}
        ItemRelation.objects.filter(pk__in=to_delete).delete()
887,474
Generate a basic image using the auto-image endpoint of weeb.sh. This function is a coroutine. Parameters: imgtype: str - type of the generation to create, possible types are awooo, eyes, or won. face: str - only used with awooo type, defines color of face hair: str - only used with awooo type, defines color of hair/fur Return Type: image data
async def generate_image(self, imgtype, face=None, hair=None):
    if not isinstance(imgtype, str):
        raise TypeError("type of 'imgtype' must be str.")
    if face and not isinstance(face, str):
        raise TypeError("type of 'face' must be str.")
    if hair and not isinstance(hair, str):
        raise TypeError("type of 'hair' must be str.")
    if (face or hair) and imgtype != 'awooo':
        raise InvalidArguments("'face' and 'hair' are arguments only available on the 'awooo' image type")
    url = (f'https://api.weeb.sh/auto-image/generate?type={imgtype}'
           + ("&face=" + face if face else "")
           + ("&hair=" + hair if hair else ""))
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=self.__headers) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
887,497
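A hedged usage sketch for the coroutine above; the wrapper class name (Client) and its token argument are placeholders for however this client is actually constructed.

import asyncio

async def main():
    client = Client(token='TOKEN-HERE')  # hypothetical wrapper exposing generate_image
    image_bytes = await client.generate_image('awooo', face='red', hair='blue')
    with open('awooo.png', 'wb') as fh:
        fh.write(image_bytes)

asyncio.run(main())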
Generate a discord status icon below the image provided. This function is a coroutine. Parameters: status: str - a discord status, must be online, idle, dnd, or streaming avatar: str - http/s url pointing to an avatar, has to have proper headers and be a direct link to an image (Note, this url is encoded by the wrapper itself, so you don't have to worry about encoding it ;)) Return Type: image data
async def generate_status(self, status, avatar=None):
    if not isinstance(status, str):
        raise TypeError("type of 'status' must be str.")
    if avatar and not isinstance(avatar, str):
        raise TypeError("type of 'avatar' must be str.")
    url = (f'https://api.weeb.sh/auto-image/discord-status?status={status}'
           + (f'&avatar={urllib.parse.quote(avatar, safe="")}' if avatar else ''))
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=self.__headers) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
887,498
Generate a waifu insult image. This function is a coroutine. Parameters: avatar: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image Return Type: image data
async def generate_waifu_insult(self, avatar):
    if not isinstance(avatar, str):
        raise TypeError("type of 'avatar' must be str.")
    async with aiohttp.ClientSession() as session:
        async with session.post("https://api.weeb.sh/auto-image/waifu-insult",
                                headers=self.__headers, data={"avatar": avatar}) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
887,499
Generate a license. This function is a coroutine. Parameters: title: str - title of the license avatar: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image badges: list - list of 1-3 direct image urls. Same requirements as avatar (optional) widgets: list - list of 1-3 strings to fill the three boxes with (optional) Return Type: image data
async def generate_license(self, title, avatar, badges=None, widgets=None):
    if not isinstance(title, str):
        raise TypeError("type of 'title' must be str.")
    if not isinstance(avatar, str):
        raise TypeError("type of 'avatar' must be str.")
    if badges and not isinstance(badges, list):
        raise TypeError("type of 'badges' must be list.")
    if widgets and not isinstance(widgets, list):
        raise TypeError("type of 'widgets' must be list.")
    data = {"title": title, "avatar": avatar}
    if badges and len(badges) <= 3:
        data['badges'] = badges
    if widgets and len(widgets) <= 3:
        data['widgets'] = widgets
    async with aiohttp.ClientSession() as session:
        async with session.post("https://api.weeb.sh/auto-image/license",
                                headers=self.__headers, data=data) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
887,500
Generate a love ship. This function is a coroutine. Parameters: target_one: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image, image will be on the left side. target_two: str - http/s url pointing to an image, has to have proper headers and be a direct link to an image, image will be on the right side. Return Type: image data
async def generate_love_ship(self, target_one, target_two):
    if not isinstance(target_one, str):
        raise TypeError("type of 'target_one' must be str.")
    if not isinstance(target_two, str):
        raise TypeError("type of 'target_two' must be str.")
    data = {"targetOne": target_one, "targetTwo": target_two}
    async with aiohttp.ClientSession() as session:
        async with session.post("https://api.weeb.sh/auto-image/love-ship",
                                headers=self.__headers, data=data) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
887,501
Sync a local build from $ANDROID_PRODUCT_OUT to the device (default all). Args: option: 'system', 'vendor', 'oem', 'data', 'all'
def sync(self, option: str = 'all') -> None:
    if option in ['system', 'vendor', 'oem', 'data', 'all']:
        self._execute('-s', self.device_sn, 'sync', option)
    else:
        raise ValueError(f'There is no option named: {option!r}.')
887,788
List but don't copy. Args: option: 'system', 'vendor', 'oem', 'data', 'all'
def sync_l(self, option: str = 'all') -> None:
    if option in ['system', 'vendor', 'oem', 'data', 'all']:
        self._execute('-s', self.device_sn, 'sync', '-l', option)
    else:
        raise ValueError('There is no option named: {!r}.'.format(option))
887,789
Push package to the device and install it. Args: option: -l: forward lock application -r: replace existing application -t: allow test packages -s: install application on sdcard -d: allow version code downgrade (debuggable packages only) -g: grant all runtime permissions
def install(self, package: str, option: str = '-r') -> None:
    if not os.path.isfile(package):
        raise FileNotFoundError(f'{package!r} does not exist.')
    for i in option:
        if i not in '-lrtsdg':
            raise ValueError(f'There is no option named: {option!r}.')
    self._execute('-s', self.device_sn, 'install', option, package)
887,790
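A short usage sketch for the wrapper above; the device class name, serial number, and apk path are invented.

# Hypothetical device wrapper assumed to be constructed with a serial number.
device = Device(device_sn='emulator-5554')
device.install('app-debug.apk', option='-r')  # replace the existing application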
Trim memory. Args: level: HIDDEN | RUNNING_MODERATE | BACKGROUND | RUNNING_LOW | MODERATE | RUNNING_CRITICAL | COMPLETE
def app_trim_memory(self, pid: Union[int, str], level: str = 'RUNNING_LOW') -> None:
    _, error = self._execute('-s', self.device_sn, 'shell', 'am',
                             'send-trim-memory', str(pid), level)
    if error and error.startswith('Error'):
        raise ApplicationsException(error.split(':', 1)[-1].strip())
887,802
Record the display of devices running Android 4.4 (API level 19) and higher. Args: bit_rate: You can increase the bit rate to improve video quality, but doing so results in larger movie files. time_limit: Sets the maximum recording time, in seconds; the maximum value is 180 (3 minutes).
def screenrecord(self, bit_rate: int = 5000000, time_limit: int = 180,
                 filename: _PATH = '/sdcard/demo.mp4') -> None:
    self._execute('-s', self.device_sn, 'shell', 'screenrecord',
                  '--bit-rate', str(bit_rate),
                  '--time-limit', str(time_limit),
                  filename)
887,806
Record the display of devices running Android 4.4 (API level 19) and higher, then copy the recording to your computer. Args: bit_rate: You can increase the bit rate to improve video quality, but doing so results in larger movie files. time_limit: Sets the maximum recording time, in seconds; the maximum value is 180 (3 minutes).
def pull_screenrecord(self, bit_rate: int = 5000000, time_limit: int = 180,
                      remote: _PATH = '/sdcard/demo.mp4',
                      local: _PATH = 'demo.mp4') -> None:
    self.screenrecord(bit_rate, time_limit, filename=remote)
    self.pull(remote, local)
887,807
Finds an element by id. Args: id_: The id of the element to be found. update: If the interface has changed, this option should be True. Returns: The element if it was found. Raises: NoSuchElementException - If the element wasn't found. Usage: element = driver.find_element_by_id('foo')
def find_element_by_id(self, id_, update=False) -> Elements:
    return self.find_element(by=By.ID, value=id_, update=update)
887,816
Finds multiple elements by id. Args: id_: The id of the elements to be found. update: If the interface has changed, this option should be True. Returns: A list with elements if any was found. An empty list if not. Raises: NoSuchElementException - If the element wasn't found. Usage: elements = driver.find_elements_by_id('foo')
def find_elements_by_id(self, id_, update=False) -> Elements:
    return self.find_elements(by=By.ID, value=id_, update=update)
887,817
Finds an element by name. Args: name: The name of the element to be found. update: If the interface has changed, this option should be True. Returns: The element if it was found. Raises: NoSuchElementException - If the element wasn't found. Usage: element = driver.find_element_by_name('foo')
def find_element_by_name(self, name, update=False) -> Elements:
    return self.find_element(by=By.NAME, value=name, update=update)
887,818
Finds multiple elements by name. Args: name: The name of the elements to be found. update: If the interface has changed, this option should be True. Returns: A list with elements if any was found. An empty list if not. Raises: NoSuchElementException - If the element wasn't found. Usage: elements = driver.find_elements_by_name('foo')
def find_elements_by_name(self, name, update=False) -> Elements:
    return self.find_elements(by=By.NAME, value=name, update=update)
887,819
Finds an element by class. Args: class_: The class of the element to be found. update: If the interface has changed, this option should be True. Returns: The element if it was found. Raises: NoSuchElementException - If the element wasn't found. Usage: element = driver.find_element_by_class('foo')
def find_element_by_class(self, class_, update=False) -> Elements:
    return self.find_element(by=By.CLASS, value=class_, update=update)
887,820
Finds multiple elements by class. Args: class_: The class of the elements to be found. update: If the interface has changed, this option should be True. Returns: A list with elements if any was found. An empty list if not. Raises: NoSuchElementException - If the element wasn't found. Usage: elements = driver.find_elements_by_class('foo')
def find_elements_by_class(self, class_, update=False) -> Elements:
    return self.find_elements(by=By.CLASS, value=class_, update=update)
887,821
Creates a new instance of Commands. Args: executable: Path to the adb executable. On the Windows platform, the best choice is 'default'.
def __init__(self, executable: _PATH = 'default') -> None:
    _default_path = os.path.join(os.path.dirname(__file__), 'executable', 'adb.exe')
    if executable == 'default':
        self.path = _default_path
    elif executable.endswith('adb.exe'):
        if not os.path.isfile(executable):
            # refer to the argument, not self.path, which is not set yet
            raise FileNotFoundError(f'{executable!r} does not exist.')
        self.path = executable
    elif executable in ['adb', 'adb.exe']:
        PATH = os.environ['PATH']
        if not ('adb' in PATH or 'android' in PATH or 'platform-tools' in PATH):
            raise EnvironmentError('PATH does not exist.')
        self.path = executable
    else:
        self.path = _default_path
887,992
Enrich the given list of objects so that each has a URL. Args: request (django.http.request.HttpRequest): request which is currently processed json_list (list): list of dicts (JSON objects to be enriched) url_name (str|fun): pattern to create a url name taking object_type ignore_get (list): list of GET parameters which are ignored when the URL is generated. The objects are enriched in place.
def url(request, json_list, nested, url_name='show_{}', ignore_get=None):
    if not ignore_get:
        ignore_get = []
    if isinstance(url_name, str):
        url_string = str(url_name)
        url_name = lambda x: url_string.format(x)
    urls = cache.get('proso_urls')
    if urls is None:
        urls = {}
    else:
        urls = json_lib.loads(urls)
    cache_updated = False
    pass_string = pass_get_parameters_string(request, ignore_get)
    for json in json_list:
        if 'object_type' not in json or 'id' not in json:
            continue
        key = 'show_%s_%s' % (json['object_type'], json['id'])
        if key in urls:
            json['url'] = urls[key]
        else:
            cache_updated = True
            json['url'] = reverse(url_name(json['object_type']), kwargs={'id': json['id']})
            urls[key] = json['url']
        json['url'] = append_get_parameters(json['url'], pass_string)
    if cache_updated:
        cache.set('proso_urls', json_lib.dumps(urls), CACHE_EXPIRATION)
888,147
Print translation result in a better format Args: data (dict): translation result
def print_res(data):
    print('===================================')
    main_part = data['data']
    print(main_part['word_name'])
    symbols = main_part['symbols'][0]
    print("美式音标:[" + symbols['ph_am'] + "]")  # American phonetic notation
    print("英式音标:[" + symbols['ph_en'] + "]")  # British phonetic notation
    print('-----------------------------------')
    parts = symbols['parts']
    for part in parts:
        print(part['part'])
        for mean in part['means']:
            print(" ", mean)
    print('===================================')
888,169
Generate and set identifier of concept before saving object to DB Args: sender (class): should be Concept instance (Concept): the concept being saved
def generate_identifier(sender, instance, **kwargs):
    identifier = Concept.create_identifier(instance.query)
    qs = Concept.objects.filter(identifier=identifier, lang=instance.lang)
    if instance.pk:
        qs = qs.exclude(pk=instance.pk)
    if qs.count() > 0:
        raise ValueError("Concept identifier conflict")
    instance.identifier = identifier
888,481
Get mapping of concepts to items belonging to each concept. Args: concepts (list of Concept): Defaults to None, meaning all concepts lang (str): language of concepts; if None, use language of concepts Returns: dict: concept (int) -> list of item ids (int)
def get_concept_item_mapping(self, concepts=None, lang=None):
    if concepts is None:
        concepts = self.filter(active=True)
        if lang is not None:
            concepts = concepts.filter(lang=lang)
    if lang is None:
        languages = set([concept.lang for concept in concepts])
        if len(languages) > 1:
            raise Exception('Concepts have multiple languages')
        lang = list(languages)[0]
    item_lists = Item.objects.filter_all_reachable_leaves_many(
        [json.loads(concept.query) for concept in concepts], lang)
    return dict(zip([c.pk for c in concepts], item_lists))
888,483
Get mapping of item ids to concepts containing these items Args: lang (str): language of concepts Returns: dict: item (int) -> set of concepts (int)
def get_item_concept_mapping(self, lang):
    concepts = self.filter(active=True, lang=lang)
    return group_keys_by_value_lists(Concept.objects.get_concept_item_mapping(concepts, lang))
888,484
Recalculate given concepts for given users Args: concepts (dict): user id (int) -> set of concepts to recalculate lang (Optional[str]): language used to get items in all concepts (cached). Defaults to None; in that case items are fetched only for the used concepts
def recalculate_concepts(self, concepts, lang=None):
    if len(concepts) == 0:
        return
    if lang is None:
        items = Concept.objects.get_concept_item_mapping(
            concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))
    else:
        items = Concept.objects.get_concept_item_mapping(lang=lang)
    environment = get_environment()
    mastery_threshold = get_mastery_trashold()
    for user, concepts in concepts.items():
        all_items = list(set(flatten([items[c] for c in concepts])))
        answer_counts = environment.number_of_answers_more_items(all_items, user)
        correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user)
        predictions = dict(list(zip(all_items, get_predictive_model().predict_more_items(
            environment, user, all_items, time=get_time_for_knowledge_overview()))))
        new_user_stats = []
        stats_to_delete_condition = Q()
        for concept in concepts:
            answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(
                time_spent=Sum("response_time"),
                sessions=Count("session", True),
                time_first=Min("time"),
                time_last=Max("time"),
            )
            stats = {
                "answer_count": sum(answer_counts[i] for i in items[concept]),
                "correct_answer_count": sum(correct_answer_counts[i] for i in items[concept]),
                "item_count": len(items[concept]),
                "practiced_items_count": sum([answer_counts[i] > 0 for i in items[concept]]),
                "mastered_items_count": sum([predictions[i] >= mastery_threshold for i in items[concept]]),
                "prediction": sum([predictions[i] for i in items[concept]]) / len(items[concept]),
                "time_spent": answer_aggregates["time_spent"] / 1000,
                "session_count": answer_aggregates["sessions"],
                "time_first": answer_aggregates["time_first"].timestamp(),
                "time_last": answer_aggregates["time_last"].timestamp(),
            }
            stats_to_delete_condition |= Q(user=user, concept=concept)
            for stat_name, value in stats.items():
                new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))
        self.filter(stats_to_delete_condition).delete()
        self.bulk_create(new_user_stats)
888,489
Instantiates an instance of the Onshape class. Args: - stack (str): Base URL - creds (dict): Credentials dict with 'access_key' and 'secret_key' - logging (bool): Turn logging on or off
def __init__(self, stack, creds, logging):
    self._url = stack
    try:
        self._access_key = creds['access_key'].encode('utf-8')
        self._secret_key = creds['secret_key'].encode('utf-8')
    except TypeError:
        raise UserWarning("Specify a correct access key and secret key for the client")
    self._logging = logging
    if self._logging:
        utils.log('onshape instance created: url = %s, access key = %s' % (self._url, self._access_key))
888,643
Create the request signature to authenticate Args: - method (str): HTTP method - date (str): HTTP date header string - nonce (str): Cryptographic nonce - path (str): URL pathname - query (dict, default={}): URL query string in key-value pairs - ctype (str, default='application/json'): HTTP Content-Type
def _make_auth(self, method, date, nonce, path, query={}, ctype='application/json'):
    query = urlencode(query)
    hmac_str = (method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' +
                path + '\n' + query + '\n').lower().encode('utf-8')
    signature = base64.b64encode(hmac.new(self._secret_key, hmac_str, digestmod=hashlib.sha256).digest())
    auth = 'On ' + self._access_key.decode('utf-8') + ':HmacSHA256:' + signature.decode('utf-8')
    if self._logging:
        utils.log({
            'query': query,
            'hmac_str': hmac_str,
            'signature': signature,
            'auth': auth
        })
    return auth
888,645
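The signature scheme above can be reproduced standalone, which is handy when debugging authentication failures; all values below are dummies.

import base64, hashlib, hmac

# dummy values; in _make_auth these come from the client and the request
access_key, secret_key = b'dummy-access', b'dummy-secret'
method, nonce, date = 'get', 'xxxxxxxxxxxxxxxx', 'Mon, 01 Jan 2024 00:00:00 GMT'
ctype, path, query = 'application/json', '/api/documents', ''
hmac_str = (method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' +
            path + '\n' + query + '\n').lower().encode('utf-8')
signature = base64.b64encode(hmac.new(secret_key, hmac_str, digestmod=hashlib.sha256).digest())
print('On ' + access_key.decode('utf-8') + ':HmacSHA256:' + signature.decode('utf-8'))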
Creates a headers object to sign the request Args: - method (str): HTTP method - path (str): Request path, e.g. /api/documents. No query string - query (dict, default={}): Query string in key-value format - headers (dict, default={}): Other headers to pass in Returns: - dict: Dictionary containing all headers
def _make_headers(self, method, path, query={}, headers={}):
    date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    nonce = self._make_nonce()
    ctype = headers.get('Content-Type') if headers.get('Content-Type') else 'application/json'
    auth = self._make_auth(method, date, nonce, path, query=query, ctype=ctype)
    req_headers = {
        'Content-Type': 'application/json',
        'Date': date,
        'On-Nonce': nonce,
        'Authorization': auth,
        'User-Agent': 'Onshape Python Sample App',
        'Accept': 'application/json'
    }
    # add in user-defined headers
    for h in headers:
        req_headers[h] = headers[h]
    return req_headers
888,646
Find an element in the document with the given name - could be a PartStudio, Assembly or blob. Args: name (str): the name of the element. Returns: - onshapepy.uri of the element
def find_element(self, name, type=ElementType.ANY):
    for e in self.e_list:
        # if a type is specified and this isn't it, move to the next loop.
        if type.value and not e['elementType'] == type:
            continue
        if e["name"] == name:
            uri = self.uri
            uri.eid = e["id"]
            return uri
889,106
Take the given value and start enrichment by object_type. Args: request (django.http.request.HttpRequest): request which is currently processed value (dict|list|django.db.models.Model): in case of django.db.models.Model object (or list of these objects), to_json method is invoked Returns: dict|list
def enrich_json_objects_by_object_type(request, value):
    time_start_globally = time()
    if isinstance(value, list):
        json = [x.to_json() if hasattr(x, "to_json") else x for x in value]
    else:
        if isinstance(value, dict):
            json = value
        else:
            json = value.to_json()
    objects, nested = _collect_json_objects(json, by='object_type')
    for enricher_info in _get_OBJECT_TYPE_ENRICHER_ORDER():
        if len(enricher_info['object_types']) > 0:
            enricher_objects = flatten([objects.get(object_type, []) for object_type in enricher_info['object_types']])
            enricher_nested = any([nested.get(object_type, False) for object_type in enricher_info['object_types']])
        else:
            enricher_objects = flatten(objects.values())
            enricher_nested = any(nested.values())
        if len(enricher_objects) > 0:
            time_start = time()
            enricher_info['enricher'](request, enricher_objects, enricher_nested)
            LOGGER.debug('enrichment "{}" took {} seconds'.format(enricher_info['enricher_name'], time() - time_start))
            if not enricher_info['pure']:
                # if the enricher modified object types we must collect objects again
                objects, nested = _collect_json_objects(json, by='object_type')
    LOGGER.debug('The whole enrichment of json objects by their object_type took {} seconds.'.format(time() - time_start_globally))
    return json
889,293
Instantiates a new Onshape client. Attributes: - conf: the configuration that generated this client. This is read-only and for testing purposes. Args: - configuration (dict, optional): a dictionary of configuration options. Default behavior is to load this from a YAML file that is located in the user's home directory and named '.onshapepy'. For options that can be set, look at the documentation section on 'configuration'.
def __init__(self, conf=None, conf_file=".onshapepy"):
    default_conf = {
        'stack': 'https://cad.onshape.com',
        'logging': False,
        'creds': None
    }
    try:
        # read the file contents; yaml cannot consume a Path object directly
        user_conf = yaml.safe_load(Path.home().joinpath(conf_file).read_text())
        default_conf.update(user_conf)
    except Exception:
        pass
    if conf:
        default_conf.update(conf)
    self.conf = default_conf
    self._stack = default_conf['stack']
    self._api = Onshape(default_conf['stack'], default_conf['creds'], default_conf['logging'])
889,446
Create a new document. Args: - name (str, default='Test Document'): The doc name - owner_type (int, default=0): 0 for user, 1 for company, 2 for team - public (bool, default=True): Whether or not to make doc public Returns: - requests.Response: Onshape response data
def create_document(self, name='Test Document', owner_type=0, public=True):
    payload = {
        'name': name,
        'ownerType': owner_type,
        'isPublic': public
    }
    return self._api.request('post', '/api/documents', body=payload)
889,447
Renames the specified document. Args: - did (str): Document ID - name (str): New document name Returns: - requests.Response: Onshape response data
def rename_document(self, did, name):
    payload = {
        'name': name
    }
    return self._api.request('post', '/api/documents/' + did, body=payload)
889,448
Copy the current workspace. Args: - uri (dict): the uri of the workspace being copied. Needs to have a did and wid key. - new_name (str): the new name of the copied workspace. Returns: - requests.Response: Onshape response data
def copy_workspace(self, uri, new_name):
    payload = {
        'isPublic': True,
        'newName': new_name
    }
    return self._api.request('post', '/api/documents/' + uri['did'] + '/workspaces/' +
                             uri['wvm'] + '/copy', body=payload)
889,449
Create a workspace in the specified document. Args: - did (str): the document id of where to create the new workspace - name (str): the new name of the copied workspace. - version_id (str): the ID of the version to be copied into a new workspace Returns: - requests.Response: Onshape response data
def create_workspace(self, did, name, version_id=None):
    payload = {
        'isPublic': True,
        'name': name,
    }
    if version_id:
        payload['versionId'] = version_id
    return self._api.request('post', '/api/documents/d/' + did + '/workspaces', body=payload)
889,450
Creates a new assembly element in the specified document / workspace. Args: - did (str): Document ID - wid (str): Workspace ID - name (str, default='My Assembly') Returns: - requests.Response: Onshape response data
def create_assembly(self, did, wid, name='My Assembly'):
    payload = {
        'name': name
    }
    return self._api.request('post', '/api/assemblies/d/' + did + '/w/' + wid, body=payload)
889,451
Gets the feature list for specified document / workspace / part studio. Args: - did (str): Document ID - wid (str): Workspace ID - eid (str): Element ID Returns: - requests.Response: Onshape response data
def get_features(self, did, wid, eid):
    return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid +
                             '/e/' + eid + '/features')
889,452
Gets the tessellation of the edges of all parts in a part studio. Args: - did (str): Document ID - wid (str): Workspace ID - eid (str): Element ID Returns: - requests.Response: Onshape response data
def get_partstudio_tessellatededges(self, did, wid, eid):
    return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid +
                             '/e/' + eid + '/tessellatededges')
889,453
Uploads a file to a new blob element in the specified doc. Args: - did (str): Document ID - wid (str): Workspace ID - filepath (str, default='./blob.json'): Blob element location Returns: - requests.Response: Onshape response data
def upload_blob(self, did, wid, filepath='./blob.json'):
    chars = string.ascii_letters + string.digits
    boundary_key = ''.join(random.choice(chars) for i in range(8))
    mimetype = mimetypes.guess_type(filepath)[0]
    encoded_filename = os.path.basename(filepath)
    file_content_length = str(os.path.getsize(filepath))
    blob = open(filepath)
    req_headers = {
        'Content-Type': 'multipart/form-data; boundary="%s"' % boundary_key
    }
    # build request body
    payload = ('--' + boundary_key +
               '\r\nContent-Disposition: form-data; name="encodedFilename"\r\n\r\n' +
               encoded_filename + '\r\n')
    payload += ('--' + boundary_key +
                '\r\nContent-Disposition: form-data; name="fileContentLength"\r\n\r\n' +
                file_content_length + '\r\n')
    payload += ('--' + boundary_key +
                '\r\nContent-Disposition: form-data; name="file"; filename="' +
                encoded_filename + '"\r\n')
    payload += 'Content-Type: ' + mimetype + '\r\n\r\n'
    payload += blob.read()
    payload += '\r\n--' + boundary_key + '--'
    return self._api.request('post', '/api/blobelements/d/' + did + '/w/' + wid,
                             headers=req_headers, body=payload)
889,454
Exports an STL file from a part studio Args: - did (str): Document ID - wid (str): Workspace ID - eid (str): Element ID Returns: - requests.Response: Onshape response data
def part_studio_stl(self, did, wid, eid):
    req_headers = {
        'Accept': 'application/vnd.onshape.v1+octet-stream'
    }
    return self._api.request('get', '/api/partstudios/d/' + did + '/w/' + wid +
                             '/e/' + eid + '/stl', headers=req_headers)
889,455
Insert a configurable part into an assembly. Args: - assembly_uri (dict): did, wvm, wvm_type, and eid of the assembly into which the part will be inserted - part_uri (dict): did, wvm, and eid of the configurable part - configuration (dict): the configuration Returns: - requests.Response: Onshape response data
def create_assembly_instance(self, assembly_uri, part_uri, configuration):
    payload = {
        "documentId": part_uri["did"],
        "elementId": part_uri["eid"],
        # could be added if needed:
        # "partId": "String",
        # "featureId": "String",
        # "microversionId": "String",
        "versionId": part_uri["wvm"],
        "isAssembly": False,
        "isWholePartStudio": True,
        "configuration": self.encode_configuration(part_uri["did"], part_uri["eid"], configuration)
    }
    return self._api.request('post', '/api/assemblies/d/' + assembly_uri["did"] + '/' +
                             assembly_uri["wvm_type"] + '/' + assembly_uri["wvm"] +
                             '/e/' + assembly_uri["eid"] + '/instances', body=payload)
889,456
Encode parameters as a URL-ready string Args: - did (str): Document ID - eid (str): Element ID - parameters (dict): key-value pairs of the parameters to be encoded Returns: - configuration (str): the url-ready configuration string.
def encode_configuration(self, did, eid, parameters):
    # change to the type of list the API is expecting
    parameters = [{"parameterId": k, "parameterValue": v} for (k, v) in parameters.items()]
    payload = {
        'parameters': parameters
    }
    req_headers = {
        'Accept': 'application/vnd.onshape.v1+json',
        'Content-Type': 'application/json'
    }
    res = self._api.request('post', '/api/elements/d/' + did + '/e/' + eid +
                            '/configurationencodings', body=payload, headers=req_headers)
    return json.loads(res.content.decode("utf-8"))["encodedId"]
889,457
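And a short sketch of calling encode_configuration directly — the parameter IDs and values are invented, while the return value is the "encodedId" string the function extracts from the API response:

# Hypothetical call: parameter IDs and values are invented for illustration.
encoded = client.encode_configuration(
    did='docA',
    eid='elem1',
    parameters={'length': '10 mm', 'material': 'steel'},
)
# `encoded` holds the "encodedId" string, ready to use as a configuration value.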
Get the configuration of a PartStudio. Args: - uri (dict): points to a particular element Returns: - requests.Response: Onshape response data
def get_configuration(self, uri):
    req_headers = {
        'Accept': 'application/vnd.onshape.v1+json',
        'Content-Type': 'application/json'
    }
    return self._api.request('get', '/api/partstudios/d/' + uri["did"] + '/' + uri["wvm_type"] + '/' + uri["wvm"] + '/e/' + uri["eid"] + '/configuration', headers=req_headers)
889,458
Update the configuration specified in the payload Args: - did (str): Document ID - wid (str): Workspace ID - eid (str): Element ID - payload (dict): the request body Returns: - requests.Response: Onshape response data
def update_configuration(self, did, wid, eid, payload):
    req_headers = {
        'Accept': 'application/vnd.onshape.v1+json',
        'Content-Type': 'application/json'
    }
    res = self._api.request('post', '/api/partstudios/d/' + did + '/w/' + wid + '/e/' + eid + '/configuration', body=payload, headers=req_headers)
    return res
889,459
Tries to connect to the device to see if it is connectable. Args: host: The host to connect to. port: The port to connect to. Returns: True or False.
def is_connectable(host: str, port: Union[int, str]) -> bool:
    socket_ = None
    try:
        socket_ = socket.create_connection((host, port), 1)
        result = True
    except OSError:  # covers socket.timeout as well as refused/unreachable errors
        result = False
    finally:
        if socket_:
            socket_.close()
    return result
889,827
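A quick usage example of is_connectable, probing a local port with the one-second timeout baked into the function:

# Usage example: returns True only if the TCP connection succeeds within 1s.
if is_connectable('127.0.0.1', 8080):
    print('port 8080 is reachable')
else:
    print('port 8080 is not reachable')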
Initialization method. Args: name (str): name of the vertex.
def __init__(self, name):
    self.name = name
    self.edges_in = set()
    self.edges_out = set()
890,427
Connect this vertex to another one. Args: vertex (Vertex): vertex to connect to. weight (int): weight of the edge. Returns: Edge: the newly created edge.
def connect_to(self, vertex, weight=1):
    for edge in self.edges_out:
        if vertex == edge.vertex_in:
            return edge
    return Edge(self, vertex, weight)
890,428
Connect another vertex to this one. Args: vertex (Vertex): vertex to connect from. weight (int): weight of the edge. Returns: Edge: the newly created edge.
def connect_from(self, vertex, weight=1):
    for edge in self.edges_in:
        if vertex == edge.vertex_out:
            return edge
    return Edge(vertex, self, weight)
890,429
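A small sketch tying the two Vertex methods together: it creates two vertices, connects them, and shows that a repeated connect_to returns the existing edge instead of creating a duplicate:

# Sketch: exercise connect_to/connect_from on two vertices.
a = Vertex('a')
b = Vertex('b')
edge = a.connect_to(b, weight=2)  # creates an Edge going a -> b
same = a.connect_to(b)            # finds and returns the existing edge
assert edge is same
back = a.connect_from(b)          # creates the reverse edge, b -> a
assert back.vertex_out is b and back.vertex_in is a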
Initialization method. Args: vertex_out (Vertex): source vertex (edge going out). vertex_in (Vertex): target vertex (edge going in). weight (int): weight of the edge.
def __init__(self, vertex_out, vertex_in, weight=1):
    self.vertex_out = None
    self.vertex_in = None
    self.weight = weight
    self.go_from(vertex_out)
    self.go_in(vertex_in)
890,430
Tell the edge to go out from this vertex. Args: vertex (Vertex): vertex to go from.
def go_from(self, vertex):
    if self.vertex_out:
        self.vertex_out.edges_out.remove(self)
    self.vertex_out = vertex
    vertex.edges_out.add(self)
890,432
Tell the edge to go into this vertex. Args: vertex (Vertex): vertex to go into.
def go_in(self, vertex):
    if self.vertex_in:
        self.vertex_in.edges_in.remove(self)
    self.vertex_in = vertex
    vertex.edges_in.add(self)
890,433
Initialization method. An intermediary matrix is built to ease the creation of the graph. Args: *nodes (list of DSM/Package/Module): the nodes on which to build the graph. depth (int): the depth of the intermediary matrix. See the documentation for Matrix class.
def __init__(self, *nodes, depth=0):
    self.edges = set()
    vertices = []
    matrix = Matrix(*nodes, depth=depth)
    for key in matrix.keys:
        vertices.append(Vertex(key))
    for l, line in enumerate(matrix.data):
        for c, cell in enumerate(line):
            if cell > 0:
                self.edges.add(Edge(vertices[l], vertices[c], weight=cell))
    self.vertices = set(vertices)
890,434
Guess the optimal depth to use for the given list of arguments. Args: packages (list of str): list of packages. Returns: int: guessed depth to use.
def guess_depth(packages):
    if len(packages) == 1:
        return packages[0].count('.') + 2
    return min(p.count('.') for p in packages) + 1
890,633
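A quick worked example of the heuristic: one dotted package name yields its dot count plus two, while several names yield the minimum dot count plus one:

# Worked example of guess_depth's heuristic.
print(guess_depth(['pkg.sub']))           # 1 dot + 2 -> 3
print(guess_depth(['pkg.sub', 'other']))  # min(1, 0) + 1 -> 1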
Print the object in a file or on standard output by default. Args: format (str): output format (csv, json or text). output (file): descriptor to an opened file (defaults to standard output). **kwargs (): additional arguments.
def print(self, format=TEXT, output=sys.stdout, **kwargs):
    if format is None:
        format = TEXT
    if format == TEXT:
        print(self._to_text(**kwargs), file=output)
    elif format == CSV:
        print(self._to_csv(**kwargs), file=output)
    elif format == JSON:
        print(self._to_json(**kwargs), file=output)
890,634
Given an id, get the corresponding file info as the following: (relative path joined with file name, file info dict) Parameters: #. id (string): The file unique id string. :Returns: #. relativePath (string): The file relative path joined with file name. If None, it means file was not found. #. info (None, dictionary): The file information dictionary. If None, it means file was not found.
def get_file_info_by_id(self, id):
    for path, info in self.walk_files_info():
        if info['id'] == id:
            return path, info
    # none was found
    return None, None
890,816
Given an id, get the corresponding file info relative path joined with file name. Parameters: #. id (string): The file unique id string. :Returns: #. relativePath (string): The file relative path joined with file name. If None, it means file was not found.
def get_file_relative_path_by_id(self, id):
    for path, info in self.walk_files_info():
        if info['id'] == id:
            return path
    # none was found
    return None
890,817
Removes all files matching the given pattern under a directory tree Arguments: directory -- The root directory to walk pattern -- The file name pattern to remove, can contain wildcards Example: self._remove_files("output", "*.html")
def _remove_files(self, directory, pattern):
    for root, dirnames, file_names in os.walk(directory):
        for file_name in fnmatch.filter(file_names, pattern):
            os.remove(os.path.join(root, file_name))
890,871
Add a user to the specified LDAP group. Args: group: Name of group to update username: Username of user to add Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info.
def add_user(self, group, username):
    try:
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:  # pragma: no cover
        raise err from None

    operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
    self.client.modify(self.__distinguished_name(group), operation)
890,928
Remove a user from the specified LDAP group. Args: group: Name of group to update username: Username of user to remove Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info.
def remove_user(self, group, username):
    try:
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:  # pragma: no cover
        raise err from None

    operation = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}
    self.client.modify(self.__distinguished_name(group), operation)
890,929
Lookup GID for the given group. Args: group: Name of group whose ID needs to be looked up Returns: A bytestring representation of the group ID (gid) for the group specified Raises: ldap_tools.exceptions.NoGroupsFound: No Groups were returned by LDAP ldap_tools.exceptions.TooManyResults: More than one group was returned by LDAP
def lookup_id(self, group):
    # renamed from `filter` to avoid shadowing the builtin
    search_filter = ["(cn={})".format(group), "(objectclass=posixGroup)"]
    results = self.client.search(search_filter, ['gidNumber'])

    if len(results) < 1:
        raise ldap_tools.exceptions.NoGroupsFound('No Groups Returned by LDAP')
    elif len(results) > 1:
        raise ldap_tools.exceptions.TooManyResults('Multiple groups found. Please narrow your search.')
    else:
        return results[0].gidNumber.value
890,930
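A hedged usage sketch of the group API methods above — `group_api` stands in for an instance already wired to an LDAP client, which is not shown in the source:

# Hypothetical usage: `group_api` is an assumed, already-configured instance.
gid = group_api.lookup_id('developers')   # raises unless exactly one group matches
group_api.add_user('developers', 'jdoe')  # issues MODIFY_ADD on memberUid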
Add object to LDAP. Args: distinguished_name: the DN of the LDAP record to be added object_class: The objectClass of the record to be added. This is a list of length >= 1. attributes: a dictionary of LDAP attributes to add See ldap_tools.api.group.API#__ldap_attr
def add(self, distinguished_name, object_class, attributes):
    self.conn.add(distinguished_name, object_class, attributes)
890,992
Decorate a function so that its arguments are printed before calling it. Args: output: writable to print args. (Default: sys.stdout)
def print_args(output=sys.stdout):
    def decorator(func):
        @wraps(func)
        def _(*args, **kwargs):
            output.write("Args: {0}, KwArgs: {1}\n".format(str(args), str(kwargs)))
            return func(*args, **kwargs)
        return _
    return decorator
891,043
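A usage example for the decorator, which logs positional and keyword arguments before delegating to the wrapped function:

# Usage example: arguments are written to stdout before each call.
@print_args()
def add(a, b):
    return a + b

add(1, b=2)  # prints "Args: (1,), KwArgs: {'b': 2}" and returns 3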
Word-level n-grams in a string By default, whitespace is assumed to be a word boundary. >>> ng.word_ngrams('This is not a test!') [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')] If the sequence's length is less than or equal to n, the n-grams are simply the sequence itself. >>> ng.word_ngrams('Test!') [('Test!',)] Args: s: a string Returns: list: tuples of word-level n-grams
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    words = token_fn(s)  # renamed from `tokens` to avoid shadowing the tokens module
    return __ngrams(words, n=min(len(words), n))
891,132
Returns the n-grams that match between two sequences See also: SequenceMatcher.get_matching_blocks Args: s1: a string s2: another string ngrams_fn: a function that maps a string to its n-grams n: an int for the n in n-gram Returns: set: the n-grams found in both sequences
def __matches(s1, s2, ngrams_fn, n=3):
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    return ngrams1.intersection(ngrams2)
891,134
Character-level n-grams that match between two strings Args: s1: a string s2: another string n: an int for the n in n-gram Returns: set: the n-grams found in both strings
def char_matches(s1, s2, n=3):
    return __matches(s1, s2, char_ngrams, n=n)
891,135
Word-level n-grams that match between two strings Args: s1: a string s2: another string n: an int for the n in n-gram Returns: set: the n-grams found in both strings
def word_matches(s1, s2, n=3):
    return __matches(s1, s2, word_ngrams, n=n)
891,136
The fraction of n-grams matching between two sequences, i.e. the Sørensen–Dice coefficient of the two n-gram sets Args: s1: a string s2: another string ngrams_fn: a function that maps a string to its n-grams n: an int for the n in n-gram Returns: float: the fraction of n-grams matching
def __similarity(s1, s2, ngrams_fn, n=3):
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))
891,137
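To make the formula concrete, here is the score worked out by hand on character trigrams of 'banana' and 'bananas' — tuple-shaped n-grams are an assumption, since char_ngrams itself is defined elsewhere:

# Worked example of the score __similarity computes (n-gram shape assumed).
ngrams1 = {('b','a','n'), ('a','n','a'), ('n','a','n')}                 # 'banana'
ngrams2 = {('b','a','n'), ('a','n','a'), ('n','a','n'), ('n','a','s')}  # 'bananas'
matches = ngrams1 & ngrams2                               # 3 shared trigrams
score = 2 * len(matches) / (len(ngrams1) + len(ngrams2))  # 6 / 7 ~= 0.857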
Add limitations of given spec to self's. Args: spec (PackageSpec): another spec.
def add(self, spec):
    for limit in spec.limit_to:
        if limit not in self.limit_to:
            self.limit_to.append(limit)
891,175