def publish( self, resource_group_name, automation_account_name, runbook_name, custom_headers=None, raw=False, polling=True, **operation_config): raw_result = self._publish_initial( resource_group_name=resource_group_name, automation_account_name=automation_account_name, runbook_name=runbook_name, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) client_raw_response.add_headers({ 'location': 'str', }) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
def get_rules(self) -> parsing.Node: res = None try: res = self.eval_rule('bnf_dsl') if not res: self.diagnostic.notify( error.Severity.ERROR, "Parse error in '%s' in EBNF bnf" % self._lastRule, error.LocationInfo.from_maxstream(self._stream) ) raise self.diagnostic except error.Diagnostic as d: d.notify( error.Severity.ERROR, "Parse error in '%s' in EBNF bnf" % self._lastRule ) raise d return res
def save(self, filename=None, deleteid3=False): if filename is None: filename = self.filename f = open(filename, 'rb+') try: self.metadata_blocks.append(Padding(b'\x00' * 1020)) MetadataBlock.group_padding(self.metadata_blocks) header = self.__check_header(f) available = self.__find_audio_offset(f) - header data = MetadataBlock.writeblocks(self.metadata_blocks) if deleteid3 and header > 4: available += header - 4 header = 4 if len(data) > available: padding = self.metadata_blocks[-1] newlength = padding.length - (len(data) - available) if newlength > 0: padding.length = newlength data = MetadataBlock.writeblocks(self.metadata_blocks) assert len(data) == available elif len(data) < available: self.metadata_blocks[-1].length += (available - len(data)) data = MetadataBlock.writeblocks(self.metadata_blocks) assert len(data) == available if len(data) != available: diff = (len(data) - available) insert_bytes(f, diff, header) f.seek(header - 4) f.write(b"fLaC" + data) if deleteid3: try: f.seek(-128, 2) except IOError: pass else: if f.read(3) == b"TAG": f.seek(-128, 2) f.truncate() finally: f.close()
def text(self, etype, value, tb, tb_offset=None, context=5):
    tb_list = self.structured_traceback(etype, value, tb, tb_offset, context)
    return self.stb2text(tb_list)
def child_object(self):
    from . import types
    child_klass = types.get(self.task_type.split('.')[1])
    return child_klass.retrieve(self.task_id, client=self._client)
def controller_factory(cls, passes, options, **partial_controller):
    if None in partial_controller.values():
        raise TranspilerError('The controller needs a condition.')
    if partial_controller:
        for registered_controller in cls.registered_controllers.keys():
            if registered_controller in partial_controller:
                return cls.registered_controllers[registered_controller](
                    passes, options, **partial_controller)
        raise TranspilerError("The controllers for %s are not registered"
                              % partial_controller)
    else:
        return FlowControllerLinear(passes, options)
def sprint(text, *colors):
    if IS_ANSI_TERMINAL and colors:
        return "\33[{}m{content}\33[{}m".format(
            ";".join(str(color) for color in colors), RESET, content=text)
    return text
def __build_question(html_question, question, comments): question_object = {} question_container = AskbotParser.parse_question_container(html_question[0]) question_object.update(question_container) if comments[int(question['id'])]: question_object['comments'] = comments[int(question['id'])] answers = [] for page in html_question: answers.extend(AskbotParser.parse_answers(page)) if len(answers) != 0: question_object['answers'] = answers for answer in question_object['answers']: if comments[int(answer['id'])]: answer['comments'] = comments[int(answer['id'])] return question_object
def gpio_interrupts_enable(self):
    try:
        bring_gpio_interrupt_into_userspace()
        set_gpio_interrupt_edge()
    except Timeout as e:
        # exceptions have no .message attribute in Python 3; format the exception itself
        raise InterruptEnableException(
            "There was an error bringing gpio%d into userspace. %s"
            % (GPIO_INTERRUPT_PIN, e)
        )
def request(self, method, url, **kwargs): opts = { 'allow_redirects': True, 'auth': self._auth, 'data': {}, 'files': None, 'headers': dict(self._headers), 'params': {}, 'timeout': 80, 'verify': True } raw = kwargs.pop('raw', False) debug = kwargs.pop('debug', False) opts.update(kwargs) method = method.upper() if opts['files']: opts['headers'].pop('Content-Type', None) else: opts['data'] = json.dumps(opts['data']) if not url.startswith(self._host): url = urljoin(self._host, url) logger.debug('API %s Request: %s' % (method, url)) if debug: self._log_raw_request(method, url, **opts) try: response = self._session.request(method, url, **opts) except Exception as e: _handle_request_error(e) if 429 == response.status_code: delay = int(response.headers['retry-after']) + 1 logger.warn('Too many requests. Retrying in {0}s.'.format(delay)) time.sleep(delay) return self.request(method, url, **kwargs) if not (200 <= response.status_code < 400): _handle_api_error(response) if raw or response.status_code in [204, 301, 302]: return response return response.json()
def make_default_options_response(self): adapter = _request_ctx_stack.top.url_adapter if hasattr(adapter, 'allowed_methods'): methods = adapter.allowed_methods() else: methods = [] try: adapter.match(method='--') except MethodNotAllowed as e: methods = e.valid_methods except HTTPException as e: pass rv = self.response_class() rv.allow.update(methods) return rv
def load(self, path):
    self.network = graph.Network.load(path)
    return self.network
def _uri2path(self, uri):
    if uri == self.package_name:
        return os.path.join(self.root_path, '__init__.py')
    path = uri.replace('.', os.path.sep)
    path = path.replace(self.package_name + os.path.sep, '')
    path = os.path.join(self.root_path, path)
    if os.path.exists(path + '.py'):
        path += '.py'
    elif os.path.exists(os.path.join(path, '__init__.py')):
        path = os.path.join(path, '__init__.py')
    else:
        return None
    return path
def describe_object(self, obj):
    conn = self.get_conn()
    return conn.__getattr__(obj).describe()
def get_ilvl(li, w_namespace):
    ilvls = li.xpath('.//w:ilvl', namespaces=li.nsmap)
    if len(ilvls) == 0:
        return -1
    return int(ilvls[0].get('%sval' % w_namespace))
def validateAttrib(self, method, cls = None): any = False for group in self.attribs: match = True for key, value in group: attr = get_method_attr(method, cls, key) if callable(value): if not value(key, method, cls): match = False break elif value is True: if not bool(attr): match = False break elif value is False: if bool(attr): match = False break elif type(attr) in (list, tuple): if not str(value).lower() in [str(x).lower() for x in attr]: match = False break else: if (value != attr and str(value).lower() != str(attr).lower()): match = False break any = any or match if any: return None return False
def matchmaker_matches(institute_id, case_name): user_obj = store.user(current_user.email) if 'mme_submitter' not in user_obj['roles']: flash('unauthorized request', 'warning') return redirect(request.referrer) mme_base_url = current_app.config.get('MME_URL') mme_token = current_app.config.get('MME_TOKEN') if not mme_base_url or not mme_token: flash('An error occurred reading matchmaker connection parameters. Please check config file!', 'danger') return redirect(request.referrer) institute_obj, case_obj = institute_and_case(store, institute_id, case_name) data = controllers.mme_matches(case_obj, institute_obj, mme_base_url, mme_token) if data and data.get('server_errors'): flash('MatchMaker server returned error:{}'.format(data['server_errors']), 'danger') return redirect(request.referrer) elif not data: data = { 'institute' : institute_obj, 'case' : case_obj } return data
def get_container(self, name, collection_id, tag="latest", version=None): from sregistry.database.models import Container if version is None: container = Container.query.filter_by(collection_id = collection_id, name = name, tag = tag).first() else: container = Container.query.filter_by(collection_id = collection_id, name = name, tag = tag, version = version).first() return container
def _plant_trie(strings: _List[str]) -> dict:
    trie = {}
    for string in strings:
        d = trie
        for char in string:
            d = d.setdefault(char, {})
        d[''] = None
    return trie
def _is_known_unsigned_by_dtype(dt):
    return {
        tf.bool: True,
        tf.uint8: True,
        tf.uint16: True,
    }.get(dt.base_dtype, False)
def create_record_and_pid(data): from invenio_records.api import Record from invenio_pidstore.models import PersistentIdentifier, PIDStatus, \ RecordIdentifier deposit = Record.create(data=data) created = arrow.get(data['_p']['created']).datetime deposit.model.created = created.replace(tzinfo=None) depid = deposit['_p']['id'] pid = PersistentIdentifier.create( pid_type='depid', pid_value=str(depid), object_type='rec', object_uuid=str(deposit.id), status=PIDStatus.REGISTERED ) if RecordIdentifier.query.get(int(depid)) is None: RecordIdentifier.insert(int(depid)) deposit.commit() return deposit, pid
def is_embargoed(record):
    return record.get('access_right') == 'embargoed' and \
        record.get('embargo_date') and \
        record.get('embargo_date') > datetime.utcnow().date()
def add_comment(self, comment_text):
    return self.fetch_json(
        uri_path=self.base_uri + '/actions/comments',
        http_method='POST',
        query_params={'text': comment_text}
    )
def pkt_text(pkt):
    if pkt.src.upper() in BANNED_DEVICES:
        body = ''
    elif pkt.src.upper()[:8] in AMAZON_DEVICES:
        body = '{} (Amazon Device)'.format(pkt.src)
    else:
        body = pkt.src
    return body
def _update_barrier_status(self): with open(self.log_file) as fh: for line in fh: if "Session aborted" in line: return if "<<< barrier arrive" in line: process_m = re.match(".*process: (.*)\)", line) if process_m: process = process_m.group(1) if process in self.processes: self.processes[process]["barrier"] = "C"
def copy_notebook(self, notebook_id):
    last_mod, nb = self.get_notebook_object(notebook_id)
    name = nb.metadata.name + '-Copy'
    path, name = self.increment_filename(name)
    nb.metadata.name = name
    notebook_id = self.new_notebook_id(name)
    self.save_notebook_object(notebook_id, nb)
    return notebook_id
def entity_name_decorator(top_cls):
    class_name = inflection.underscore(top_cls.__name__).lower()

    def entity_name(cls):
        return class_name

    top_cls.entity_name = classmethod(entity_name)
    return top_cls
def init_modules(self):
    if not self.config:
        raise ValueError("please read your config file.")
    log.debug("begin to import customer's service modules.")
    modules = ServiceModules(self.config)
    modules.import_modules()
    log.debug("end to import customer's service modules.")
def _normalized(self, data):
    int_keys = ('frames', 'width', 'height', 'size')
    for key in int_keys:
        if key not in data:
            continue
        try:
            data[key] = int(data[key])
        except ValueError:
            pass
    return data
def get_product_by_name(self, name):
    return next(i for i in self.items if i.name.lower() == name.lower())
def add_point(self, point, value, check=True):
    if self.tier_type != 'TextTier':
        raise Exception('Tiertype must be TextTier.')
    if check and any(i for i in self.intervals if i[0] == point):
        raise Exception('No overlap is allowed')
    self.intervals.append((point, value))
def create_adapter(cmph, ffi, obj): if is_file_location(obj): fd = open(obj) adapter = cmph.cmph_io_nlfile_adapter(fd) def dtor(): cmph.cmph_io_nlfile_adapter_destroy(adapter) fd.close() return _AdapterCxt(adapter, dtor) elif is_file(obj): adapter = cmph.cmph_io_nlfile_adapter(obj) dtor = lambda: cmph.cmph_io_nlfile_adapter_destroy(adapter) return _AdapterCxt(adapter, dtor) elif isinstance(obj, Sequence): if len(obj) == 0: raise ValueError("An empty sequence is already a perfect hash!") return _create_pyobj_adapter(cmph, ffi, obj) else: raise ValueError("data cannot have a cmph wrapper generated")
def _hash_file(self, algo):
    hash_data = getattr(hashlib, algo)()
    with open(self.path, "rb") as file:
        content = file.read()
        hash_data.update(content)
    return hash_data.hexdigest()
def to_string(self, indent):
    ind = indent * ' '
    print(ind, 'qreg')
    self.children[0].to_string(indent + 3)
def _installation_trace(self, frame_unused, event_unused, arg_unused):
    sys.settrace(None)
    fn = self._start_tracer()
    if fn:
        fn = fn(frame_unused, event_unused, arg_unused)
    return fn
def parse_sv_frequencies(variant): frequency_keys = [ 'clingen_cgh_benignAF', 'clingen_cgh_benign', 'clingen_cgh_pathogenicAF', 'clingen_cgh_pathogenic', 'clingen_ngi', 'clingen_ngiAF', 'swegen', 'swegenAF', 'decipherAF', 'decipher' ] sv_frequencies = {} for key in frequency_keys: value = variant.INFO.get(key, 0) if 'AF' in key: value = float(value) else: value = int(value) if value > 0: sv_frequencies[key] = value return sv_frequencies
def calc_fwhm(distribution, is_neg_log=True): if isinstance(distribution, interp1d): if is_neg_log: ymin = distribution.y.min() log_prob = distribution.y-ymin else: log_prob = -np.log(distribution.y) log_prob -= log_prob.min() xvals = distribution.x elif isinstance(distribution, Distribution): xvals = distribution._func.x log_prob = distribution._func.y else: raise TypeError("Error in computing the FWHM for the distribution. " " The input should be either Distribution or interpolation object"); L = xvals.shape[0] tmp = np.where(log_prob < 0.693147)[0] x_l, x_u = tmp[0], tmp[-1] if L < 2: print ("Not enough points to compute FWHM: returning zero") return min(TINY_NUMBER, distribution.xmax - distribution.xmin) else: return max(TINY_NUMBER, xvals[min(x_u+1,L-1)] - xvals[max(0,x_l-1)])
def submit_order(self, symbol, qty, side, type, time_in_force, limit_price=None, stop_price=None, client_order_id=None): params = { 'symbol': symbol, 'qty': qty, 'side': side, 'type': type, 'time_in_force': time_in_force, } if limit_price is not None: params['limit_price'] = limit_price if stop_price is not None: params['stop_price'] = stop_price if client_order_id is not None: params['client_order_id'] = client_order_id resp = self.post('/orders', params) return Order(resp)
def get_queryset(self): queryset = self.get_publishable_queryset() queryset = queryset \ .select_related('featured_image', 'featured_video', 'topic', 'section', 'subsection') \ .prefetch_related( 'tags', 'featured_image__image__authors', 'authors' ) queryset = queryset.order_by('-updated_at') q = self.request.query_params.get('q', None) section = self.request.query_params.get('section', None) tags = self.request.query_params.getlist('tags', None) author = self.request.query_params.get('author', None) if q is not None: queryset = queryset.filter(headline__icontains=q) if section is not None: queryset = queryset.filter(section_id=section) if tags is not None: for tag in tags: queryset = queryset.filter(tags__id=tag) if author is not None: queryset = queryset.filter(authors__person_id=author) return queryset
def get_instance_group_manager(self, zone, resource_id, project_id=None): response = self.get_conn().instanceGroupManagers().get( project=project_id, zone=zone, instanceGroupManager=resource_id ).execute(num_retries=self.num_retries) return response
def get_contacts(address_books, query, method="all", reverse=False, group=False, sort="first_name"): contacts = [] for address_book in address_books: contacts.extend(address_book.search(query, method=method)) if group: if sort == "first_name": return sorted(contacts, reverse=reverse, key=lambda x: ( unidecode(x.address_book.name).lower(), unidecode(x.get_first_name_last_name()).lower())) elif sort == "last_name": return sorted(contacts, reverse=reverse, key=lambda x: ( unidecode(x.address_book.name).lower(), unidecode(x.get_last_name_first_name()).lower())) else: raise ValueError('sort must be "first_name" or "last_name" not ' '{}.'.format(sort)) else: if sort == "first_name": return sorted(contacts, reverse=reverse, key=lambda x: unidecode(x.get_first_name_last_name()).lower()) elif sort == "last_name": return sorted(contacts, reverse=reverse, key=lambda x: unidecode(x.get_last_name_first_name()).lower()) else: raise ValueError('sort must be "first_name" or "last_name" not ' '{}.'.format(sort))
def remove_property(self, property_):
    if property_.name in self.properties:
        del self.properties[property_.name]
def delete(self, filename=None):
    if filename is None:
        filename = self.filename
    delete(filename)
    self.clear()
def DeleteAllItems(self):
    "Remove all the items from the list and unset the related data"
    self._py_data_map.clear()
    self._wx_data_map.clear()
    wx.ListCtrl.DeleteAllItems(self)
def phone_subcommand(search_terms, vcard_list, parsable): all_phone_numbers_list = [] matching_phone_number_list = [] for vcard in vcard_list: for type, number_list in sorted(vcard.get_phone_numbers().items(), key=lambda k: k[0].lower()): for number in sorted(number_list): if config.display_by_name() == "first_name": name = vcard.get_first_name_last_name() else: name = vcard.get_last_name_first_name() line_formatted = "\t".join([name, type, number]) line_parsable = "\t".join([number, name, type]) if parsable: phone_number_line = line_parsable else: phone_number_line = line_formatted if re.search(search_terms, "%s\n%s" % (line_formatted, line_parsable), re.IGNORECASE | re.DOTALL): matching_phone_number_list.append(phone_number_line) elif len(re.sub("\D", "", search_terms)) >= 3: if re.search(re.sub("\D", "", search_terms), re.sub("\D", "", number), re.IGNORECASE): matching_phone_number_list.append(phone_number_line) all_phone_numbers_list.append(phone_number_line) if matching_phone_number_list: if parsable: print('\n'.join(matching_phone_number_list)) else: list_phone_numbers(matching_phone_number_list) elif all_phone_numbers_list: if parsable: print('\n'.join(all_phone_numbers_list)) else: list_phone_numbers(all_phone_numbers_list) else: if not parsable: print("Found no phone numbers") sys.exit(1)
def add_and_rename_file(self, filename: str, new_filename: str) -> None: dest = os.path.join( self.name + ':' + SANDBOX_WORKING_DIR_NAME, new_filename) subprocess.check_call(['docker', 'cp', filename, dest]) self._chown_files([new_filename])
def parse_frequencies(variant, transcripts): frequencies = {} thousand_genomes_keys = ['1000GAF'] thousand_genomes_max_keys = ['1000G_MAX_AF'] exac_keys = ['EXACAF'] exac_max_keys = ['ExAC_MAX_AF', 'EXAC_MAX_AF'] gnomad_keys = ['GNOMADAF', 'GNOMAD_AF'] gnomad_max_keys = ['GNOMADAF_POPMAX', 'GNOMADAF_MAX'] for test_key in thousand_genomes_keys: thousand_g = parse_frequency(variant, test_key) if thousand_g: frequencies['thousand_g'] = thousand_g break for test_key in thousand_genomes_max_keys: thousand_g_max = parse_frequency(variant, test_key) if thousand_g_max: frequencies['thousand_g_max'] = thousand_g_max break for test_key in exac_keys: exac = parse_frequency(variant, test_key) if exac: frequencies['exac'] = exac break for test_key in exac_max_keys: exac_max = parse_frequency(variant, test_key) if exac_max: frequencies['exac_max'] = exac_max break for test_key in gnomad_keys: gnomad = parse_frequency(variant, test_key) if gnomad: frequencies['gnomad'] = gnomad break for test_key in gnomad_max_keys: gnomad_max = parse_frequency(variant, test_key) if gnomad_max: frequencies['gnomad_max'] = gnomad_max break if not frequencies: for transcript in transcripts: exac = transcript.get('exac_maf') exac_max = transcript.get('exac_max') thousand_g = transcript.get('thousand_g_maf') thousandg_max = transcript.get('thousandg_max') gnomad = transcript.get('gnomad_maf') gnomad_max = transcript.get('gnomad_max') if exac: frequencies['exac'] = exac if exac_max: frequencies['exac_max'] = exac_max if thousand_g: frequencies['thousand_g'] = thousand_g if thousandg_max: frequencies['thousand_g_max'] = thousandg_max if gnomad: frequencies['gnomad'] = gnomad if gnomad_max: frequencies['gnomad_max'] = gnomad_max thousand_g_left = parse_frequency(variant, 'left_1000GAF') if thousand_g_left: frequencies['thousand_g_left'] = thousand_g_left thousand_g_right = parse_frequency(variant, 'right_1000GAF') if thousand_g_right: frequencies['thousand_g_right'] = thousand_g_right return frequencies
def random(cls, num_qubits, seed=None):
    if seed is not None:
        np.random.seed(seed)
    # np.bool was removed from NumPy; the builtin bool is equivalent here
    z = np.random.randint(2, size=num_qubits).astype(bool)
    x = np.random.randint(2, size=num_qubits).astype(bool)
    return cls(z, x)
def lost_dimensions(point_fmt_in, point_fmt_out):
    unpacked_dims_in = PointFormat(point_fmt_in).dtype
    unpacked_dims_out = PointFormat(point_fmt_out).dtype

    out_dims = unpacked_dims_out.fields
    completely_lost = []
    for dim_name in unpacked_dims_in.names:
        if dim_name not in out_dims:
            completely_lost.append(dim_name)
    return completely_lost
def rename(script, label='blank', layer_num=None): filter_xml = ''.join([ ' <filter name="Rename Current Mesh">\n', ' <Param name="newName" ', 'value="{}" '.format(label), 'description="New Label" ', 'type="RichString" ', '/>\n', ' </filter>\n']) if isinstance(script, mlx.FilterScript): if (layer_num is None) or (layer_num == script.current_layer()): util.write_filter(script, filter_xml) script.layer_stack[script.current_layer()] = label else: cur_layer = script.current_layer() change(script, layer_num) util.write_filter(script, filter_xml) change(script, cur_layer) script.layer_stack[layer_num] = label else: util.write_filter(script, filter_xml) return None
def verify_signature(amazon_cert: crypto.X509, signature: str, request_body: bytes) -> bool: signature = base64.b64decode(signature) try: crypto.verify(amazon_cert, signature, request_body, 'sha1') result = True except crypto.Error: result = False return result
def parallel_execute(self, cell, block=None, groupby='type', save_name=None):
    block = self.view.block if block is None else block
    base = "Parallel" if block else "Async parallel"

    targets = self.view.targets
    if isinstance(targets, list) and len(targets) > 10:
        str_targets = str(targets[:4])[:-1] + ', ..., ' + str(targets[-4:])[1:]
    else:
        str_targets = str(targets)
    if self.verbose:
        print(base + " execution on engine(s): %s" % str_targets)

    result = self.view.execute(cell, silent=False, block=False)
    self.last_result = result
    if save_name:
        self.shell.user_ns[save_name] = result
    if block:
        result.get()
        result.display_outputs(groupby)
    else:
        return result
def class_is_abstract(node: astroid.ClassDef) -> bool:
    for method in node.methods():
        if method.parent.frame() is node:
            if method.is_abstract(pass_is_abstract=False):
                return True
    return False
def _merge_statements(statements: List["HdlStatement"])\ -> Tuple[List["HdlStatement"], int]: order = {} for i, stm in enumerate(statements): order[stm] = i new_statements = [] rank_decrease = 0 for rank, stms in groupedby(statements, lambda s: s.rank): if rank == 0: new_statements.extend(stms) else: if len(stms) == 1: new_statements.extend(stms) continue for iA, stmA in enumerate(stms): if stmA is None: continue for iB, stmB in enumerate(islice(stms, iA + 1, None)): if stmB is None: continue if stmA._is_mergable(stmB): rank_decrease += stmB.rank stmA._merge_with_other_stm(stmB) stms[iA + 1 + iB] = None new_statements.append(stmA) else: new_statements.append(stmA) new_statements.append(stmB) new_statements.sort(key=lambda stm: order[stm]) return new_statements, rank_decrease
def convert(self, value): if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, six.string_types): m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value
def bind_parameter(binding_key, value):
    if config_is_locked():
        raise RuntimeError('Attempted to modify locked Gin config.')
    pbk = ParsedBindingKey(binding_key)
    fn_dict = _CONFIG.setdefault(pbk.config_key, {})
    fn_dict[pbk.arg_name] = value
def verify_signature(self, signing_key, message, signature, padding_method, signing_algorithm=None, hashing_algorithm=None, digital_signature_algorithm=None): backend = default_backend() hash_algorithm = None dsa_hash_algorithm = None dsa_signing_algorithm = None if hashing_algorithm: hash_algorithm = self._encryption_hash_algorithms.get( hashing_algorithm ) if digital_signature_algorithm: algorithm_pair = self._digital_signature_algorithms.get( digital_signature_algorithm ) if algorithm_pair: dsa_hash_algorithm = algorithm_pair[0] dsa_signing_algorithm = algorithm_pair[1] if dsa_hash_algorithm and dsa_signing_algorithm: if hash_algorithm and (hash_algorithm != dsa_hash_algorithm): raise exceptions.InvalidField( "The hashing algorithm does not match the digital " "signature algorithm." ) if (signing_algorithm and (signing_algorithm != dsa_signing_algorithm)): raise exceptions.InvalidField( "The signing algorithm does not match the digital " "signature algorithm." ) signing_algorithm = dsa_signing_algorithm hash_algorithm = dsa_hash_algorithm if signing_algorithm == enums.CryptographicAlgorithm.RSA: if padding_method == enums.PaddingMethod.PSS: if hash_algorithm: padding = asymmetric_padding.PSS( mgf=asymmetric_padding.MGF1(hash_algorithm()), salt_length=asymmetric_padding.PSS.MAX_LENGTH ) else: raise exceptions.InvalidField( "A hashing algorithm must be specified for PSS " "padding." ) elif padding_method == enums.PaddingMethod.PKCS1v15: padding = asymmetric_padding.PKCS1v15() else: raise exceptions.InvalidField( "The padding method '{0}' is not supported for signature " "verification.".format(padding_method) ) try: public_key = backend.load_der_public_key(signing_key) except Exception: try: public_key = backend.load_pem_public_key(signing_key) except Exception: raise exceptions.CryptographicFailure( "The signing key bytes could not be loaded." ) try: public_key.verify( signature, message, padding, hash_algorithm() ) return True except errors.InvalidSignature: return False except Exception: raise exceptions.CryptographicFailure( "The signature verification process failed." ) else: raise exceptions.InvalidField( "The signing algorithm '{0}' is not supported for " "signature verification.".format(signing_algorithm) )
def predict(self, x, distributed=True):
    # the flag is the `distributed` parameter; `is_distributed` is not defined in this scope
    if distributed:
        if isinstance(x, np.ndarray):
            features = to_sample_rdd(x, np.zeros([x.shape[0]]))
        elif isinstance(x, RDD):
            features = x
        else:
            raise TypeError("Unsupported prediction data type: %s" % type(x))
        return self.predict_distributed(features)
    else:
        if isinstance(x, np.ndarray):
            return self.predict_local(x)
        else:
            raise TypeError("Unsupported prediction data type: %s" % type(x))
def generate(self, outputfile=None, dotfile=None, mapfile=None): import subprocess name = self.graphname if not dotfile: if outputfile and outputfile.endswith(".dot"): dotfile = outputfile else: dotfile = "%s.dot" % name if outputfile is not None: storedir, _, target = target_info_from_filename(outputfile) if target != "dot": pdot, dot_sourcepath = tempfile.mkstemp(".dot", name) os.close(pdot) else: dot_sourcepath = osp.join(storedir, dotfile) else: target = "png" pdot, dot_sourcepath = tempfile.mkstemp(".dot", name) ppng, outputfile = tempfile.mkstemp(".png", name) os.close(pdot) os.close(ppng) pdot = codecs.open(dot_sourcepath, "w", encoding="utf8") pdot.write(self.source) pdot.close() if target != "dot": use_shell = sys.platform == "win32" if mapfile: subprocess.call( [ self.renderer, "-Tcmapx", "-o", mapfile, "-T", target, dot_sourcepath, "-o", outputfile, ], shell=use_shell, ) else: subprocess.call( [self.renderer, "-T", target, dot_sourcepath, "-o", outputfile], shell=use_shell, ) os.unlink(dot_sourcepath) return outputfile
def connect(com, peers, tree, pub_url, root_id):
    com.connect(peers, tree, pub_url, root_id)
def delete_instance(self, instance_id, project_id=None):
    instance = self.get_instance(instance_id=instance_id, project_id=project_id)
    if instance:
        instance.delete()
    else:
        self.log.info("The instance '%s' does not exist in project '%s'. Exiting",
                      instance_id, project_id)
def encode(self, input, errors='strict'): if isinstance(input, memoryview): input = input.tobytes() if not isinstance(input, (binary_type, bytearray)): raise with_context( exc=TypeError( "Can't encode {type}; byte string expected.".format( type=type(input).__name__, )), context={ 'input': input, }, ) if not isinstance(input, bytearray): input = bytearray(input) trytes = bytearray() for c in input: second, first = divmod(c, len(self.alphabet)) trytes.append(self.alphabet[first]) trytes.append(self.alphabet[second]) return binary_type(trytes), len(input)
def get_hash( cls, version: str, frequency: int, timestamp: int, seed_value: str, prev_output: str, status_code: str, ) -> SHA512Hash: return SHA512.new( version.encode() + struct.pack( '>1I1Q64s64s1I', frequency, timestamp, binascii.a2b_hex(seed_value), binascii.a2b_hex(prev_output), int(status_code), ) )
def init( dist='dist', minver=None, maxver=None, use_markdown_readme=True, use_stdeb=False, use_distribute=False, ): if not minver == maxver == None: import sys if not minver <= sys.version < (maxver or 'Any'): sys.stderr.write( '%s: requires python version in <%s, %s), not %s\n' % ( sys.argv[0], minver or 'any', maxver or 'any', sys.version.split()[0])) sys.exit(1) if use_distribute: from distribute_setup import use_setuptools use_setuptools(to_dir=dist) from setuptools import setup else: try: from setuptools import setup except ImportError: from distutils.core import setup if use_markdown_readme: try: import setuptools.command.sdist setuptools.command.sdist.READMES = tuple(list(getattr(setuptools.command.sdist, 'READMES', ())) + ['README.md']) except ImportError: pass if use_stdeb: import platform if 'debian' in platform.dist(): try: import stdeb except ImportError: pass return setup
def get_defined_srms(srm_file):
    srms = read_table(srm_file)
    return np.asanyarray(srms.index.unique())
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0): if kmip_version < enums.KMIPVersion.KMIP_2_0: raise exceptions.VersionNotSupported( "KMIP {} does not support the DefaultsInformation " "object.".format( kmip_version.value ) ) super(DefaultsInformation, self).read( input_buffer, kmip_version=kmip_version ) local_buffer = utils.BytearrayStream(input_buffer.read(self.length)) object_defaults = [] while self.is_tag_next(enums.Tags.OBJECT_DEFAULTS, local_buffer): object_default = ObjectDefaults() object_default.read(local_buffer, kmip_version=kmip_version) object_defaults.append(object_default) if len(object_defaults) == 0: raise exceptions.InvalidKmipEncoding( "The DefaultsInformation encoding is missing the object " "defaults structure." ) else: self._object_defaults = object_defaults self.is_oversized(local_buffer)
def unused_variable_line_numbers(messages):
    for message in messages:
        if isinstance(message, pyflakes.messages.UnusedVariable):
            yield message.lineno
def set_data(data):
    "Write content to the clipboard; data can be either a string or a bitmap"
    try:
        if wx.TheClipboard.Open():
            if isinstance(data, str):
                do = wx.TextDataObject()
                do.SetText(data)
                wx.TheClipboard.SetData(do)
            elif isinstance(data, wx.Bitmap):
                do = wx.BitmapDataObject()
                do.SetBitmap(data)
                wx.TheClipboard.SetData(do)
            wx.TheClipboard.Close()
    except Exception:
        pass
def partial(f, *args):
    @functools.wraps(f)
    def partial_f(*inner_args):
        return f(*itertools.chain(args, inner_args))
    return partial_f
def match(self, request):
    errors = []

    def match(matcher):
        try:
            return matcher.match(request)
        except Exception as err:
            err = '{}: {}'.format(type(matcher).__name__, err)
            errors.append(err)
            return False

    return all([match(matcher) for matcher in self]), errors
def batch_shape_tensor(self):
    batch_shape = tf.constant([], dtype=tf.int32)
    for param in self.parameters:
        batch_shape = tf.broadcast_dynamic_shape(
            batch_shape, param.prior.batch_shape_tensor())
    return batch_shape
def generate(self, *arg, **kw):
    for p, meth in self.plugins:
        result = None
        try:
            result = meth(*arg, **kw)
            if result is not None:
                for r in result:
                    yield r
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            exc = sys.exc_info()
            yield Failure(*exc)
            continue
def fracpols(data, **kwargs):
    I, Q, U, V, L = get_stokes(data, **kwargs)
    return L / I, V / I
def checkUser(self, user):
    return not self.conn("POST", "{0}/GetCredentialType.srf".format(SkypeConnection.API_MSACC),
                         json={"username": user}).json().get("IfExistsResult")
def user_institutes(store, login_user):
    if login_user.is_admin:
        institutes = store.institutes()
    else:
        institutes = [store.institute(inst_id) for inst_id in login_user.institutes]
    return institutes
def start(self, job): if self.hostname is None: self.hostname = subprocess.check_output(["hostname", "-f",])[:-1] _log.info("Started Spark master container.") self.sparkContainerID = dockerCheckOutput(job=job, defer=STOP, workDir=os.getcwd(), tool="quay.io/ucsc_cgl/apache-spark-master:1.5.2", dockerParameters=["--net=host", "-d", "-v", "/mnt/ephemeral/:/ephemeral/:rw", "-e", "SPARK_MASTER_IP=" + self.hostname, "-e", "SPARK_LOCAL_DIRS=/ephemeral/spark/local", "-e", "SPARK_WORKER_DIR=/ephemeral/spark/work"], parameters=[self.hostname])[:-1] _log.info("Started HDFS Datanode.") self.hdfsContainerID = dockerCheckOutput(job=job, defer=STOP, workDir=os.getcwd(), tool="quay.io/ucsc_cgl/apache-hadoop-master:2.6.2", dockerParameters=["--net=host", "-d"], parameters=[self.hostname])[:-1] return self.hostname
def sequence_LH(self, pos=None, full_sequence=False): if not hasattr(self.tree, "total_sequence_LH"): self.logger("TreeAnc.sequence_LH: you need to run marginal ancestral inference first!", 1) self.infer_ancestral_sequences(marginal=True) if pos is not None: if full_sequence: compressed_pos = self.full_to_reduced_sequence_map[pos] else: compressed_pos = pos return self.tree.sequence_LH[compressed_pos] else: return self.tree.total_sequence_LH
def adjust_saturation(img, saturation_factor):
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
    "In the IOU fungible the supply is set by the Issuer, who issues funds."
    ctx.accounts[ctx.msg_sender] += amount
    ctx.issued_amounts[ctx.msg_sender] += amount
    ctx.Issuance(ctx.msg_sender, rtgs_hash, amount)
    return OK
def build_seasonal_transition_noise( drift_scale, num_seasons, is_last_day_of_season): drift_scale_diag = tf.stack( [tf.zeros_like(drift_scale)] * (num_seasons - 1) + [drift_scale], axis=-1) def seasonal_transition_noise(t): noise_scale_diag = dist_util.pick_scalar_condition( is_last_day_of_season(t), drift_scale_diag, tf.zeros_like(drift_scale_diag)) return tfd.MultivariateNormalDiag( loc=tf.zeros(num_seasons, dtype=drift_scale.dtype), scale_diag=noise_scale_diag) return seasonal_transition_noise
def resolve_url(self, url, follow_redirect=True): url = update_scheme("http://", url) available_plugins = [] for name, plugin in self.plugins.items(): if plugin.can_handle_url(url): available_plugins.append(plugin) available_plugins.sort(key=lambda x: x.priority(url), reverse=True) if available_plugins: return available_plugins[0](url) if follow_redirect: try: res = self.http.head(url, allow_redirects=True, acceptable_status=[501]) if res.status_code == 501: res = self.http.get(url, stream=True) if res.url != url: return self.resolve_url(res.url, follow_redirect=follow_redirect) except PluginError: pass raise NoPluginError
def embed_font_to_svg(filepath, outfile, font_files):
    tree = _embed_font_to_svg(filepath, font_files)
    tree.write(outfile, encoding='utf-8', pretty_print=True)
def verify_type_product(self, satellite): if satellite == 'L5': id_satellite = '3119' stations = ['GLC', 'ASA', 'KIR', 'MOR', 'KHC', 'PAC', 'KIS', 'CHM', 'LGS', 'MGR', 'COA', 'MPS'] elif satellite == 'L7': id_satellite = '3373' stations = ['EDC', 'SGS', 'AGS', 'ASN', 'SG1'] elif satellite == 'L8': id_satellite = '4923' stations = ['LGN'] else: raise ProductInvalidError('Type product invalid. the permitted types are: L5, L7, L8. ') typ_product = dict(id_satelite=id_satellite, stations=stations) return typ_product
def brent(seqs, f=None, start=None, key=lambda x: x):
    power = period = 1
    tortise, hare = seqs
    # generator .next() and xrange are Python 2 only; use next() and range
    yield next(hare)
    tortise_value = next(tortise)
    hare_value = next(hare)
    while key(tortise_value) != key(hare_value):
        yield hare_value
        if power == period:
            power *= 2
            period = 0
            if f:
                tortise = f_generator(f, hare_value)
                tortise_value = next(tortise)
            else:
                while tortise_value != hare_value:
                    tortise_value = next(tortise)
        hare_value = next(hare)
        period += 1
    if f is None:
        raise CycleDetected()
    first = 0
    tortise_value = hare_value = start
    for _ in range(period):
        hare_value = f(hare_value)
    while key(tortise_value) != key(hare_value):
        tortise_value = f(tortise_value)
        hare_value = f(hare_value)
        first += 1
    raise CycleDetected(period=period, first=first)
def add_s(self, s, obj, priority=0):
    chain = self.strs.get(s, CommandChainDispatcher())
    chain.add(obj, priority)
    self.strs[s] = chain