code: string (lengths 4 to 4.48k)
docstring: string (lengths 1 to 6.45k)
_id: string (length 24)
def total(r,h): <NEW_LINE> <INDENT> return lateral(r,h) + 2 * base(r)
Computes the total surface area of the cylinder given the radius (r) and the height (h). float, float -> float
625941b394891a1f4081b853
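The record above calls two helpers, lateral and base, that are not shown in the source. A minimal sketch of what they presumably compute, using the standard cylinder formulas (the helper bodies are assumptions):

import math

def lateral(r, h):
    # lateral (side) surface of a cylinder: 2 * pi * r * h
    return 2 * math.pi * r * h

def base(r):
    # area of one circular base: pi * r^2
    return math.pi * r ** 2

def total(r, h):
    return lateral(r, h) + 2 * base(r)

print(total(1.0, 2.0))  # 6 * pi, approximately 18.85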
def delete_position_data(self, i): <NEW_LINE> <INDENT> self.shot_index.pop(i) <NEW_LINE> self.shot_frame.pop(i) <NEW_LINE> self.array_ball_position_shot_x.pop(i) <NEW_LINE> self.array_ball_position_shot_y.pop(i) <NEW_LINE> self.arrayPlayerAPosition_x.pop(i) <NEW_LINE> self.arrayPlayerAPosition_y.pop(i) <NEW_LINE> self.arrayPlayerBPosition_x.pop(i) <NEW_LINE> self.arrayPlayerBPosition_y.pop(i) <NEW_LINE> self.arrayHitPlayer.pop(i) <NEW_LINE> self.arrayBounceHit.pop(i) <NEW_LINE> self.arrayForeBack.pop(i) <NEW_LINE> self.arrayDirection.pop(i) <NEW_LINE> self.array_x1.pop(i) <NEW_LINE> self.array_y1.pop(i) <NEW_LINE> self.array_x2.pop(i) <NEW_LINE> self.array_y2.pop(i) <NEW_LINE> self.array_x3.pop(i) <NEW_LINE> self.array_y3.pop(i) <NEW_LINE> self.array_x4.pop(i) <NEW_LINE> self.array_y4.pop(i)
Delete the position data that is selected in the tree data. Parameters ---------- i : selected index
625941b315baa723493c3d1d
def recurseValidate(doc, doc_class, key, val, attrPath, doc_errors): <NEW_LINE> <INDENT> doc = doc_class(**val) <NEW_LINE> errors = doc.validate() <NEW_LINE> if errors: <NEW_LINE> <INDENT> error = {'attrPath': '.'.join(attrPath), 'fld':key, '_cls': val['_cls'], 'errors': errors} <NEW_LINE> doc_errors.append(error)
This will be called by the recursiveDoc function and executed on each doc/embedded doc.
625941b3e1aae11d1e749a5f
def gtri(r, a, b): <NEW_LINE> <INDENT> if r[a] > b: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 0
gtri op
625941b3ad47b63b2c509d38
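gtri looks like the "greater-than register/immediate" opcode of a small register machine (an assumption; the record only shows the comparison). A quick usage sketch against the record's function:

r = [3, 7, 0, 0]      # register file
print(gtri(r, 1, 5))  # 1, since r[1] == 7 > 5
print(gtri(r, 0, 5))  # 0, since r[0] == 3 <= 5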
def validate_credentials(data): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> person = data['person'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise ValidationError("The key 'person' is not present. Please validate as a coursys user.") <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> secret = data['secret'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise ValidationError("The key 'secret' is not present. ") <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> unit = data['unit'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise ValidationError("The key 'unit' is not present.") <NEW_LINE> <DEDENT> if _check_token(person, secret, 'problems-token'): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> person = Person.objects.get(userid=person) <NEW_LINE> <DEDENT> except Person.DoesNotExist: <NEW_LINE> <INDENT> person = None <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> unit = Unit.objects.get(label=unit) <NEW_LINE> <DEDENT> except Unit.DoesNotExist: <NEW_LINE> <INDENT> unit = None <NEW_LINE> <DEDENT> if person and unit: <NEW_LINE> <INDENT> return person, unit, 'problems-token' <NEW_LINE> <DEDENT> <DEDENT> if _check_token(person, secret, 'advisor-token'): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> person = Person.objects.get(userid=person) <NEW_LINE> <DEDENT> except Person.DoesNotExist: <NEW_LINE> <INDENT> person = None <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> unit = Unit.objects.get(label=unit) <NEW_LINE> <DEDENT> except Unit.DoesNotExist: <NEW_LINE> <INDENT> unit = None <NEW_LINE> <DEDENT> if person and unit: <NEW_LINE> <INDENT> return person, unit, 'advisor-token' <NEW_LINE> <DEDENT> <DEDENT> raise ValidationError(_token_not_found)
Determine if the data contains a valid user, secret, and unit. If the data doesn't validate, it will throw a "ValidationError".
625941b326068e7796caea83
def toString(s): <NEW_LINE> <INDENT> if isinstance(s, str): <NEW_LINE> <INDENT> return s <NEW_LINE> <DEDENT> if isinstance(s, bytes): <NEW_LINE> <INDENT> if sys.version_info[0] == 2: <NEW_LINE> <INDENT> return str(s) <NEW_LINE> <DEDENT> return s.decode('ascii') <NEW_LINE> <DEDENT> if isinstance(s, list): <NEW_LINE> <INDENT> return [toString(x) for x in s] <NEW_LINE> <DEDENT> if isinstance(s, np.ndarray): <NEW_LINE> <INDENT> return s.astype(str) <NEW_LINE> <DEDENT> return s
This takes care of python2/3 differences
625941b3097d151d1a222c0f
def characters(self, ch): <NEW_LINE> <INDENT> self.characterElementIdx += 1 <NEW_LINE> if self.inIgnorableElement == 0: <NEW_LINE> <INDENT> if self.characterElementIdx not in self.contentBitSet: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.html += xmlEncode(str(ch))
generated source for method characters
625941b3435de62698dfda00
def getPosition(poiID): <NEW_LINE> <INDENT> return _getUniversal(tc.VAR_POSITION, poiID)
getPosition(string) -> (double, double) Returns the position coordinates of the given poi.
625941b3d8ef3951e32432e9
def update(self, lang=None): <NEW_LINE> <INDENT> r <NEW_LINE> lang = _get_translate_language(lang) <NEW_LINE> if lang != _get_default_language(): <NEW_LINE> <INDENT> query = models.Q() <NEW_LINE> _translations = [] <NEW_LINE> for address, text in self._get_changed_fields(): <NEW_LINE> <INDENT> query |= models.Q(**address) <NEW_LINE> _translations.append( translations.models.Translation( language=lang, text=text, **address ) ) <NEW_LINE> <DEDENT> _get_translations(query, lang).delete() <NEW_LINE> translations.models.Translation.objects.bulk_create(_translations)
Update the translations of the `Context`\ 's `purview` in a language.
625941b36e29344779a623c3
def __getitem__(self, name): <NEW_LINE> <INDENT> return self._byname[name]
Retrieve entry by relative path. :return: tuple with (ctime, mtime, dev, ino, mode, uid, gid, size, sha, flags)
625941b3cc40096d61595702
def deleteExtraneous(component, ignore_dtstamp=False): <NEW_LINE> <INDENT> for comp in component.components(): <NEW_LINE> <INDENT> deleteExtraneous(comp, ignore_dtstamp) <NEW_LINE> <DEDENT> for line in component.lines(): <NEW_LINE> <INDENT> if line.params.has_key('X-VOBJ-ORIGINAL-TZID'): <NEW_LINE> <INDENT> del line.params['X-VOBJ-ORIGINAL-TZID'] <NEW_LINE> <DEDENT> <DEDENT> if ignore_dtstamp and hasattr(component, 'dtstamp_list'): <NEW_LINE> <INDENT> del component.dtstamp_list
Recursively walk the component's children, deleting extraneous details like X-VOBJ-ORIGINAL-TZID.
625941b34e696a04525c9201
def _filterPitchLabel(self, ticks): <NEW_LINE> <INDENT> post = [] <NEW_LINE> for value, label in ticks: <NEW_LINE> <INDENT> label = _substituteAccidentalSymbols(label) <NEW_LINE> post.append([value, label]) <NEW_LINE> <DEDENT> return post
Given a list of ticks, replace all labels with alternative/unicode symbols where necessary.
625941b3e5267d203edcda4d
def __init__(self): <NEW_LINE> <INDENT> self.val = 0 <NEW_LINE> self.running = True
Initialize the counter to 0, the running-flag to True.
625941b35510c4643540f1a6
def input_fn(data_file, num_epochs, shuffle, batch_size, feature_only=False): <NEW_LINE> <INDENT> assert tf.gfile.Exists(data_file), ( '%s not found. Please make sure you have run data_download.py and ' 'set the --data_dir argument to the correct path.' % data_file) <NEW_LINE> def parse_csv(value): <NEW_LINE> <INDENT> columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS) <NEW_LINE> features = dict(zip(_CSV_COLUMNS, columns)) <NEW_LINE> labels = features.pop('r_rank') <NEW_LINE> return features, labels <NEW_LINE> <DEDENT> dataset = tf.data.TextLineDataset(data_file) <NEW_LINE> if shuffle: <NEW_LINE> <INDENT> dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train']) <NEW_LINE> <DEDENT> dataset = dataset.map(parse_csv, num_parallel_calls=16) <NEW_LINE> dataset = dataset.repeat(num_epochs) <NEW_LINE> dataset = dataset.batch(batch_size) <NEW_LINE> iterator = dataset.make_one_shot_iterator() <NEW_LINE> features, labels = iterator.get_next() <NEW_LINE> if feature_only: <NEW_LINE> <INDENT> return features <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return features, labels
Generate an input function for the Estimator.
625941b31d351010ab8558d1
def mask_correlated_samples(self, batch_size): <NEW_LINE> <INDENT> N = 2 * batch_size <NEW_LINE> mask = torch.ones((N, N), dtype=bool) <NEW_LINE> mask = mask.fill_diagonal_(0) <NEW_LINE> for i in range(batch_size): <NEW_LINE> <INDENT> mask[i, batch_size + i] = 0 <NEW_LINE> mask[batch_size + i, i] = 0 <NEW_LINE> <DEDENT> return mask
Mask correlated samples. :param batch_size: batch size of the dataset :type batch_size: int
625941b34d74a7450ccd3f6f
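A standalone check of the mask for batch_size=2; since the method's self is unused, its body is copied into a plain function for illustration:

import torch

def mask_correlated_samples(batch_size):
    # copy of the record's method body
    N = 2 * batch_size
    mask = torch.ones((N, N), dtype=bool).fill_diagonal_(0)
    for i in range(batch_size):
        mask[i, batch_size + i] = 0
        mask[batch_size + i, i] = 0
    return mask

print(mask_correlated_samples(2).int())
# tensor([[0, 1, 0, 1],
#         [1, 0, 1, 0],
#         [0, 1, 0, 1],
#         [1, 0, 1, 0]])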
def __init__(self, client_manager): <NEW_LINE> <INDENT> self.client_manager = client_manager <NEW_LINE> self.cinder_client = self.client_manager.get_cinder()
:param client_manager: :return:
625941b30a50d4780f666c3a
def __init__(self, wf, project_name): <NEW_LINE> <INDENT> self.project_name = project_name <NEW_LINE> self.wf = wf <NEW_LINE> self.live_seed_list = None <NEW_LINE> self.test_seed_lists = [] <NEW_LINE> self.subject = None <NEW_LINE> self.html = None <NEW_LINE> self.category = None <NEW_LINE> self.from_line = None <NEW_LINE> self.suppression_file_path = None <NEW_LINE> self.client_id = None <NEW_LINE> self.email_creative_id = None <NEW_LINE> self.__project = None <NEW_LINE> self.seeds_provider = ProviderConfig()
@param wf: Workfront service object @param project_name: name that the created project will have.
625941b3be8e80087fb209fb
def next_staircase(self, home_floor, dest_floor): <NEW_LINE> <INDENT> if home_floor is None: <NEW_LINE> <INDENT> debug.log('Cannot determine next staircase when home floor is None') <NEW_LINE> return None <NEW_LINE> <DEDENT> if dest_floor is None: <NEW_LINE> <INDENT> debug.log('Cannot determine next staircase when destination floor is None') <NEW_LINE> return None <NEW_LINE> <DEDENT> if home_floor is dest_floor: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> routes = self.routes[home_floor.floor_id] <NEW_LINE> staircase_id = routes.get(dest_floor.floor_id, routes['*']) <NEW_LINE> return self.staircases[staircase_id]
Return the next staircase that any character on one floor (the home floor) should ascend or descend next to reach another floor (the destination floor). :param home_floor: The home floor. :param dest_floor: The destination floor.
625941b3377c676e91271f5e
def surf_dist(pred_seg, gt_seg, sampling=1, connectivity=1): <NEW_LINE> <INDENT> pred_seg = np.atleast_1d(pred_seg.astype(np.bool)) <NEW_LINE> gt_seg = np.atleast_1d(gt_seg.astype(np.bool)) <NEW_LINE> conn = morphology.generate_binary_structure(pred_seg.ndim, connectivity) <NEW_LINE> S = pred_seg ^ morphology.binary_erosion(pred_seg, conn) <NEW_LINE> Sprime = gt_seg ^ morphology.binary_erosion(gt_seg, conn) <NEW_LINE> dta = morphology.distance_transform_edt(~S, sampling) <NEW_LINE> dtb = morphology.distance_transform_edt(~Sprime, sampling) <NEW_LINE> sds = np.concatenate([np.ravel(dta[Sprime != 0]), np.ravel(dtb[S != 0])]) <NEW_LINE> return sds
from https://mlnotebook.github.io/post/surface-distance-function/ Calculates and returns the surface distance between the Ground Truth segmentation and the predicted one. The surface distance is a vector with length as len(contour(pred_seg)) that indicates for every pixel on the contour, its distance from the closest pixel on the contour of the ground truth in euclidean norm. :param pred_seg: the segmentation that has been created :param gt_seg: the GT segmentation against which we wish to compare pred_seg :param sampling: the pixel resolution or pixel size. This is entered as an n-vector where n is equal to the number of dimensions in the segmentation i.e. 2D or 3D. The default value is 1 which means pixels (or rather voxels) are 1 x 1 x 1 mm in size :param connectivity: creates either a 2D (3 x 3) or 3D (3 x 3 x 3) matrix defining the neighbourhood around which the function looks for neighbouring pixels. Typically, this is defined as a six-neighbour kernel which is the default behaviour of this function. :return: surface distance vector
625941b350812a4eaa59c0d2
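A usage sketch on two overlapping squares, assuming the record's `morphology` is scipy.ndimage (which provides generate_binary_structure, binary_erosion and distance_transform_edt with these signatures) and an older NumPy where the deprecated np.bool alias still exists:

import numpy as np
from scipy import ndimage as morphology  # assumed import behind the record's `morphology`

pred = np.zeros((8, 8)); pred[2:6, 2:6] = 1  # predicted segmentation
gt = np.zeros((8, 8)); gt[3:7, 3:7] = 1      # ground-truth segmentation
sds = surf_dist(pred, gt, sampling=1, connectivity=1)
print(sds.mean())  # average symmetric surface distance, in pixel units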
def turn_off(self, **kwargs): <NEW_LINE> <INDENT> for _ in range(self.signal_repetitions): <NEW_LINE> <INDENT> self.tellstick_device.turn_off() <NEW_LINE> <DEDENT> self._brightness = 0 <NEW_LINE> self.update_ha_state()
Turn the switch off.
625941b307f4c71912b11232
def create_prime_iterator(rfrom, rto): <NEW_LINE> <INDENT> prefix = [2] if rfrom < 3 and rto > 1 else [] <NEW_LINE> odd_rfrom = 3 if rfrom < 3 else make_odd(rfrom) <NEW_LINE> odd_numbers = (num for num in xrange(odd_rfrom, rto + 1, 2)) <NEW_LINE> prime_generator = (num for num in odd_numbers if not has_odd_divisor(num)) <NEW_LINE> return itertools.chain(prefix, prime_generator)
Create iterator of prime numbers in range [rfrom, rto]
625941b34f88993c3716be21
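The record targets Python 2 (xrange) and relies on two helpers that are not shown. Plausible versions of the helpers (assumptions), with range substituted so the whole sketch runs on Python 3:

import itertools

def make_odd(n):
    # round up to the nearest odd number (assumed helper)
    return n if n % 2 == 1 else n + 1

def has_odd_divisor(num):
    # True if num is divisible by some odd d in [3, sqrt(num)] (assumed helper)
    return any(num % d == 0 for d in range(3, int(num ** 0.5) + 1, 2))

def create_prime_iterator(rfrom, rto):
    prefix = [2] if rfrom < 3 and rto > 1 else []
    odd_rfrom = 3 if rfrom < 3 else make_odd(rfrom)
    odd_numbers = (num for num in range(odd_rfrom, rto + 1, 2))
    return itertools.chain(prefix, (num for num in odd_numbers if not has_odd_divisor(num)))

print(list(create_prime_iterator(1, 20)))  # [2, 3, 5, 7, 11, 13, 17, 19]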
def find_device_ids(self): <NEW_LINE> <INDENT> device_info = str(sh.xinput('list', '--short')) <NEW_LINE> id_pattern = r'id=(\d+)' <NEW_LINE> xtest_id_pattern = r'XTEST[^\n]+id=(\d+)' <NEW_LINE> device_ids = list(set(re.findall(id_pattern, device_info)).difference( set(re.findall(xtest_id_pattern, device_info)))) <NEW_LINE> return device_ids
:return: list of all device ids from xinput, excluding XTEST devices.
625941b326068e7796caea84
def op_tolower(string): <NEW_LINE> <INDENT> return string.lower()
Lowercases string
625941b3e64d504609d745ec
def cluster(x, max_num_clusters=3): <NEW_LINE> <INDENT> data = _transform_data(x) <NEW_LINE> centroids = _apply_clustering(data, max_num_clusters) <NEW_LINE> centroids = np.append(centroids, 0) <NEW_LINE> centroids = np.round(centroids).astype(np.int32) <NEW_LINE> centroids = np.unique(centroids) <NEW_LINE> return centroids
Applies clustering on reduced data, i.e. data where power is greater than threshold. Parameters ---------- x : pd.Series or single-column pd.DataFrame max_num_clusters : int Returns ------- centroids : ndarray of int32s Power in different states of an appliance, sorted
625941b397e22403b379cd45
def __init__(self, session): <NEW_LINE> <INDENT> self.session = session <NEW_LINE> tkutil.Dialog.__init__(self, session.tk, 'Open & Reload Multiple Spectra') <NEW_LINE> proj = session.project <NEW_LINE> mfs = tkutil.multiple_file_selection(self.top, proj.sparky_directory) <NEW_LINE> mfs.frame.pack(side = 'top', anchor = 'nw', fill = 'both', expand = 1) <NEW_LINE> self.files = mfs <NEW_LINE> r = Tkinter.Label(self.top, justify = 'left') <NEW_LINE> r.pack(side = 'top', anchor = 'nw') <NEW_LINE> self.result = r <NEW_LINE> br = tkutil.button_row(self.top, ('Open', self.open_cb), ('Reload', self.reload_spectra), ('Cancel', self.close_cb), ('Help', sputil.help_cb(session, 'OpenAutoreloadSpectra')), ) <NEW_LINE> br.frame.pack(side = 'top', anchor = 'nw') <NEW_LINE> self.paths = [] <NEW_LINE> self.spectra = [] <NEW_LINE> for v in proj.view_list(): <NEW_LINE> <INDENT> self.paths.append(v.spectrum.save_path) <NEW_LINE> self.spectra.append(v.spectrum)
Initialization
625941b376d4e153a657e8dc
def load(self, request): <NEW_LINE> <INDENT> assignments = request.FILES <NEW_LINE> reader = csv.reader(assignments['csv']) <NEW_LINE> def get_model(model, name, cache): <NEW_LINE> <INDENT> name = name.strip() <NEW_LINE> if not name in cache: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> cache[name] = model.objects.get(name=name) <NEW_LINE> <DEDENT> except model.DoesNotExist: <NEW_LINE> <INDENT> cache[name] = name <NEW_LINE> <DEDENT> <DEDENT> return cache[name] <NEW_LINE> <DEDENT> def generate_assignments(reader): <NEW_LINE> <INDENT> committees = {} <NEW_LINE> countries = {} <NEW_LINE> schools = {} <NEW_LINE> for row in reader: <NEW_LINE> <INDENT> if (row[0]=='School' and row[1]=='Committee' and row[2]=='Country'): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> while len(row) < 3: <NEW_LINE> <INDENT> row.append("") <NEW_LINE> <DEDENT> if len(row) < 4: <NEW_LINE> <INDENT> rejected = False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> rejected = (row[3].lower() == 'true') <NEW_LINE> <DEDENT> committee = get_model(Committee, row[1], committees) <NEW_LINE> country = get_model(Country, row[2], countries) <NEW_LINE> school = get_model(School, row[0], schools) <NEW_LINE> yield (committee, country, school, rejected) <NEW_LINE> <DEDENT> <DEDENT> failed_rows = Assignment.update_assignments(generate_assignments(reader)) <NEW_LINE> if failed_rows: <NEW_LINE> <INDENT> messages.error(request, html.format_html('Assignment upload aborted. These assignments failed:<br/>' + '<br/>'.join(failed_rows))) <NEW_LINE> <DEDENT> return HttpResponseRedirect(reverse('admin:core_assignment_changelist'))
Loads new Assignments.
625941b330bbd722463cbb6f
def find_or_create(stimulus, listnum, m, sd, ratings, responses, subjects): <NEW_LINE> <INDENT> obj = None <NEW_LINE> sMsg = "" <NEW_LINE> oErr = ErrHandle() <NEW_LINE> try: <NEW_LINE> <INDENT> obj = Brysbaert.objects.filter(stimulus=stimulus).first() <NEW_LINE> if obj == None: <NEW_LINE> <INDENT> obj = Brysbaert(stimulus=stimulus, list=listnum, m=m, sd=sd, ratings=ratings, responses=responses, subjects=subjects) <NEW_LINE> obj.save() <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> sMsg = oErr.get_error_message() <NEW_LINE> oErr.DoError("find_or_create") <NEW_LINE> <DEDENT> return obj, sMsg
Find existing or create new item
625941b3f548e778e58cd327
def json_gen( tokens, f , first=None): <NEW_LINE> <INDENT> def get_block(tokens, start, end): <NEW_LINE> <INDENT> level = 1 <NEW_LINE> out = start <NEW_LINE> while level > 0: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> t = tokens.next() <NEW_LINE> out += t[1] <NEW_LINE> if t[1] == start: <NEW_LINE> <INDENT> level +=1 <NEW_LINE> <DEDENT> elif t[1] == end: <NEW_LINE> <INDENT> level -=1 <NEW_LINE> <DEDENT> <DEDENT> except tokenize.TokenError as e: <NEW_LINE> <INDENT> logging.error("%s::%s" % (type(e),e )); <NEW_LINE> s = f.read(1) <NEW_LINE> if s: <NEW_LINE> <INDENT> raise e <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise StopIteration ("reached EOF") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return out <NEW_LINE> <DEDENT> def get_next(t, tokens): <NEW_LINE> <INDENT> if t is None: <NEW_LINE> <INDENT> raise StopIteration <NEW_LINE> <DEDENT> elif t[1] == "{": <NEW_LINE> <INDENT> return get_block(tokens, "{", "}") <NEW_LINE> <DEDENT> elif t[1] == "[": <NEW_LINE> <INDENT> return get_block(tokens, "[", "]") <NEW_LINE> <DEDENT> elif t[0] is tokenize.NUMBER or t[0] is tokenize.STRING or t[0] is tokenize.NAME: <NEW_LINE> <INDENT> return t[1] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return get_next( get_first(tokens,f ), tokens) <NEW_LINE> <DEDENT> <DEDENT> if first: <NEW_LINE> <INDENT> yield get_next(first, tokens) <NEW_LINE> <DEDENT> for t in tokens: <NEW_LINE> <INDENT> yield get_next(t, tokens)
Generator of json objects
625941b32eb69b55b151c657
def __call__(self,request,*args,**kwargs): <NEW_LINE> <INDENT> self._template = None <NEW_LINE> self.response = HttpResponse() <NEW_LINE> self.request = request <NEW_LINE> self.report = Report() <NEW_LINE> self.cookies = CookieHandler(self) <NEW_LINE> self.init(*args,**kwargs) <NEW_LINE> getattr(self,'post' if self.request.method=='POST' else 'get')(*args,**kwargs) <NEW_LINE> if self.template: <NEW_LINE> <INDENT> self.response.write(self.template.render()) <NEW_LINE> <DEDENT> return self.response
Runs when the controller executes the mapped subclass.
625941b30a50d4780f666c3b
def shortest_path(source, target): <NEW_LINE> <INDENT> explored = set() <NEW_LINE> start = Node(state = source, parent = None, action = None) <NEW_LINE> frontier = QueueFrontier() <NEW_LINE> frontier.add(start) <NEW_LINE> while True: <NEW_LINE> <INDENT> if frontier.empty(): <NEW_LINE> <INDENT> raise Exception("No solution.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> node = frontier.remove() <NEW_LINE> if node.state == target: <NEW_LINE> <INDENT> n = node <NEW_LINE> path = [] <NEW_LINE> while n.parent is not None: <NEW_LINE> <INDENT> path.append((n.action, n.state)) <NEW_LINE> n = n.parent <NEW_LINE> <DEDENT> path.reverse() <NEW_LINE> return path <NEW_LINE> <DEDENT> explored.add(node.state) <NEW_LINE> for neighbor in neighbors_for_person(node.state): <NEW_LINE> <INDENT> neighbor_node = Node(neighbor[1], node, neighbor[0]) <NEW_LINE> if neighbor_node not in explored: <NEW_LINE> <INDENT> frontier.add(neighbor_node) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> raise NotImplementedError
Returns the shortest list of (movie_id, person_id) pairs that connect the source to the target. If no possible path, returns None.
625941b30383005118ecf391
def update(self, host, values): <NEW_LINE> <INDENT> return self._update("/os-hosts/%s" % host, values)
Update status or maintenance mode for the host.
625941b3b7558d58953c4cca
def classify(self, x): <NEW_LINE> <INDENT> y = np.sign(x.dot(self.w.T)) <NEW_LINE> return y
Classify a dataset using the internal PLA weights. x - data set. Returns the classification.
625941b3507cdc57c6306a7e
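A self-contained sketch of how the method would be used, assuming self.w holds a learned weight row vector of shape (1, d) (the surrounding class is not shown in the record):

import numpy as np

class PLA:
    def __init__(self, w):
        self.w = np.asarray(w)  # learned weights, shape (1, d) (assumed)
    def classify(self, x):
        return np.sign(x.dot(self.w.T))

model = PLA([[1.0, -2.0]])
print(model.classify(np.array([[3.0, 1.0], [0.0, 1.0]])))  # [[ 1.] [-1.]]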
def E_MeV_at_reference_depth_cm(energy_MeV): <NEW_LINE> <INDENT> E_MeV_at_2cm = { 70: 48, 150: 138, 226: 215 } <NEW_LINE> return E_MeV_at_2cm[energy_MeV]
Geant4-calculated energy (MeV) at the 2 cm water reference depth
625941b363d6d428bbe442a3
def _create_database_artifacts(self): <NEW_LINE> <INDENT> pass
Dummy placeholder. Nothing to do.
625941b38e7ae83300e4ad7c
def __init__(self, longueur, largeur): <NEW_LINE> <INDENT> self.longueur = longueur <NEW_LINE> self.largeur = largeur <NEW_LINE> self.liste_objets = self.generer_liste_objets() <NEW_LINE> types = self.generer_liste_types() <NEW_LINE> self.map = self._generer_map()
Constructor
625941b3d6c5a10208143df3
def prompt(object): <NEW_LINE> <INDENT> return object.get_name() + ":" + object.get_responder_name() + "> "
Function that builds the Ptna prompt. Return value: 'Ptna object name:responder object name > '
625941b3f8510a7c17cf94b2
def eth_getFilterChanges(self, filter_id): <NEW_LINE> <INDENT> return self.web3.eth.getFilterChanges(filter_id)
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterchanges http://web3py.readthedocs.io/en/latest/web3.eth.html#web3.eth.Eth.getFilterChanges
625941b37b180e01f3dc45b5
def HR(p_c, T = 293.15): <NEW_LINE> <INDENT> return exp( p_c / (rho_liq*Rv*T) )
Clausius-Clapeyron formula: returns the value of relative humidity as a function of the capillary pressure and temperature. Input: capillary pressure p_c [Pa], temperature T [K]
625941b31b99ca400220a85d
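A worked sketch with the two module-level constants the record assumes; rho_liq and Rv are not shown, so the values below (liquid water) are assumptions:

from math import exp

rho_liq = 1000.0  # density of liquid water [kg/m^3] (assumed constant)
Rv = 461.5        # specific gas constant of water vapour [J/(kg K)] (assumed constant)

def HR(p_c, T=293.15):
    return exp(p_c / (rho_liq * Rv * T))

# capillary pressure is negative in an unsaturated pore:
print(HR(-1.0e8))  # ~0.48, i.e. about 48% relative humidity at 293.15 K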
def images(self, **kwargs): <NEW_LINE> <INDENT> path = self._get_id_path('images') <NEW_LINE> response = self._GET(path, kwargs) <NEW_LINE> self._set_attrs_to_values(response) <NEW_LINE> return response
Get the images for a specific person id. Returns: A dict representation of the JSON returned from the API.
625941b33617ad0b5ed67cab
def numeric(self, source, target, numeric, args, message): <NEW_LINE> <INDENT> if numeric == 1: <NEW_LINE> <INDENT> for ircchannel in self.ircchannels: <NEW_LINE> <INDENT> self.fire(JOIN(ircchannel)) <NEW_LINE> <DEDENT> <DEDENT> elif numeric == 433: <NEW_LINE> <INDENT> self.nick = newnick = "%s_" % self.nick <NEW_LINE> self.fire(NICK(newnick))
numeric Event This event is triggered by the ``IRC`` Protocol Component when we have received an IRC Numeric Event from the server we are connected to.
625941b376e4537e8c351423
def freq(self, freq : int = None)-> None: <NEW_LINE> <INDENT> ...
Gets or sets the frequency of the PWM object; the valid range is [1, 156250]. If the argument is empty, returns the current frequency of the PWM object; if an argument is given, sets the frequency of the PWM object to that value.
625941b3925a0f43d2549c1f
def scale_vecs(vecdict): <NEW_LINE> <INDENT> return [scalar_mul(value, 1/key) for (key,value) in vecdict.items()]
>>> v1 = Vec({1,2,3}, {2: 9}) >>> v2 = Vec({1,2,4}, {1: 1, 2: 2, 4: 8}) >>> scale_vecs({3: v1, 5: v2}) == [Vec({1,2,3},{2: 3.0}), Vec({1,2,4},{1: 0.2, 2: 0.4, 4: 1.6})] True
625941b31f037a2d8b945fac
def get_consumers(self, Consumer, channel): <NEW_LINE> <INDENT> return [ Consumer( queues=self.queue, callbacks=[self.on_message], accept=[self.message_default_format], prefetch_count=REANA_JOB_STATUS_CONSUMER_PREFETCH_COUNT if REANA_JOB_STATUS_CONSUMER_PREFETCH_COUNT else None, ) ]
Implement providing kombu.Consumers with queues/callbacks.
625941b30c0af96317bb7f96
def isNotInStr(search , content): <NEW_LINE> <INDENT> if isNotNone(search) and isStr(content): <NEW_LINE> <INDENT> if (len(content) >= len(search)): <NEW_LINE> <INDENT> return (search not in content) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print('WRONG INPUT FOR [isNotInStr]') <NEW_LINE> return False
This function checks that a string does not contain a subsequence. A subsequence could be a char, a string, or a value! :param search: string search element :param content: string content element
625941b356ac1b37e6263f8f
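A usage sketch; isNotNone and isStr are not shown in the record, so they are assumed to be the simple type guards their names suggest:

def isNotNone(x):
    return x is not None  # assumed helper

def isStr(x):
    return isinstance(x, str)  # assumed helper

print(isNotInStr("abc", "xyzabc"))  # False: "abc" occurs in the content
print(isNotInStr("abc", "xy"))      # False: content is shorter than the search term
print(isNotInStr("q", "xyzabc"))    # True: "q" does not occur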
def test_users_enrolled_in_active_only(self): <NEW_LINE> <INDENT> CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, is_active=True) <NEW_LINE> CourseEnrollmentFactory.create(user=self.user_2, course_id=self.course.id, is_active=False) <NEW_LINE> active_enrolled_users = list(CourseEnrollment.objects.users_enrolled_in(self.course.id)) <NEW_LINE> self.assertEqual([self.user], active_enrolled_users)
CourseEnrollment.users_enrolled_in should return only Users with active enrollments when `include_inactive` has its default value (False).
625941b301c39578d7e74bf1
def _sleep_timer(self): <NEW_LINE> <INDENT> if self.EngineModesManager.get_mic_mode() == "on": <NEW_LINE> <INDENT> self.EngineModesManager.set_mic_mode("sleeping")
Puts microphone to sleep if "on" via sleep_timer callback every x seconds
625941b399fddb7c1c9de148
def generate_token(): <NEW_LINE> <INDENT> credentials = oauth2.SpotifyClientCredentials( client_id='37373b59fc3442f88b4880d7bcaff6ea', client_secret='2edca93ea10e41c8b7601d693acad192') <NEW_LINE> token = credentials.get_access_token() <NEW_LINE> return token
Generate the token. Please respect these credentials :)
625941b3bf627c535bc12f84
def ask_if_true(self, query): <NEW_LINE> <INDENT> for _ in self.ask_generator(query): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False
Return True if the KB entails query, else return False.
625941b3cad5886f8bd26d8f
def set_etc_hosts_address(hostname, ip): <NEW_LINE> <INDENT> def read_file(path): <NEW_LINE> <INDENT> with open(path) as file: <NEW_LINE> <INDENT> contents = file.read() <NEW_LINE> <DEDENT> return contents <NEW_LINE> <DEDENT> def write_etc_hosts(text): <NEW_LINE> <INDENT> assert text.strip() <NEW_LINE> with open('/etc/hosts', 'w') as hosts_file: <NEW_LINE> <INDENT> hosts_file.write(text) <NEW_LINE> <DEDENT> <DEDENT> def edit_text(): <NEW_LINE> <INDENT> hostname_found = False <NEW_LINE> newlines = [] <NEW_LINE> for line in oldlines: <NEW_LINE> <INDENT> parts = line.split() <NEW_LINE> if hostname in parts: <NEW_LINE> <INDENT> parts[0] = ip <NEW_LINE> hostname_found = True <NEW_LINE> <DEDENT> line = ' '.join(parts) <NEW_LINE> log.debug('new line: {}'.format(line)) <NEW_LINE> newlines.append(line) <NEW_LINE> <DEDENT> if not hostname_found: <NEW_LINE> <INDENT> line = '{} {}'.format(ip, hostname) <NEW_LINE> newlines.append(line) <NEW_LINE> <DEDENT> newtext = '\n'.join(newlines).strip() + '\n' <NEW_LINE> log.debug('new text:\n{}'.format(newtext)) <NEW_LINE> return newtext <NEW_LINE> <DEDENT> require_user('root') <NEW_LINE> oldlines = read_file('/etc/hosts').strip().split('\n') <NEW_LINE> log.debug('old /etc/hosts:\n{}'.format('\n'.join(oldlines))) <NEW_LINE> newtext = edit_text() <NEW_LINE> assert newtext <NEW_LINE> write_etc_hosts(newtext) <NEW_LINE> assert read_file('/etc/hosts') == newtext
Set host address in /etc/hosts from device address.
625941b307d97122c4178637
def _csr_begin(self): <NEW_LINE> <INDENT> assert 0 <= self._slot <= 4, "Provided slot must be between 0 and 4." <NEW_LINE> self._key = bytearray(64) <NEW_LINE> if self.private_key: <NEW_LINE> <INDENT> self._atecc.gen_key(self._key, self._slot, self.private_key) <NEW_LINE> return <NEW_LINE> <DEDENT> self._atecc.gen_key(self._key, self._slot, self.private_key)
Initializes CSR generation.
625941b310dbd63aa1bd295e
def ComputeCG(self,var): <NEW_LINE> <INDENT> Utw = self.basis.T * self.atoms.masses() <NEW_LINE> return 2.0 / self.box * np.dot(Utw,var)
Computes CG momenta or positions: CG = U^t * Mass * var. var could be atomic positions or velocities
625941b3d10714528d5ffa93
def test_office_with_invalid_Keys(self): <NEW_LINE> <INDENT> response = self.app_test_client.post('/api/v1/office/add',json=self.invalid_keys) <NEW_LINE> self.assertEqual(response.status_code,400)
test adding office with an invalid key
625941b3925a0f43d2549c20
def find_translation(self, translated_polyhedron): <NEW_LINE> <INDENT> no_translation_exception = ValueError('polyhedron is not a translation of self') <NEW_LINE> if ( set(self.rays()) != set(translated_polyhedron.rays()) or set(self.lines()) != set(translated_polyhedron.lines()) or self.n_vertices() != translated_polyhedron.n_vertices() ): <NEW_LINE> <INDENT> raise no_translation_exception <NEW_LINE> <DEDENT> sorted_vertices = sorted(map(vector, self.vertices())) <NEW_LINE> sorted_translated_vertices = sorted(map(vector, translated_polyhedron.vertices())) <NEW_LINE> v = sorted_translated_vertices[0] - sorted_vertices[0] <NEW_LINE> if any(vertex+v != translated_vertex for vertex, translated_vertex in zip(sorted_vertices, sorted_translated_vertices)): <NEW_LINE> <INDENT> raise no_translation_exception <NEW_LINE> <DEDENT> return v
Return the translation vector to ``translated_polyhedron``. INPUT: - ``translated_polyhedron`` -- a polyhedron. OUTPUT: A `\ZZ`-vector that translates ``self`` to ``translated_polyhedron``. A ``ValueError`` is raised if ``translated_polyhedron`` is not a translation of ``self``, this can be used to check that two polyhedra are not translates of each other. EXAMPLES:: sage: X = polytopes.n_cube(3) sage: X.find_translation(X + vector([2,3,5])) (2, 3, 5) sage: X.find_translation(2*X) Traceback (most recent call last): ... ValueError: polyhedron is not a translation of self
625941b3236d856c2ad4458d
def shape(self, original_shape): <NEW_LINE> <INDENT> shape = original_shape <NEW_LINE> for processor in self._stack: <NEW_LINE> <INDENT> shape = processor.shape(shape) <NEW_LINE> <DEDENT> return shape
Return output shape of state Args: original_shape: tuple containing original state Returns: tuple containing processed state shape
625941b315baa723493c3d1f
def canonicalize_bond(arr): <NEW_LINE> <INDENT> container_type = type(arr) <NEW_LINE> if len(arr) == 0: <NEW_LINE> <INDENT> raise ValueError("zero sized array") <NEW_LINE> <DEDENT> elif len(arr) == 1: <NEW_LINE> <INDENT> return arr <NEW_LINE> <DEDENT> elif arr[0] > arr[-1]: <NEW_LINE> <INDENT> return container_type(reversed(arr)) <NEW_LINE> <DEDENT> elif arr[0] == arr[-1]: <NEW_LINE> <INDENT> raise ValueError("Invalid bond with first and last indices equal") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return arr
Canonicalize a bonded interaction. If arr[0] < arr[-1] then arr is returned, else if arr[0] > arr[-1], then arr[::-1] is returned. If arr[0] == arr[-1] then an exception is thrown. Parameters ---------- arr: list of int Bond indices. Returns ------- arr: list of int Canonicalized bond indices.
625941b3ad47b63b2c509d3a
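A few calls against the record's function, exercising its three branches:

print(canonicalize_bond((3, 1)))     # (1, 3): reversed so the indices ascend
print(canonicalize_bond([0, 5, 9]))  # [0, 5, 9]: already canonical
canonicalize_bond((2, 2))            # raises ValueError: first and last indices equal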
def add_parent(self, id, tob, mate_id=-1, mate_tob=-1): <NEW_LINE> <INDENT> if mate_id == -1: <NEW_LINE> <INDENT> self.action_history.append(np.array([id, tob])) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.action_history.append(np.array([[id, tob], [mate_id, mate_tob]]))
Add parent information to logs
625941b33539df3088e2e0f9
def protocol_0501(abf): <NEW_LINE> <INDENT> assert isinstance(abf, pyabf.ABF) <NEW_LINE> timeSec1, timeSec2 = 1.10, 1.30 <NEW_LINE> p1, p2 = int(timeSec1*abf.dataRate), int(timeSec2*abf.dataRate) <NEW_LINE> plotFigNew(abf) <NEW_LINE> shadeDigitalOutput(abf, 4) <NEW_LINE> for sweep in abf.sweepList: <NEW_LINE> <INDENT> abf.setSweep(sweep) <NEW_LINE> abf.sweepY[:p1] = np.nan <NEW_LINE> abf.sweepY[p2:] = np.nan <NEW_LINE> plt.plot(abf.sweepX, abf.sweepY, alpha=.2, color='.5') <NEW_LINE> <DEDENT> avg = pyabf.sweep.averageTrace(abf, timeSec1=timeSec1, timeSec2=timeSec2) <NEW_LINE> abf.sweepY *= np.nan <NEW_LINE> abf.sweepY[p1:p2] = avg <NEW_LINE> plt.plot(abf.sweepX, abf.sweepY) <NEW_LINE> plotFigSave(abf, tag="opto-avg", labelAxes=True) <NEW_LINE> plotFigNew(abf) <NEW_LINE> shadeDigitalOutput(abf, 4) <NEW_LINE> vertOffset = False <NEW_LINE> for sweep in abf.sweepList: <NEW_LINE> <INDENT> abf.setSweep(sweep) <NEW_LINE> if not vertOffset: <NEW_LINE> <INDENT> vertOffset = np.max(abf.sweepY[p1:p2]) - np.min(abf.sweepY[p1:p2]) <NEW_LINE> vertOffset *= 1.2 <NEW_LINE> <DEDENT> plt.plot(abf.sweepX[p1:p2], abf.sweepY[p1:p2] + vertOffset*sweep, color='b', alpha=.7) <NEW_LINE> <DEDENT> plotFigSave(abf, tag="opto-stacked", labelAxes=True) <NEW_LINE> return
0501 opto -50.pro
625941b3a79ad161976cbef3
def detail_with_a_future_question(self): <NEW_LINE> <INDENT> future_poll = create_poll(poll_text="Future poll", days=5) <NEW_LINE> response = self.client.get( reverse('polls:detail'), args=(future_poll.id,)) <NEW_LINE> self.assertEqual(response.status_code, 404)
The detail view of a poll with `publication_date` in the future should return 404.
625941b3d7e4931a7ee9dcc9
def upload_config(config, config_folder_names=[], config_files={}): <NEW_LINE> <INDENT> names = config_folder_names + config_files.keys() <NEW_LINE> ctx = dict(map(lambda name: (name, '%s/%s' % (config.path, name)), names)) <NEW_LINE> tmpfolder = mkdtemp() <NEW_LINE> listify = lambda what: what if isinstance(what, list) else [what] <NEW_LINE> for folder_name in config_folder_names: <NEW_LINE> <INDENT> setattr(config, folder_name, listify(getattr(config, folder_name))) <NEW_LINE> <DEDENT> for folder_name in config_folder_names: <NEW_LINE> <INDENT> local('mkdir %s/%s' % (tmpfolder, folder_name)) <NEW_LINE> <DEDENT> def copyfolder(folder, what): <NEW_LINE> <INDENT> if not os.path.exists(folder): <NEW_LINE> <INDENT> os.makedirs(folder) <NEW_LINE> <DEDENT> with settings(warn_only=True): <NEW_LINE> <INDENT> local('cp -r %(folder)s/* %(tmpfolder)s/%(what)s' % dict( folder=folder, tmpfolder=tmpfolder, what=what)) <NEW_LINE> <DEDENT> <DEDENT> for what in config_folder_names: <NEW_LINE> <INDENT> map(lambda f: copyfolder(f, what), getattr(config, what)) <NEW_LINE> <DEDENT> folder_paths = " ".join(map(lambda folder_name: "./%s" % folder_name, config_folder_names)) <NEW_LINE> local('cd %s && tar -f config_dir.tgz -cz %s' % (tmpfolder, folder_paths)) <NEW_LINE> with settings(warn_only=True): <NEW_LINE> <INDENT> map(lambda what: sudo("rm -rf '%s'" % ctx[what]), ctx.keys()) <NEW_LINE> <DEDENT> put('%s/config_dir.tgz' % tmpfolder, config.path, use_sudo=True) <NEW_LINE> with cd(config.path): <NEW_LINE> <INDENT> sudo('tar -xf config_dir.tgz') <NEW_LINE> <DEDENT> for file, contents in config_files.iteritems(): <NEW_LINE> <INDENT> files.append(ctx[file], contents, use_sudo=True)
Common code to upload puppet and chef config files to remote server. Heavily based on upload procedure from fabric-provision: https://github.com/caffeinehit/fabric-provision/blob/master/provision/__init__.py
625941b36e29344779a623c4
def render_diff(a, b, n=3): <NEW_LINE> <INDENT> actions = [] <NEW_LINE> chunks = [] <NEW_LINE> for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n): <NEW_LINE> <INDENT> old_line, old_end, new_line, new_end = group[0][1], group[-1][2], group[0][3], group[-1][4] <NEW_LINE> lines = [] <NEW_LINE> def add_line(old_lineno, new_lineno, action, line): <NEW_LINE> <INDENT> actions.append(action) <NEW_LINE> lines.append({ 'old_lineno': old_lineno, 'new_lineno': new_lineno, 'action': action, 'line': line, 'no_newline': not line.endswith(b'\n') }) <NEW_LINE> <DEDENT> chunks.append(lines) <NEW_LINE> for tag, i1, i2, j1, j2 in group: <NEW_LINE> <INDENT> if tag == 'equal': <NEW_LINE> <INDENT> for c, line in enumerate(a[i1:i2]): <NEW_LINE> <INDENT> add_line(i1+c, j1+c, 'unmod', e(line)) <NEW_LINE> <DEDENT> <DEDENT> elif tag == 'insert': <NEW_LINE> <INDENT> for c, line in enumerate(b[j1:j2]): <NEW_LINE> <INDENT> add_line(None, j1+c, 'add', e(line)) <NEW_LINE> <DEDENT> <DEDENT> elif tag == 'delete': <NEW_LINE> <INDENT> for c, line in enumerate(a[i1:i2]): <NEW_LINE> <INDENT> add_line(i1+c, None, 'del', e(line)) <NEW_LINE> <DEDENT> <DEDENT> elif tag == 'replace': <NEW_LINE> <INDENT> for c, line in enumerate(a[i1:i2]): <NEW_LINE> <INDENT> add_line(i1+c, None, 'del', e(line)) <NEW_LINE> <DEDENT> for c, line in enumerate(b[j1:j2]): <NEW_LINE> <INDENT> add_line(None, j1+c, 'add', e(line)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise AssertionError('unknown tag %s' % tag) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return actions.count('add'), actions.count('del'), chunks
Parse the diff and return data for the template.
625941b34e696a04525c9203
def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn, dev): <NEW_LINE> <INDENT> drbd_info = None <NEW_LINE> if dev.dev_type in constants.DTS_DRBD: <NEW_LINE> <INDENT> if dev.logical_id[0] == instance.primary_node: <NEW_LINE> <INDENT> snode_uuid = dev.logical_id[1] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> snode_uuid = dev.logical_id[0] <NEW_LINE> <DEDENT> drbd_info = { "primary_node": node_uuid2name_fn(instance.primary_node), "primary_minor": dev.logical_id[3], "secondary_node": node_uuid2name_fn(snode_uuid), "secondary_minor": dev.logical_id[4], "port": dev.logical_id[2], "secret": dev.logical_id[5], } <NEW_LINE> <DEDENT> dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node, instance, dev) <NEW_LINE> dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev) <NEW_LINE> if dev.children: <NEW_LINE> <INDENT> dev_children = map(compat.partial(self._ComputeDiskStatusInner, instance, snode_uuid, node_uuid2name_fn), dev.children) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dev_children = [] <NEW_LINE> <DEDENT> return { "iv_name": dev.iv_name, "dev_type": dev.dev_type, "logical_id": dev.logical_id, "drbd_info": drbd_info, "pstatus": dev_pstatus, "sstatus": dev_sstatus, "children": dev_children, "mode": dev.mode, "size": dev.size, "spindles": dev.spindles, "name": dev.name, "uuid": dev.uuid, }
Compute block device status. @attention: The device has to be annotated already.
625941b36fece00bbac2d4e8
def updateRange(service, SpreadsheetId, SheetName, req_range, sheetData): <NEW_LINE> <INDENT> requestBody = {'values': sheetData} <NEW_LINE> a1Note = "'{0}'!{1}".format(SheetName, req_range) <NEW_LINE> spreadServ = service.spreadsheets().values() <NEW_LINE> returnedRange = spreadServ.update( spreadsheetId=SpreadsheetId, range=a1Note, body=requestBody, valueInputOption="USER_ENTERED").execute() <NEW_LINE> if not returnedRange: <NEW_LINE> <INDENT> raise RangeNotUpdatedError(returnedRange) <NEW_LINE> <DEDENT> return returnedRange
Update the given range of the named sheet with the supplied values.
625941b326238365f5f0ec16
def __str__(self): <NEW_LINE> <INDENT> return self.text[:50] + "..."
Returns a string representation of the model.
625941b330bbd722463cbb70
def clean_logs(self): <NEW_LINE> <INDENT> logger.info("Cleaning logs") <NEW_LINE> restart = False <NEW_LINE> if self.running: <NEW_LINE> <INDENT> logger.warn("The cluster needs to be stopped before cleaning.") <NEW_LINE> self.stop() <NEW_LINE> restart = True <NEW_LINE> <DEDENT> action = Remote("rm -f " + self.logs_file, self.hosts) <NEW_LINE> action.run() <NEW_LINE> if restart: <NEW_LINE> <INDENT> self.start()
Remove all MongoDB logs.
625941b34527f215b584c212
def test_get_statistics_top_country_empty(self): <NEW_LINE> <INDENT> top_country_name_empty = InstallationStatistics.get_statistics_top_country([]) <NEW_LINE> self.assertEqual(top_country_name_empty, '')
Test get_statistics_top_country method of the Installation statistics with an empty parameter.
625941b3377c676e91271f60
def getLayoutNames(self): <NEW_LINE> <INDENT> if self._db == '': <NEW_LINE> <INDENT> raise FMError('No database was selected') <NEW_LINE> <DEDENT> request = [] <NEW_LINE> request.append(uu({'-db': self._db})) <NEW_LINE> request.append(uu({'-layoutnames': ''})) <NEW_LINE> result = self._doRequest(request) <NEW_LINE> result = FMResultset.FMResultset(result) <NEW_LINE> layoutNames = [] <NEW_LINE> for layoutName in result.resultset: <NEW_LINE> <INDENT> layoutNames.append(string.lower(layoutName['LAYOUT_NAME'])) <NEW_LINE> <DEDENT> return layoutNames
This function returns the list of layouts for the current db.
625941b3b545ff76a8913bce
def twoSum(self, nums, target): <NEW_LINE> <INDENT> sortNum = sorted( nums ) <NEW_LINE> i, j = 0, len(nums) - 1 <NEW_LINE> while i < j: <NEW_LINE> <INDENT> temp = sortNum[i] + sortNum[j] <NEW_LINE> if temp < target: <NEW_LINE> <INDENT> i += 1 <NEW_LINE> <DEDENT> elif temp > target: <NEW_LINE> <INDENT> j -= 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if sortNum[i] != sortNum[j]: <NEW_LINE> <INDENT> return sorted([ nums.index(sortNum[i]), nums.index(sortNum[j])]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return sorted([idx for idx in range( len(nums)) if nums[idx] == sortNum[i]])
:type nums: List[int] :type target: int :rtype: List[int]
625941b350485f2cf553cb46
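A standalone run of the record's two-pointer approach (the method body with self dropped), covering the duplicate-value branch:

def two_sum(nums, target):
    sortNum = sorted(nums)
    i, j = 0, len(nums) - 1
    while i < j:
        temp = sortNum[i] + sortNum[j]
        if temp < target:
            i += 1
        elif temp > target:
            j -= 1
        elif sortNum[i] != sortNum[j]:
            return sorted([nums.index(sortNum[i]), nums.index(sortNum[j])])
        else:
            return sorted(idx for idx in range(len(nums)) if nums[idx] == sortNum[i])

print(two_sum([2, 7, 11, 15], 9))  # [0, 1]
print(two_sum([3, 3], 6))          # [0, 1]: duplicates resolved by scanning all indices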
def _validate_metric_name(name): <NEW_LINE> <INDENT> if not _VALID_PARAM_AND_METRIC_NAMES.match(name): <NEW_LINE> <INDENT> raise Exception("Invalid metric name: '%s'. %s" % (name, _BAD_CHARACTERS_MESSAGE)) <NEW_LINE> <DEDENT> if _path_not_unique(name): <NEW_LINE> <INDENT> raise Exception("Invalid metric name: '%s'. %s" % (name, _bad_path_message(name)))
Check that `name` is a valid metric name and raise an exception if it isn't.
625941b38da39b475bd64d24
def find(mal, regex, filtering='all', extra=False, user=None): <NEW_LINE> <INDENT> items = mal.find(regex, extra=extra, user=user) <NEW_LINE> if len(items) == 0: <NEW_LINE> <INDENT> print(color.colorize("No matches in list ᕙ(⇀‸↼‶)ᕗ", 'red')) <NEW_LINE> return <NEW_LINE> <DEDENT> if filtering != 'all': <NEW_LINE> <INDENT> items = [x for x in items if x['status_name'] == filtering] <NEW_LINE> <DEDENT> n_items = color.colorize(str(len(items)), 'cyan', 'underline') <NEW_LINE> print("Matched {} items:".format(n_items)) <NEW_LINE> sorted_items = sorted(items, key=itemgetter('status'), reverse=True) <NEW_LINE> for index, item in enumerate(sorted_items): <NEW_LINE> <INDENT> anime_pprint(index + 1, item, extra=extra)
Find all anime in a certain status given a regex.
625941b3baa26c4b54cb0ed2
def findshift(imagenum1, imagenum2, paramlog): <NEW_LINE> <INDENT> match=paramlog[paramlog['Filenumber']==imagenum1] <NEW_LINE> if len(match)==1: <NEW_LINE> <INDENT> fname1=match.iloc[0]['Filename'].replace('.sem','.jpg') <NEW_LINE> image1=Image.open(fname1) <NEW_LINE> imshiftx1=match.iloc[0]['ImageshiftX'] <NEW_LINE> imshifty1=match.iloc[0]['ImageshiftY'] <NEW_LINE> FOV=match.iloc[0]['FieldofView'] <NEW_LINE> <DEDENT> match=paramlog[paramlog['Filenumber']==imagenum2] <NEW_LINE> if len(match)==1: <NEW_LINE> <INDENT> fname2=match.iloc[0]['Filename'].replace('.sem','.jpg') <NEW_LINE> image2=Image.open(fname2) <NEW_LINE> imshiftx2=match.iloc[0]['ImageshiftX'] <NEW_LINE> imshifty2=match.iloc[0]['ImageshiftY'] <NEW_LINE> <DEDENT> image1= np.array(image1) <NEW_LINE> image2= np.array(image2) <NEW_LINE> driftx=imshiftx2-imshiftx1 <NEW_LINE> drifty=imshifty2-imshifty1 <NEW_LINE> pixshift, error, diffphase = register_translation(image1, image2) <NEW_LINE> shift=FOV*pixshift/512 <NEW_LINE> print('X Drift =', str(driftx), ' microns. Y drift =', str(drifty), 'microns. Uncorr shift =', shift) <NEW_LINE> return shift, error
Pass pre- and post-images; determine the stage drift in microns and any uncorrected pixel shift, and print it. The error is returned from register_translation
625941b326238365f5f0ec17
def pformat_atom_detail(atom_detail, indent=0): <NEW_LINE> <INDENT> detail_type = logbook.atom_detail_type(atom_detail) <NEW_LINE> lines = ["%s%s: '%s'" % (" " * (indent), detail_type, atom_detail.name)] <NEW_LINE> lines.extend(_format_shared(atom_detail, indent=indent + 1)) <NEW_LINE> lines.append("%s- version = %s" % (" " * (indent + 1), misc.get_version_string(atom_detail))) <NEW_LINE> lines.append("%s- results = %s" % (" " * (indent + 1), atom_detail.results)) <NEW_LINE> lines.append("%s- failure = %s" % (" " * (indent + 1), bool(atom_detail.failure))) <NEW_LINE> lines.extend(_format_meta(atom_detail.meta, indent=indent + 1)) <NEW_LINE> return "\n".join(lines)
Pretty-formats an atom detail.
625941b367a9b606de4a7c6b
def runCD(cd): <NEW_LINE> <INDENT> logger.debug("runCD: %s", cd) <NEW_LINE> def preexec(): <NEW_LINE> <INDENT> resource.setrlimit(resource.RLIMIT_CPU, (LIMIT_SEC, LIMIT_SEC)) <NEW_LINE> <DEDENT> if cd.state != 'Q': <NEW_LINE> <INDENT> raise RuntimeError("This CD is not in state queued! (%s)"%cd) <NEW_LINE> <DEDENT> cd.state = 'R' <NEW_LINE> cd.save() <NEW_LINE> pid = os.fork() <NEW_LINE> if pid == 0: <NEW_LINE> <INDENT> preexec() <NEW_LINE> try: <NEW_LINE> <INDENT> cd._run() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> type, value, tb = sys.exc_info() <NEW_LINE> import traceback <NEW_LINE> logger.error("queue.py: printing exception") <NEW_LINE> logger.error("%s"%type) <NEW_LINE> logger.error("%s"%value) <NEW_LINE> logger.error("\n".join(traceback.format_tb(tb))) <NEW_LINE> f = open(cd.basedir+'/jako-queue.stdout', 'w') <NEW_LINE> print >> f, "%r"%value <NEW_LINE> print >> f <NEW_LINE> print >> f, "\n".join(traceback.format_tb(tb)) <NEW_LINE> f.close() <NEW_LINE> cd.state = 'X' <NEW_LINE> cd.dtime = utcnow() <NEW_LINE> cd.save() <NEW_LINE> os._exit(1) <NEW_LINE> <DEDENT> os._exit(0) <NEW_LINE> <DEDENT> _waited_pid, status = os.waitpid(pid, 0) <NEW_LINE> signal = status % 256 <NEW_LINE> exitstatus = status // 256 <NEW_LINE> logger.debug("runCD: done running %s %s %s %s", cd, _waited_pid, signal, exitstatus) <NEW_LINE> cd = models.CD.objects.get(id=cd.id) <NEW_LINE> if exitstatus == 0: <NEW_LINE> <INDENT> cd.state = 'D' <NEW_LINE> cd.save() <NEW_LINE> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cd.state = 'X' <NEW_LINE> cd.dtime = utcnow() <NEW_LINE> cd.save() <NEW_LINE> message = 'CD(%s) died, signal=%s, exitstatus=%s'%(cd.id, signal, exitstatus) <NEW_LINE> return False
Run a single CD method and return. Use `run()` as the main entry point.
625941b338b623060ff0aba4
def _empty_adaptor(self): <NEW_LINE> <INDENT> LOG.debug("The adaptor is empty") <NEW_LINE> map(self.inhibit, self.outgoingTransitions(self.getInitial()))
The adaptor inhibits everything and thus it is empty. This method is called when it inhibits the initial state
625941b3d164cc6175782afb
def test_browserlayer_removed(self): <NEW_LINE> <INDENT> from edi.itunesquizz.interfaces import IEdiItunesquizzLayer <NEW_LINE> from plone.browserlayer import utils <NEW_LINE> self.assertNotIn( IEdiItunesquizzLayer, utils.registered_layers())
Test that IEdiItunesquizzLayer is removed.
625941b36fece00bbac2d4e9
def get(self,request): <NEW_LINE> <INDENT> print("没有使用优化ORM-起始时间:{}".format(datetime.datetime.now())) <NEW_LINE> obj_list = models.UserProfile.objects.filter(id__lte=5) <NEW_LINE> print("数据量:{}".format(len(obj_list))) <NEW_LINE> for foo in obj_list: <NEW_LINE> <INDENT> temp = foo.name <NEW_LINE> <DEDENT> print("没有使用优化ORM-结束时间:{}".format(datetime.datetime.now())) <NEW_LINE> print("\n") <NEW_LINE> print("------------------------") <NEW_LINE> print("\n") <NEW_LINE> print("优化ORM-起始时间:{}".format(datetime.datetime.now())) <NEW_LINE> obj_list = models.UserProfile.objects.select_related().filter(id__lte=5) <NEW_LINE> print("数据量:{}".format(len(obj_list))) <NEW_LINE> for foo in obj_list: <NEW_LINE> <INDENT> temp = foo.name <NEW_LINE> <DEDENT> print("优化ORM-结束时间:{}".format(datetime.datetime.now())) <NEW_LINE> return Response({ "success": False, "msg": "小数据查询", "results": "" }, status=status.HTTP_200_OK)
Small-data query; verify the SQL statements. :param request: :return:
625941b3507cdc57c6306a80
def exportLabelledArrayWithHeader( self, arr, names, header, fname, sep=',', format='%f' ): <NEW_LINE> <INDENT> if names != None: <NEW_LINE> <INDENT> assert arr.shape[0] == len( names ), '\n ... rows must equal number of names!' <NEW_LINE> <DEDENT> if header != None: <NEW_LINE> <INDENT> assert arr.shape[1] == len( header ), '\n ... cols must equal number of header names!' <NEW_LINE> <DEDENT> F = open(fname, 'w') <NEW_LINE> cntr = 0 <NEW_LINE> if header != None: <NEW_LINE> <INDENT> if names != None: <NEW_LINE> <INDENT> hstr = ' ' + sep <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> hstr = '' <NEW_LINE> <DEDENT> for h in header: <NEW_LINE> <INDENT> hstr += str(h) + sep <NEW_LINE> <DEDENT> hstr = hstr[:-1] + '\n' <NEW_LINE> F.write(hstr) <NEW_LINE> del hstr <NEW_LINE> <DEDENT> for r in range(arr.shape[0]): <NEW_LINE> <INDENT> if names != None: <NEW_LINE> <INDENT> F.write(('%s' + sep) % names[r]) <NEW_LINE> <DEDENT> for c in range(arr.shape[1]): <NEW_LINE> <INDENT> if c < arr.shape[1] - 1: <NEW_LINE> <INDENT> F.write((format + sep) % arr[r, c]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> F.write((format + '\n') % arr[r, c]) <NEW_LINE> <DEDENT> <DEDENT> cntr += 1 <NEW_LINE> if cntr >= 250: <NEW_LINE> <INDENT> F.flush() <NEW_LINE> cntr = 1 <NEW_LINE> <DEDENT> <DEDENT> F.write('\n') <NEW_LINE> F.flush() <NEW_LINE> F.close() <NEW_LINE> print('exported to %s' % fname)
Export an array with row names and header - *arr* an array-like object - *names* the list of row names - *header* the list of column names - *fname* the output filename - *sep* [default=','] the column separator - *format* [default='%f'] the output number format
625941b37047854f462a11bc
def get_ResPhase(self, **kwargs): <NEW_LINE> <INDENT> rp = ResPhase(self._Z, **kwargs) <NEW_LINE> return rp
returns a ResPhase object from z_object
625941b363d6d428bbe442a5
def test_suite(): <NEW_LINE> <INDENT> print("tests for turn clockwise") <NEW_LINE> test(turn_clockwise("N") == "E") <NEW_LINE> test(turn_clockwise("W") == "N") <NEW_LINE> test(turn_clockwise(42) == None) <NEW_LINE> test(turn_clockwise(" ") == None) <NEW_LINE> print("\nday to name") <NEW_LINE> test(day_name(3) == "Wednesday") <NEW_LINE> test(day_name(6) == "Saturday") <NEW_LINE> test(day_name(42) == None) <NEW_LINE> print("\nday name to number") <NEW_LINE> test(day_num("Friday") == 5) <NEW_LINE> test(day_num("Sunday") == 0) <NEW_LINE> test(day_num(day_name(3)) == 3) <NEW_LINE> test(day_name(day_num("Thursday")) == "Thursday") <NEW_LINE> test(day_num("Halloween") == None) <NEW_LINE> print("\nday_add") <NEW_LINE> test(day_add("Monday", 4) == "Friday") <NEW_LINE> test(day_add("Tuesday", 0) == "Tuesday") <NEW_LINE> test(day_add("Tuesday", 14) == "Tuesday") <NEW_LINE> test(day_add("Sunday", 100) == "Tuesday") <NEW_LINE> test(day_add("Sunday", -1) == "Saturday") <NEW_LINE> test(day_add("Sunday", -7) == "Sunday") <NEW_LINE> test(day_add("Tuesday", -100) == "Sunday") <NEW_LINE> print("\ndays_in_month") <NEW_LINE> test(days_in_month("February") == 28) <NEW_LINE> test(days_in_month("December") == 31)
Run the suite of tests for code in this module (this file).
625941b3293b9510aa2c3048
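One plausible implementation of turn_clockwise satisfying the tests above (an assumption, since only the tests appear in the record):

def turn_clockwise(direction):
    compass = ["N", "E", "S", "W"]
    if direction in compass:
        return compass[(compass.index(direction) + 1) % 4]
    return None  # anything that is not a compass point maps to None

print(turn_clockwise("N"))  # E
print(turn_clockwise(42))   # None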
def set_grado_controller(self, grado_controller): <NEW_LINE> <INDENT> self.__gradoController = grado_controller
Updates the controller for the grado entity. :param grado_controller: grado controller (GradoController)
625941b366673b3332b91e45
def jsSelectOne(self, q, cbId): <NEW_LINE> <INDENT> return 'onSelectTemplateObject(%s,%s,%s)' % (q(cbId), q(self.formName), q(self.insert))
Generates the Javascript code to execute when a single object is selected in the popup.
625941b3d6c5a10208143df4
def append_nc(nc_file, var_name, dtype='f4', chunksizes=(1, 128, 128), fill_value=-9999, metadata={}, logging=logging): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> nc_obj = nc.Dataset(nc_file, 'a') <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> logging.error('Cannot write to {:s}'.format(nc_file)) <NEW_LINE> sys.exit(1) <NEW_LINE> <DEDENT> variab = nc_obj.createVariable(var_name, dtype, ('time', 'lat', 'lon',), chunksizes=chunksizes, fill_value=fill_value, zlib=True) <NEW_LINE> variab.coordinates = 'lat lon' <NEW_LINE> if metadata: <NEW_LINE> <INDENT> for attribute in metadata: <NEW_LINE> <INDENT> variab.setncattr(attribute, metadata[attribute]) <NEW_LINE> <DEDENT> <DEDENT> nc_obj.sync() <NEW_LINE> nc_obj.close()
Write a new (empty) variable to target NetCDF file. input: :: nc_file: path to the target NetCDF file var_name: String, variable name of source NetCDF metadata: dictionary of attributes, belonging to the variable.
625941b391af0d3eaac9b7c1
def plot_hist(ph): <NEW_LINE> <INDENT> fl2 = "/home/amaity/Dropbox/Datasets-Analyses/ptss-poc/lace_find_normal/dfs-8000-roi-instrumentation-d8w5" <NEW_LINE> phx = pd.read_csv(fl2+"/dataset_ph"+str(ph)+".csv") <NEW_LINE> t1 = phx['time'].values <NEW_LINE> t2 = t1*1000.0 <NEW_LINE> ulim = np.max(t2) <NEW_LINE> llim = np.min(t2) <NEW_LINE> print("ph-%d, llim : %d"%(ph,llim)) <NEW_LINE> print("ph-%d, ulim : %d"%(ph,ulim)) <NEW_LINE> alpha = 1 <NEW_LINE> beta = 1 <NEW_LINE> a = 31 <NEW_LINE> b = 33 <NEW_LINE> x = np.linspace(a,b,num=1000) <NEW_LINE> pdfapp = norm(loc=np.mean(t2),scale=np.std(t2)) <NEW_LINE> y = pdfapp.pdf(x) <NEW_LINE> plt.hist(t2,bins=1000,density=True,label="Actual Distribution") <NEW_LINE> plt.plot(x,y,label="Normal (mean:%04.2fms,std:%04.2fms)"%(np.mean(t2),np.std(t2)),color="black") <NEW_LINE> plt.xlabel("Execution Time (ms)") <NEW_LINE> plt.title("Execution Time Distribution") <NEW_LINE> plt.xlim(a,b) <NEW_LINE> plt.legend() <NEW_LINE> plt.savefig(fl2+"/et-sf"+str(ph)+".pdf") <NEW_LINE> plt.close()
Plot the histograms, means, and variances of the lace benchmarks. Input: the phase number
625941b37b180e01f3dc45b7
def owners(self, org): <NEW_LINE> <INDENT> return self._parsed("organizations/" + org + "/owners")
List the owners of an organization.
625941b38e05c05ec3eea11f
def targz_extract_programm(dProgramm=None, sPackageDir=INSTALL_PACKAGES_DIR_DEFAULT): <NEW_LINE> <INDENT> if dProgramm is None: <NEW_LINE> <INDENT> log.warning(u'Targz. Не определен пакет для разархивирования') <NEW_LINE> return False <NEW_LINE> <DEDENT> remove_programm(dProgramm) <NEW_LINE> install_dir = None <NEW_LINE> if 'dir' in dProgramm: <NEW_LINE> <INDENT> install_dir = normpath(dProgramm['dir']) <NEW_LINE> <DEDENT> if install_dir is None: <NEW_LINE> <INDENT> log.warning(u'Targz. Не определена инсталляционная директория для пакета <%s>' % dProgramm.get('name', None)) <NEW_LINE> return False <NEW_LINE> <DEDENT> if install_dir and not os.path.exists(install_dir): <NEW_LINE> <INDENT> log.info(u'Создание инсталляционной директории <%s>' % install_dir) <NEW_LINE> os.makedirs(install_dir) <NEW_LINE> <DEDENT> tar_filename = os.path.join('.', sPackageDir, dProgramm['programm']) <NEW_LINE> tar_file_name = normpath(tar_filename) <NEW_LINE> log.info(u'Полное имя файла TaGz <%s> программы для разархивирования (%s)' % (tar_file_name, tar_filename)) <NEW_LINE> console = dProgramm.get('console', True) <NEW_LINE> return targz_extract_to_dir(tar_file_name, install_dir, bConsole=console)
Extract a tar archive.
625941b32c8b7c6e89b3557a
def test_visible(self): <NEW_LINE> <INDENT> self.assertTrue(self.ui.get_visible())
The widget is visible.
625941b31f037a2d8b945fae
def origin_crop_to_target_shape(image, target_shape, origin): <NEW_LINE> <INDENT> native_shape = extract_height_width(image.shape) <NEW_LINE> target_shape = extract_height_width(target_shape) <NEW_LINE> if not crop_in_bounds(native_shape, target_shape, origin): <NEW_LINE> <INDENT> return ((0, 0) + native_shape) <NEW_LINE> <DEDENT> return (origin + target_shape)
Best effort to crop an image to a target shape from fixed origin Arguments: image An image. Either single channel (grayscale) or multi-channel (color) target_shape Target shape of the image section to crop (height, width) origin Tuple containing hardcoded origin (row_offset, column_offset). The "origin" is the upper-left corner of the cropped image relative to the top left at (0,0). Returns: A crop description
625941b30c0af96317bb7f98
def save(self, filename=None, force_unicode=False, quiet=False): <NEW_LINE> <INDENT> if filename: <NEW_LINE> <INDENT> if isinstance(filename, list): <NEW_LINE> <INDENT> file_to_use = os.path.join(*filename) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> file_to_use = filename <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> file_to_use = self.filename <NEW_LINE> <DEDENT> if quiet == False: <NEW_LINE> <INDENT> print("Saving : {0}".format(file_to_use)) <NEW_LINE> <DEDENT> if ".csv" in file_to_use: <NEW_LINE> <INDENT> export_csv(file_to_use, self.header, self.data, force_unicode=force_unicode) <NEW_LINE> <DEDENT> elif ".psv" in file_to_use: <NEW_LINE> <INDENT> export_csv(file_to_use, self.header, self.data, force_unicode=force_unicode) <NEW_LINE> <DEDENT> elif ".xlsx" in file_to_use: <NEW_LINE> <INDENT> self.xlsx_book().save(file_to_use) <NEW_LINE> <DEDENT> elif ".xls" in file_to_use: <NEW_LINE> <INDENT> self.xls_book().save(file_to_use)
Save out as a csv, psv, xls, or xlsx file.
625941b391af0d3eaac9b7c2
def _request(self, buf, properties, **kwargs): <NEW_LINE> <INDENT> self.ensure_alive() <NEW_LINE> try: <NEW_LINE> <INDENT> input_format = properties.get("inputFormat", "text") <NEW_LINE> if input_format == "text": <NEW_LINE> <INDENT> ctype = "text/plain; charset=utf-8" <NEW_LINE> <DEDENT> elif input_format == "serialized": <NEW_LINE> <INDENT> ctype = "application/x-protobuf" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Unrecognized inputFormat " + input_format) <NEW_LINE> <DEDENT> if 'username' in kwargs and 'password' in kwargs: <NEW_LINE> <INDENT> kwargs['auth'] = requests.auth.HTTPBasicAuth(kwargs['username'], kwargs['password']) <NEW_LINE> kwargs.pop('username') <NEW_LINE> kwargs.pop('password') <NEW_LINE> <DEDENT> r = requests.post(self.endpoint, params={'properties': str(properties)}, data=buf, headers={'content-type': ctype}, timeout=(self.timeout*2)/1000, **kwargs) <NEW_LINE> r.raise_for_status() <NEW_LINE> return r <NEW_LINE> <DEDENT> except requests.HTTPError as e: <NEW_LINE> <INDENT> if r.text == "CoreNLP request timed out. Your document may be too long.": <NEW_LINE> <INDENT> raise TimeoutException(r.text) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise AnnotationException(r.text)
Send a request to the CoreNLP server. :param (str | bytes) buf: data to be sent with the request :param (dict) properties: properties that the server expects :return: request result
625941b35fc7496912cc3735
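A hedged call sketch for the request helper above; client is a placeholder for an instance of the surrounding class, and username/password are forwarded as HTTP basic auth:

props = {'annotators': 'tokenize,ssplit', 'inputFormat': 'text'}
resp = client._request(b'The quick brown fox.', props,
                       username='user', password='secret')
print(resp.status_code)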
def as_dict(self, include_private=False): <NEW_LINE> <INDENT> keys = [] <NEW_LINE> transactions = [] <NEW_LINE> for netw in self.networks(): <NEW_LINE> <INDENT> for key in self.keys(network=netw.name, include_private=include_private, as_dict=True): <NEW_LINE> <INDENT> keys.append(key) <NEW_LINE> <DEDENT> if self.multisig: <NEW_LINE> <INDENT> for t in self.transactions(include_new=True, account_id=0, network=netw.name): <NEW_LINE> <INDENT> transactions.append(t.as_dict()) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> accounts = self.accounts(network=netw.name) <NEW_LINE> if not accounts: <NEW_LINE> <INDENT> accounts = [0] <NEW_LINE> <DEDENT> for account_id in accounts: <NEW_LINE> <INDENT> for t in self.transactions(include_new=True, account_id=account_id, network=netw.name): <NEW_LINE> <INDENT> transactions.append(t.as_dict()) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return { 'wallet_id': self.wallet_id, 'name': self.name, 'owner': self._owner, 'scheme': self.scheme, 'witness_type': self.witness_type, 'main_network': self.network.name, 'main_balance': self.balance(), 'main_balance_str': self.balance(as_string=True), 'balances': self._balances, 'default_account_id': self.default_account_id, 'multisig_n_required': self.multisig_n_required, 'cosigner_wallet_ids': [w.wallet_id for w in self.cosigner], 'cosigner_public_masters': [w.public_master().key().wif() for w in self.cosigner], 'sort_keys': self.sort_keys, 'main_key_id': self.main_key_id, 'encoding': self.encoding, 'keys': keys, 'transactions': transactions, }
Return wallet information in dictionary format :param include_private: Include private key information in dictionary :type include_private: bool :return dict:
625941b3fb3f5b602dac3446
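A call sketch for the wallet serializer above; wallet stands for any instance of the surrounding class:

info = wallet.as_dict(include_private=False)
print(info['main_network'], info['main_balance_str'])
print(len(info['keys']), 'keys,', len(info['transactions']), 'transactions')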
def run_button(self): <NEW_LINE> <INDENT> self.STOP = False <NEW_LINE> self.PRACTICE_TOOL = self.practice_tool_var.get() <NEW_LINE> self.update_status_bar("Running ...") <NEW_LINE> try: <NEW_LINE> <INDENT> if not self.init_thread.init.ROLES_ASSIGNED_FLAG: <NEW_LINE> <INDENT> raise AttributeError('roles not assigned yet') <NEW_LINE> <DEDENT> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> self.init_thread = thread_generator.InitThread(self) <NEW_LINE> <DEDENT> tracker_thread = thread_generator.TrackerThread(self.init_thread) <NEW_LINE> self.logger_thread = thread_generator.LoggerThread(self, self.init_thread, tracker_thread)
Run initialization if needed, then start the jungle tracking loop. Resets the STOP flag.
625941b3e1aae11d1e749a63
def __getitem__(self, item): <NEW_LINE> <INDENT> indexes = self.indexes[item * self.batch_size:(item + 1) * self.batch_size] <NEW_LINE> ids_list_subset = self.ids_list[indexes] <NEW_LINE> return self.__data_generation(indexes, ids_list_subset)
Return the batch of samples at the given batch index.
625941b38a43f66fc4b53e1d
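Successive indices walk through self.indexes in batch_size strides; a usage sketch with a hypothetical generator instance of the surrounding class:

batch = generator[0]   # built from indexes[0:batch_size]
batch = generator[1]   # built from indexes[batch_size:2*batch_size]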
def get_file_name_from_full_path(file_path): <NEW_LINE> <INDENT> file_path = file_path[file_path.rfind("/") + 1:] <NEW_LINE> if file_path.find(".") != -1: <NEW_LINE> <INDENT> file_path = file_path[:file_path.find(".")] <NEW_LINE> <DEDENT> return file_path
Remove any directory info and file extensions to get just the file name from an input path or filename. :param file_path: string path or name of a file, potentially with a file extension :return: string of just the file name
625941b34a966d76dd550dba
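The behavior is easiest to see on concrete inputs; note that everything after the first dot is discarded:

print(get_file_name_from_full_path('/data/raw/report.csv'))   # report
print(get_file_name_from_full_path('archive.tar.gz'))         # archive
print(get_file_name_from_full_path('/tmp/noextension'))       # noextension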
def testSharePictureInGridViewWithYouTube(self): <NEW_LINE> <INDENT> u._clearAllResource() <NEW_LINE> u._prepareVideo() <NEW_LINE> time.sleep(2) <NEW_LINE> d.swipe(550,1100,551,1101) <NEW_LINE> u.shareItem('YouTube') <NEW_LINE> assert d(text = 'Choose an account').wait.exists(timeout = 2000)
1.Launch gallery activity 2.Enter Grid view 3.Long touch a pic or a video to select 4.Tap share icon 5.Tap YouTube option
625941b3d8ef3951e32432ed
def __init__(self, tfrecord_path, batch_size=1, num_parallel_batches=None, shuffle_buffer_size=None, repeat=None, prefetch_buffer_size=1): <NEW_LINE> <INDENT> if not tfrecord_path.endswith('.tfrecord'): <NEW_LINE> <INDENT> raise ValueError('The TFRecord path must end with ".tfrecord", however ' 'the path you specified was: %s' % tfrecord_path) <NEW_LINE> <DEDENT> self.tfrecord_path = tfrecord_path <NEW_LINE> self.batch_size = batch_size <NEW_LINE> self.num_parallel_batches = (num_parallel_batches if num_parallel_batches else multiprocessing.cpu_count()) <NEW_LINE> self.shuffle_buffer_size = shuffle_buffer_size <NEW_LINE> self.repeat = repeat <NEW_LINE> self.prefetch_buffer_size = prefetch_buffer_size <NEW_LINE> self.metadata_path = tfrecord_path_to_metadata_path(tfrecord_path) <NEW_LINE> with open(self.metadata_path) as f: <NEW_LINE> <INDENT> self.metadata = json.load(f) <NEW_LINE> <DEDENT> if self.metadata['type'] == 'Example': <NEW_LINE> <INDENT> self.features_parser_config = { key: self._get_feature_parser_config(val) for key, val in self.metadata['features'].items() } <NEW_LINE> <DEDENT> if self.metadata['type'] == 'SequenceExample': <NEW_LINE> <INDENT> if 'context_features' in self.metadata: <NEW_LINE> <INDENT> self.context_features_parser_config = { key: self._get_feature_parser_config(val) for key, val in self.metadata['context_features'].items() } <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.context_features_parser_config = None <NEW_LINE> <DEDENT> if 'sequence_features' in self.metadata: <NEW_LINE> <INDENT> self.sequence_features_parser_config = { key: self._get_feature_parser_config(val, is_sequence_feature=True) for key, val in self.metadata['sequence_features'].items() } <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.sequence_features_parser_config = None
Initialize the database object. Store the initialization parameters and read in the metadata from the metadata file.
625941b3a79ad161976cbef5
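A constructor sketch for the reader above. The class name TFRecordReader and the file path are hypothetical, and a sibling metadata JSON (as produced by tfrecord_path_to_metadata_path) must already exist:

reader = TFRecordReader('data/train.tfrecord', batch_size=32,
                        shuffle_buffer_size=10000)
# reader.metadata now says whether records are Example or SequenceExample,
# and the parser configs are ready for tf.io parsing.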
def build_state(self): <NEW_LINE> <INDENT> waypoint = self.planner.next_waypoint() <NEW_LINE> inputs = self.env.sense(self) <NEW_LINE> state = (waypoint, inputs['light']) <NEW_LINE> return state
The build_state function is called when the agent requests data from the environment. The next waypoint, the intersection inputs, and the deadline are all features available to the agent.
625941b36e29344779a623c6
def check_syntax(arg: str) -> None: <NEW_LINE> <INDENT> if ',' in arg: <NEW_LINE> <INDENT> print(MSG_ERROR + " Do not use commas (','); they are illegal here.") <NEW_LINE> raise SyntaxError("Do not use commas (',')")
Check string syntax. Gitcher does not allow commas (',') in string values. :param arg: Argument whose syntax is checked :type arg: str :return: None :rtype: None :raise SyntaxError: If arg contains an illegal character
625941b34e696a04525c9205
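Example use of the checker, which prints an error and raises on an illegal comma:

check_syntax('my_profile')        # passes silently
try:
    check_syntax('bad,value')
except SyntaxError as e:
    print(e)                      # Do not use commas (',')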
def device_get_option_hc(self, f, Status, option): <NEW_LINE> <INDENT> if (option == DeviceOptionClass2.RW_TEMP_LIM_LOWER or option == DeviceOptionClass2.RW_TEMP_LIM_HIGHER): <NEW_LINE> <INDENT> optdata = c_float() <NEW_LINE> def get_optval(raw_optdata): return raw_optdata.value <NEW_LINE> <DEDENT> elif (option == DeviceOptionClass2.RW_BACKOFF_TIME_NORMAL or option == DeviceOptionClass2.RW_BACKOFF_TIME_HIGH or option == DeviceOptionClass2.RW_BACKOFF_TIME_CRITICAL or option == DeviceOptionClass2.RW_TEMPERATURE_DEBUG): <NEW_LINE> <INDENT> optdata = c_int() <NEW_LINE> def get_optval(raw_optdata): return raw_optdata.value <NEW_LINE> <DEDENT> elif (option == DeviceOptionClass3.RW_SHELL_ENABLE or option == DeviceOptionClass3.RW_LOG_LEVEL or option == DeviceOptionClass3.RW_MVTENSOR_LOG_LEVEL or option == DeviceOptionClass3.RW_XLINK_LOG_LEVEL): <NEW_LINE> <INDENT> optdata = c_int() <NEW_LINE> def get_optval(raw_optdata): return raw_optdata.value <NEW_LINE> <DEDENT> elif option == DeviceOptionClass2.RO_OPTIMISATION_LIST: <NEW_LINE> <INDENT> optdata = create_string_buffer( MAX_OPTIMISATIONS * OPTIMISATION_NAME_LEN) <NEW_LINE> def get_optval(raw_optdata): <NEW_LINE> <INDENT> return raw_optdata.raw.decode().replace('\x00', ' ').split() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception(Status.INVALID_PARAMETERS) <NEW_LINE> <DEDENT> optsize = c_uint(sizeof(optdata)) <NEW_LINE> status = f.ncDeviceGetOption(self.handle, option.value, byref(optdata), byref(optsize)) <NEW_LINE> if status != Status.OK.value: <NEW_LINE> <INDENT> raise Exception(Status(status)) <NEW_LINE> <DEDENT> if (option == DeviceOptionClass2.RW_TEMP_LIM_LOWER or option == DeviceOptionClass2.RW_TEMP_LIM_HIGHER or option == DeviceOptionClass2.RW_BACKOFF_TIME_NORMAL or option == DeviceOptionClass2.RW_BACKOFF_TIME_HIGH or option == DeviceOptionClass2.RW_BACKOFF_TIME_CRITICAL or option == DeviceOptionClass2.RW_TEMPERATURE_DEBUG or option == DeviceOptionClass3.RW_SHELL_ENABLE or option == DeviceOptionClass3.RW_LOG_LEVEL or option == DeviceOptionClass3.RW_MVTENSOR_LOG_LEVEL or option == DeviceOptionClass3.RW_XLINK_LOG_LEVEL or option == DeviceOptionClass2.RO_OPTIMISATION_LIST): <NEW_LINE> <INDENT> return get_optval(optdata) <NEW_LINE> <DEDENT> v = create_string_buffer(optsize.value) <NEW_LINE> memmove(v, optdata, optsize.value) <NEW_LINE> return int.from_bytes(v.raw, byteorder='little')
Get the value of a device option. :param option: a DeviceOption enumeration :return: the value of the requested option
625941b30383005118ecf394
def compute_silhouette_flow(engine, pair): <NEW_LINE> <INDENT> with NamedTemporaryFile(suffix='.png') as exemplar_f, NamedTemporaryFile(suffix='.png') as shape_f: <NEW_LINE> <INDENT> base_pattern = np.dstack(( np.zeros(config.SHAPE_REND_SHAPE), *np.meshgrid( np.linspace(0, 1, config.SHAPE_REND_SHAPE[0]), np.linspace(0, 1, config.SHAPE_REND_SHAPE[1])))) <NEW_LINE> exemplar_sil = bright_pixel_mask( pair.exemplar.load_cropped_image(), percentile=95) <NEW_LINE> exemplar_sil = binary_closing(exemplar_sil, selem=disk(3)) <NEW_LINE> exemplar_sil = transform.resize(exemplar_sil, (500, 500), anti_aliasing=True, mode='reflect') <NEW_LINE> shape_sil = pair.load_data(config.SHAPE_REND_SEGMENT_MAP_NAME) - 1 <NEW_LINE> shape_sil = (shape_sil > -1) <NEW_LINE> shape_sil = binary_closing(shape_sil, selem=disk(3)) <NEW_LINE> exemplar_sil_im = exemplar_sil[:, :, None].repeat(repeats=3, axis=2).astype(float) <NEW_LINE> shape_sil_im = shape_sil[:, :, None].repeat(repeats=3, axis=2).astype(float) <NEW_LINE> exemplar_sil_im[exemplar_sil == 0] = base_pattern[exemplar_sil == 0] <NEW_LINE> shape_sil_im[shape_sil == 0] = base_pattern[shape_sil == 0] <NEW_LINE> vis.image(exemplar_sil_im.transpose((2, 0, 1)), win='exemplar-sil') <NEW_LINE> vis.image(shape_sil_im.transpose((2, 0, 1)), win='shape-sil') <NEW_LINE> with warnings.catch_warnings(): <NEW_LINE> <INDENT> warnings.simplefilter("ignore") <NEW_LINE> imsave(exemplar_f.name, exemplar_sil_im) <NEW_LINE> imsave(shape_f.name, shape_sil_im) <NEW_LINE> <DEDENT> vx, vy = engine.siftflow(str(exemplar_f.name), str(shape_f.name), nargout=2) <NEW_LINE> vx, vy = resize_flow(np.array(vx), np.array(vy), shape=config.SHAPE_REND_SHAPE) <NEW_LINE> <DEDENT> return vx, vy
Compute silhouette-based SIFT flow between the exemplar and the shape rendering.
625941b3cc40096d61595704
def _helper_dup(ind_1,ind_2,ind_3,ind_4,group,name): <NEW_LINE> <INDENT> new_row=group.iloc[ind_1:ind_2,] <NEW_LINE> if (pd.notnull(new_row.iloc[0]['Add_equip'])): <NEW_LINE> <INDENT> print('Add_equip in first row is not empty, check:\n',name,'\n') <NEW_LINE> <DEDENT> if (pd.notnull(group.iloc[ind_3:ind_4,].iloc[0]['Add_equip'])): <NEW_LINE> <INDENT> print('Add_equip in second row is not empty, check:\n',name,'\n') <NEW_LINE> <DEDENT> new_row['Add_equip']=group.iloc[ind_3:ind_4,].iloc[0]['Main_equip'] <NEW_LINE> if (pd.notnull(new_row.iloc[0]['h/ha_2'])): <NEW_LINE> <INDENT> print('h/ha_2 in first row is not empty, check:\n',name,'\n') <NEW_LINE> <DEDENT> if (pd.notnull(group.iloc[ind_3:ind_4,].iloc[0]['h/ha_2'])): <NEW_LINE> <INDENT> print('h/ha_2 in second row is not empty, check:\n',name,'\n') <NEW_LINE> <DEDENT> new_row['h/ha_2']=group.iloc[ind_3:ind_4,].iloc[0]['h/ha'] <NEW_LINE> if (pd.notnull(new_row.iloc[0]['m3/h_2'])): <NEW_LINE> <INDENT> print('m3/h_2 in first row is not empty, check:\n',name,'\n') <NEW_LINE> <DEDENT> if (pd.notnull(group.iloc[ind_3:ind_4,].iloc[0]['m3/h_2'])): <NEW_LINE> <INDENT> print('m3/h_2 in second row is not empty, check:\n',name,'\n') <NEW_LINE> <DEDENT> new_row['m3/h_2']=group.iloc[ind_3:ind_4,].iloc[0]['m3/h'] <NEW_LINE> if (pd.notnull(new_row.iloc[0]['fresh_t/h_2'])): <NEW_LINE> <INDENT> print('fresh_t/h_2 in first row is not empty, check:\n',name,'\n') <NEW_LINE> <DEDENT> if (pd.notnull(group.iloc[ind_3:ind_4,].iloc[0]['fresh_t/h_2'])): <NEW_LINE> <INDENT> print('fresh_t/h_2 in second row is not empty, check:\n',name,'\n') <NEW_LINE> <DEDENT> new_row['fresh_t/h_2']=group.iloc[ind_3:ind_4,].iloc[0]['fresh_t/h'] <NEW_LINE> new_row['Power_(CV)_M_2']=group.iloc[ind_3:ind_4,].iloc[0]['Power_(CV)_M'] <NEW_LINE> new_row['Mass_(t)_A']=group.iloc[ind_3:ind_4,].iloc[0]['Mass_(t)_M_n'] <NEW_LINE> new_row['Hours_of_use_during_whole_life_A']=group.iloc[ind_3:ind_4,].iloc[0]['Hours_of_use_during_whole_life_M'] <NEW_LINE> new_row['Consumption_(l/h)_2']=group.iloc[ind_3:ind_4,].iloc[0]['Consumption_(l/h)'] <NEW_LINE> return new_row
Helper function for recombine_dup_rows_into_one: merges the second row's equipment and rate columns into the first row's secondary columns, warning when a target field is already populated.
625941b35f7d997b8717484b
def stop_generating_sentence(strings): <NEW_LINE> <INDENT> if len(strings) < 1: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> if re.match(r"(?:\.|!+|\?+)", strings[-1]): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False
Returns True if the last item in a list of strings is sentence-ending punctuation. Used to tell whether the caller should stop generating text.
625941b3be8e80087fb209ff
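Concrete calls for the stop check above:

print(stop_generating_sentence(['Hello', 'world', '.']))    # True
print(stop_generating_sentence(['Hello', 'world', '!!']))   # True
print(stop_generating_sentence(['Hello', 'world']))         # False
print(stop_generating_sentence([]))                         # False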