Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
import numpy as np


def get_H(m, n):
    """Calculate the distance of each point of the m, n matrix from the center"""
    u = np.array([i if i <= m / 2 else m - i for i in range(m)], dtype=np.float32)
    # v must be built over n (not m) so the reshape to (n, 1) below is valid
    v = np.array([i if i <= n / 2 else n - i for i in range(n)], dtype=np.float32)
    v.shape = n, 1
    return (u - m/2)**2 + (v - n/2)**2
23ea3f28816283c42f4722a6a5044772f2c0d2c3
3,653,600
def create_users(xml_filename, test_mode=False, verbose=False):
    """
    Import OET cruise record XML file and create django auth users from the list of participants

    :param xml_filename: the name of the XML file
    :return: the number of users created
    """
    num_created = 0
    cruise_record = xml2struct(xml_filename)
    participant_list = cruise_record['oet:oetcruise']['r2r:cruise']['r2r:cruiseParticipants']['r2r:cruiseParticipant']
    names = [participant['r2r:cruiseParticipantName']['text'] for participant in participant_list]
    for name in names:
        split_name = name.split()
        first_name = split_name[0]
        last_name = "".join(split_name[1:])
        new_user = create_user(first_name, last_name, save=not test_mode, verbose=verbose)
        if new_user:
            print('Created user', new_user.username, '(%s)' % name)
            num_created += 1
    return num_created
c15bf515f482b7b82bfa94e96708ec4d4caf96be
3,653,601
import json


def writeJSONFile(filename, JSONDocument):
    """
    Writes a JSON document to a named file

    Parameters
    ----------
    filename : str
        name of the file
    JSONDocument : str
        JSON document to write to the file

    Returns
    -------
    True
    """
    filename = 'data/' + filename
    with open(filename, 'w') as outfile:
        json.dump(JSONDocument, outfile)
    return True
4f20b42a5f38554589a7bb03039ba348e3b0bb15
3,653,602
def read_readme():
    """Read README content.

    If the README.rst file does not exist yet (this is the case when not
    releasing) only the short description is returned.
    """
    try:
        return local_file('README.rst')
    except IOError:
        return __doc__
ed3c00a1f6e05072b59895efc93dd2380d590553
3,653,603
def get_data_loader(dataset, dataset_dir, batch_size, workers=8, is_training=False):
    """ Create data loader. """
    return data.DataLoader(
        get_dataset(dataset, is_training=is_training, dataset_dir=dataset_dir),
        batch_size=batch_size,
        shuffle=is_training,
        num_workers=workers,
        pin_memory=True,
    )
c7a126f37a78ef527a3e51136ffd9fbacbb5ddec
3,653,604
def listwhom(detailed=False):
    """Return the list of currently available databases for covid19 data in PyCoA.
    The first one is the default one.

    If detailed=True, gives the location information of each given database.
    """
    try:
        if int(detailed):
            df = pd.DataFrame(get_db_list_dict())
            df = df.T.reset_index()
            df.index = df.index + 1
            df = df.rename(columns={'index': 'Database', 0: "WW/iso3", 1: 'Granularité', 2: 'WW/Name'})
            return df
        else:
            return _db.get_available_database()
    except:
        raise CoaKeyError('Waiting for a boolean !')
a912113ee5f713522b5abfbdd8bc77cea54a5b10
3,653,605
def project(s):
    """Maps (x,y,z) coordinates to planar-simplex."""
    # Is s an appropriate sequence or just a single point?
    try:
        return unzip(map(project_point, s))
    except TypeError:
        return project_point(s)
    except IndexError:  # for numpy arrays
        return project_point(s)
1039e29da4b1a7c733449354d507525d746fc389
3,653,606
def point_at_angle_on_ellipse( phi: ArrayLike, coefficients: ArrayLike ) -> NDArray: """ Return the coordinates of the point at angle :math:`\\phi` in degrees on the ellipse with given canonical form coefficients. Parameters ---------- phi Point at angle :math:`\\phi` in degrees to retrieve the coordinates of. coefficients General form ellipse coefficients as follows: the center coordinates :math:`x_c` and :math:`y_c`, semi-major axis length :math:`a_a`, semi-minor axis length :math:`a_b` and rotation angle :math:`\\theta` in degrees of its semi-major axis :math:`a_a`. Returns ------- :class:`numpy.ndarray` Coordinates of the point at angle :math:`\\phi` Examples -------- >>> coefficients = np.array([0.5, 0.5, 2, 1, 45]) >>> point_at_angle_on_ellipse(45, coefficients) # doctest: +ELLIPSIS array([ 1., 2.]) """ phi = np.radians(phi) x_c, y_c, a_a, a_b, theta = tsplit(coefficients) theta = np.radians(theta) cos_phi = np.cos(phi) sin_phi = np.sin(phi) cos_theta = np.cos(theta) sin_theta = np.sin(theta) x = x_c + a_a * cos_theta * cos_phi - a_b * sin_theta * sin_phi y = y_c + a_a * sin_theta * cos_phi + a_b * cos_theta * sin_phi return tstack([x, y])
223e38a209280754538c5e1141317463ae4f4b98
3,653,607
def bmm(tensor1, tensor2):
    """
    Performs a batch matrix-matrix product of this tensor and tensor2.
    Both tensors must be 3D containing equal number of matrices.

    If this is a (b x n x m) Tensor, batch2 is a (b x m x p) Tensor,
    Result will be a (b x n x p) Tensor.

    Parameters
    ----------
    tensor1 : TensorBase
        The first operand in the bmm operation
    tensor2 : TensorBase
        The second operand in the bmm operation

    Returns
    -------
    TensorBase:
        Output Tensor; with bmm operation
    """
    _ensure_tensorbase(tensor1)
    _ensure_tensorbase(tensor2)

    if tensor2.data.ndim != 3:
        print("dimension of tensor2 is not 3")
    elif tensor1.data.ndim != 3:
        print("dimension of tensor1 is not 3")
    elif tensor1.encrypted or tensor2.encrypted:
        return NotImplemented
    else:
        out = np.matmul(tensor1.data, tensor2.data)
        return TensorBase(out)
f3663c612024195cda85b11019423cdb71d75da4
3,653,608
def get_monotask_from_macrotask(monotask_type, macrotask):
    """ Returns a Monotask of the specified type from the provided Macrotask. """
    return next((monotask for monotask in macrotask.monotasks if isinstance(monotask, monotask_type)))
46d4516327c89755eaa3ba6f6fa3503aae0c5bd9
3,653,609
from SPARQLWrapper import SPARQLWrapper, JSON


def vivo_query(query, parms):
    """
    A new VIVO query function using SPARQLWrapper.  Tested with Stardog, UF VIVO and Dbpedia

    :param query: SPARQL query.  VIVO PREFIX will be added
    :param parms: dictionary with query parms: queryuri, username and password
    :return: result object, typically JSON
    :rtype: dict
    """
    logger.debug(u"in vivo_query\n{}".format(parms))
    sparql = SPARQLWrapper(parms['queryuri'])
    new_query = parms['prefix'] + '\n' + query
    sparql.setQuery(new_query)
    logger.debug(new_query)
    sparql.setReturnFormat(JSON)
    sparql.addParameter("email", parms['username'])
    sparql.addParameter("password", parms['password'])
    # sparql.setCredentials(parms['username'], parms['password'])
    results = sparql.query()
    results = results.convert()
    return results
396d2f8cfdd930f85d37a9b90be0cdb49bb47a4e
3,653,610
def get_services_by_type(service_type, db_session):
    # type: (Str, Session) -> Iterable[models.Service]
    """
    Obtains all services that correspond to requested service-type.
    """
    ax.verify_param(service_type, not_none=True, not_empty=True, http_error=HTTPBadRequest,
                    msg_on_fail="Invalid 'service_type' value '" + str(service_type) + "' specified")
    services = db_session.query(models.Service).filter(models.Service.type == service_type)
    return sorted(services, key=lambda svc: svc.resource_name)
7352eb1e126af170f0c460edd9b69c77e07e3e0a
3,653,611
import os from shutil import copyfile import subprocess def copy_arch(arch, library_dir, libgfortran, libquadmath): """Copy libraries specific to a given architecture. Args: arch (str): The architecture being copied. library_dir (str): The directory containing the dynamic libraries. libgfortran (str): The name (not path) of the ``libgfortran`` dynamic library. libquadmath (str): The name (not path) of the ``libquadmath`` dynamic library. Returns: Tuple[str, str, str, str]: Four-tuple of * The path to the ``arch``-specific location of the newly created ``libgfortran`` * The path to the location of the universal ``libgfortran`` (not yet created, but reference here as the ``install_name``) * The path to the ``arch``-specific location of the newly created ``libquadmath`` * The path to the location of the universal ``libquadmath`` (not yet created, but reference here as the ``install_name``) """ sub_dir = os.path.join(FRANKENSTEIN, arch) os.mkdir(sub_dir) # Determine the old/new filenames. old_libgfortran = os.path.join(library_dir, libgfortran) arch_libgfortran = os.path.join(sub_dir, libgfortran) universal_libgfortran = os.path.join(FRANKENSTEIN, libgfortran) old_libquadmath = os.path.join(library_dir, libquadmath) arch_libquadmath = os.path.join(sub_dir, libquadmath) universal_libquadmath = os.path.join(FRANKENSTEIN, libquadmath) # Update ``libgfortran`` copyfile(old_libgfortran, arch_libgfortran) os.chmod(arch_libgfortran, 0o644) subprocess.check_call( ("install_name_tool", "-id", universal_libgfortran, arch_libgfortran) ) subprocess.check_call( ( "install_name_tool", "-change", old_libquadmath, universal_libquadmath, arch_libgfortran, ) ) os.chmod(arch_libgfortran, 0o444) print("{}:".format(arch_libgfortran)) print("\t``install_name``:") print("\t\t{}".format(universal_libgfortran)) print("\tDependencies:") dependencies = get_dependencies(arch_libgfortran, check_exists=False) for dependency in dependencies: print("\t\t{}".format(dependency)) # Update ``libquadmath`` copyfile(old_libquadmath, arch_libquadmath) os.chmod(arch_libquadmath, 0o644) subprocess.check_call( ("install_name_tool", "-id", universal_libquadmath, arch_libquadmath) ) os.chmod(arch_libquadmath, 0o444) print("{}:".format(arch_libquadmath)) print("\t``install_name``:") print("\t\t{}".format(universal_libquadmath)) print("\tDependencies:") dependencies = get_dependencies(arch_libquadmath, check_exists=False) for dependency in dependencies: print("\t\t{}".format(dependency)) return ( arch_libgfortran, universal_libgfortran, arch_libquadmath, universal_libquadmath, )
9c297b892dd1b1108a634c4abd97384275dbb05a
3,653,612
import os


def getREADMEforDescription(readmePath=os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md')):
    """Use the Markdown from the file for the package's long_description.

    long_description_content_type should be 'text/markdown' in this case.
    This is why we need the README to be in the MANIFEST.in file.
    """
    try:
        with open(readmePath) as readme:
            return '\n' + readme.read()
    except FileNotFoundError:
        return 'Package for fuzzing.'
2b0eff2cb2a7fe5d94a512c6f62b4ad8bf48b290
3,653,613
def abstractable(cls): """ A class decorator that scoops up AbstractValueRange class properties in order to create .validate and .abstract methods for the class. Note that properties added after the class is defined aren't counted. Each AbstractValueRange found is is also replaced with a class instance constructed from it. """ cls._ranges = [] for prp in dir(cls): a = getattr(cls, prp) if isinstance(a, AbstractValueRange): cls._ranges.append((prp, a)) setattr(cls, prp, cls(a.val)) cls._ranges = sorted(cls._ranges, key=lambda nr: nr[1].mn) @classmethod def validate(cls, val): ovn = min(r.mn for (n, r) in cls._ranges) ovx = max(r.mx for (n, r) in cls._ranges) return (isinstance(val, float) and val >= ovn and val <= ovx) @classmethod def abstract(cls, val): found = None for (n, r) in cls._ranges[:-1]: if ( ( r.mn == r.mx and val == r.mn ) or (val >= r.mn and val < r.mx) ): found = r.val elif val < r.mn: break # check final range including top if found == None: (n, r) = cls._ranges[-1] if (r.mn == r.mx and val == r.mn) or (val >= r.mn and val <= r.mx): found = r.val if found == None: raise ValueError( "Can't abstract value '{}' as a {}: outside acceptable range.".format( val, cls.__name__ ) ) return cls(found) def _pack_(self): for (n, r) in type(self)._ranges: if self == r.val: return n return self @classmethod def _unpack_(cls, obj): return cls(obj) cls.validate = validate cls.abstract = abstract cls._pack_ = _pack_ cls._unpack_ = _unpack_ return cls
ac14a1148d74a38618a8adc58df5a251296e72ee
3,653,614
def summary1c(sequence):
    """
    What comes in:  A sequence of integers, all >= 2.
    What goes out:
      -- Returns the sum of INDICES of the items in the sequence
         that are prime.
    Side effects: None.
    Examples:
      -- If the given sequence is [20, 23, 29, 30, 33, 29, 100, 2, 4],
         then the returned value is 15, since the primes in the sequence
         are at INDICES 1, 2, 5 and 7, and 1 + 2 + 5 + 7 = 15.
    """
    total = 0
    for k in range(len(sequence)):
        if is_prime(sequence[k]):
            total += k
    return total
    # -------------------------------------------------------------------------
    # DONE: 5. Implement and test this function.
    # -------------------------------------------------------------------------
c2c2f60fecafc883899942b389ce1780638342da
3,653,615
from typing import List
from typing import Tuple


def choose_page(btn_click_list: List[Tuple[int, str]]) -> str:
    """
    Given a list of tuples of (num_clicks, next_page) choose the next_page that
    corresponds to exactly 1 num_clicks.

    This is to help with deciding which page to go to next when clicking on one
    of many buttons on a page. The expectation is that exactly one button will
    have been clicked, so we get a deterministic next page.

    :param btn_click_list: List of tuples of (num_clicks, next_page).
    :return: The id of the next page.
    """
    for tup in btn_click_list:
        if tup[0] == 1:
            return tup[1]
    raise ValueError(
        "No clicks were detected, or the click list is misconfigured: {}".format(
            btn_click_list
        )
    )
e61bc1e52c6531cf71bc54faea0d03976eb137ad
3,653,616
from datetime import datetime


def get_content(request, path=''):
    """Get content from datastore as requested on the url path

    Args:
      path - comes without leading slash. / added in code
    """
    content = StaticContent.get_by_key_name("/%s" % path)
    if not content:
        if path == '':
            # Nothing generated yet. Inform user to create some content
            return render_to_response("blog/themes/%s/listing.html" % config.theme,
                                      {'config': config, 'no_post': True, })
        else:
            raise NotFound

    serve = True
    # check modifications and etag
    if 'If-Modified-Since' in request.headers:
        # datetime is imported directly, so call strptime on it (not datetime.datetime)
        last_seen = datetime.strptime(
            request.headers['If-Modified-Since'], HTTP_DATE_FMT)
        if last_seen >= content.last_modified.replace(microsecond=0):
            serve = False

    if 'If-None-Match' in request.headers:
        etags = [x.strip('" ')
                 for x in request.headers['If-None-Match'].split(',')]
        if content.etag in etags:
            serve = False

    response = _output(content, serve)
    return response
b7bb9550b78cb723ef669ad1c0df597f02d9d673
3,653,617
def reconstruct_entity(input_examples, entitys_iter):
    """
    The entitys_iter contains the predicted entities of the split examples.
    We need to reconstruct the complete entities for each example in
    input_examples and return the results as a dictionary.

    input_examples: each should contain (start, end) indices.
    entitys_iter: iterator of entities

    Overlaps follow first in first set order:
    --------------------------------------
    O O O B-PER I-PER O O O O B-GPE I-GPE O B-LOC I-LOC O O
    --------------------------------------
    O O O B-PER I-PER O
    B-GPE I-GPE O O
    --------------------------------------

    return: the complete entities of each input example.
    """
    predict_entitys = []
    for i, example in enumerate(input_examples):
        _entity = []
        for span in example.sentence_spans:
            _, _, start, end = span
            # +1 to skip the first padding
            _entity.extend(next(entitys_iter)[start:end])
        predict_entitys.append(_entity)

    assert len(predict_entitys) == len(input_examples)
    return predict_entitys
520acff8bfd0616a045ca1286c51d75ea9465f0e
3,653,618
from typing import Dict from typing import List import yaml def ensure_valid_schema(spec: Dict) -> List[str]: """ Ensure that the provided spec has no schema errors. Returns a list with all the errors found. """ error_messages = [] validator = cerberus.Validator(yaml.safe_load(SNOWFLAKE_SPEC_SCHEMA)) validator.validate(spec) for entity_type, err_msg in validator.errors.items(): if isinstance(err_msg[0], str): error_messages.append(f"Spec error: {entity_type}: {err_msg[0]}") continue for error in err_msg[0].values(): error_messages.append(f"Spec error: {entity_type}: {error[0]}") if error_messages: return error_messages schema = { "databases": yaml.safe_load(SNOWFLAKE_SPEC_DATABASE_SCHEMA), "roles": yaml.safe_load(SNOWFLAKE_SPEC_ROLE_SCHEMA), "users": yaml.safe_load(SNOWFLAKE_SPEC_USER_SCHEMA), "warehouses": yaml.safe_load(SNOWFLAKE_SPEC_WAREHOUSE_SCHEMA), } validators = { "databases": cerberus.Validator(schema["databases"]), "roles": cerberus.Validator(schema["roles"]), "users": cerberus.Validator(schema["users"]), "warehouses": cerberus.Validator(schema["warehouses"]), } entities_by_type = [] for entity_type, entities in spec.items(): if entities and entity_type in ["databases", "roles", "users", "warehouses"]: entities_by_type.append((entity_type, entities)) for entity_type, entities in entities_by_type: for entity_dict in entities: for entity_name, config in entity_dict.items(): validators[entity_type].validate(config) for field, err_msg in validators[entity_type].errors.items(): error_messages.append( VALIDATION_ERR_MSG.format( entity_type, entity_name, field, err_msg[0] ) ) return error_messages
216ce1189a66e83cf1b73cf5e2834434dcd73c9b
3,653,619
def realord(s, pos=0):
    """
    Returns the unicode of a character in a unicode string, taking surrogate pairs into account
    """
    if s is None:
        return None
    code = ord(s[pos])
    if code >= 0xD800 and code < 0xDC00:
        if len(s) <= pos + 1:
            print("realord warning: missing surrogate character")
            return 0
        code2 = ord(s[pos + 1])
        if code2 >= 0xDC00 and code < 0xE000:
            code = 0x10000 + ((code - 0xD800) << 10) + (code2 - 0xDC00)
    return hex(code).replace("x", "")
6683725d24a984ecf4feb2198e29a3b68c7f1d5b
3,653,620
import numpy


def evaluateSpectral(left_state, right_state, xy):
    """Use this method to compute the Roe Average.
        q(state)
        q[0] = rho
        q[1] = rho*u
        q[2] = rho*v
        q[3] = rho*e
    """
    spec_state = numpy.zeros(left_state.shape)
    rootrhoL = numpy.sqrt(left_state[0])
    rootrhoR = numpy.sqrt(right_state[0])
    tL = left_state/left_state[0]  # Temporary variable to access e, u, v, and w - Left
    tR = right_state/right_state[0]  # Temporary variable to access e, u, v, and w - Right
    # Calculations
    denom = 1/(rootrhoL+rootrhoR)
    spec_state[0] = rootrhoL*rootrhoR
    spec_state[1] = (rootrhoL*tL[1]+rootrhoR*tR[1])*denom
    spec_state[2] = (rootrhoL*tL[2]+rootrhoR*tR[2])*denom
    spec_state[3] = (rootrhoL*tL[3]+rootrhoR*tR[3])*denom
    spvec = (spec_state[0], spec_state[0]*spec_state[1], spec_state[0]*spec_state[2], spec_state[0]*spec_state[3])
    P = getPressure(spvec)
    dim = 1 if xy else 2  # if true provides u dim else provides v dim
    spectralRadius = (numpy.sqrt(gamma*P/spec_state[0])+abs(spec_state[dim]))
    spectralRadius = 0 if numpy.isnan(spectralRadius) else spectralRadius  # sets spectral radius to zero if it's nan
    return spectralRadius*(left_state-right_state)
f0c5d23396f486250de0e92f1abde4d03545f4f7
3,653,621
def get_multidata_bbg(requests): """function for multiple asynchronous refdata requests, returns a dictionary of the form correlationID:result. Function Parameters ---------- requests : dictionary of correlationID:request pairs. CorrelationIDs are unique integers (cannot reuse until previous requests have returned). Requests can be either dicts of named arguments or list-likes of ordered arguments. Although technically anything can be made into a blpapi.CorrelationId, integers simplify usage. Request Parameters ---------- identifiers : list-like object of bloomberg identifiers of the form 'symbol [exchange] <yellow key>'. Symbol can be ticker/name/ cusip/etc. fields : list-like object of bloomberg field mnemonics or CALCRT ID. Although either can be input, only the mnemonic will be output. overrides : list-like object of tuples or dictionary. Tuples must be of the form [(fieldId, value), ], while dictionaries are {fieldId: value, }. FieldId(s) are mnemonics or CALCRT IDs, values will be converted to the proper type if possible. """ with bs.Session() as session: try: if not isinstance(requests, dict): raise be.InputError('request_mult_refdata requires a ' 'dictionary of correlationId:input pairs') for corr_id, req in requests.items(): if isinstance(req, dict): inputs = req elif hasattr(req, '__iter__'): if len(req) == 3: pass elif len(req) == 2: req = list(req) req.append(None) else: raise be.InputError('Request {0} has {1} items' ', expected 2-3.'.format(corr_id, len(req))) inputs = dict(zip((IDS, FLDS, OVDS), req)) else: raise be.InputError('Request {0} is of type: {0}, ' 'expected dict or list-like'.format(corr_id, type(req))) _ref_req_queue(session, corr_id, inputs) except be.InputError as err: print err _refdata_to_bloomberg(session) session.queue.join() rtn = session.correlation_ids return rtn
d0580910ac74fe7ac85795caa6b5321122626986
3,653,622
def specific_kinetic_energy(particles):
    """
    Returns the specific kinetic energy of each particle in the set.

    >>> from amuse.datamodel import Particles
    >>> particles = Particles(2)
    >>> particles.vx = [1.0, 1.0] | units.ms
    >>> particles.vy = [0.0, 0.0] | units.ms
    >>> particles.vz = [0.0, 0.0] | units.ms
    >>> particles.mass = [1.0, 1.0] | units.kg
    >>> particles.specific_kinetic_energy()
    quantity<[0.5, 0.5] m**2 * s**-2>
    """
    return 0.5*(particles.vx**2+particles.vy**2+particles.vz**2)
89a126c23b291a526401a00f812b40a5283319f4
3,653,623
def parse_loot_percentage(text):
    """Use to parse loot percentage string, ie: Roubo: 50% becomes 0.5"""
    percentage = float(text.split(':')[1].strip("%")) / 100
    return percentage
97dc4f20f02ef0e5d3e592d3084dce80549777ce
3,653,624
def major_minor_change(old_version, new_version):
    """Check if a major or minor change occurred."""
    major_mismatch = old_version.major != new_version.major
    minor_mismatch = old_version.minor != new_version.minor
    if major_mismatch or minor_mismatch:
        return True
    return False
effa9f55c82a9edcacd79e07716527f314e41f39
3,653,625
from typing import Optional
from typing import List
from typing import Dict


def list_all_queues(path: str, vhost: Optional[str] = '/') -> List[Dict]:
    """Send a request to RabbitMQ api to list all the data queues.

    Args:
        path: Path to the RabbitMQ management api to send the request to.
        vhost: Virtual host of the RabbitMQ.

    Returns:
        List of all the data queues.
    """
    quoted_vhost = parse.quote_plus(vhost)
    queues_path = path + f'api/queues/{quoted_vhost}'
    queues = request_sender.make_request('GET', queues_path)
    return queues
9b57721509fdc6ec31eebb0ca8a0f28797419d95
3,653,626
def get_tf_model_variables(config_path, init_checkpoint):
    """Return tf model parameters in a dictionary format.

    Args:
      config_path: path to TF model configuration file
      init_checkpoint: path to saved TF model checkpoint

    Returns:
      tf_config: dictionary tf model configurations
      tf_variables: dictionary of tf variables
      tf_model: tensorflow BERT model generated using input config and checkpoint
    """
    # Load saved model configuration
    config = configs.BertConfig.from_json_file(config_path)

    # Generate BERT TF model and initiate variable update from checkpoint
    seq_len = 20
    _, tf_model = bert_models.squad_model(config, seq_len)
    checkpoint = tf.train.Checkpoint(model=tf_model)
    checkpoint.restore(init_checkpoint).assert_existing_objects_matched()

    tf_config = config.__dict__
    tf_variables = {v.name: v.numpy() for v in tf_model.variables}

    return tf_config, tf_variables, tf_model
5c9a1c138f3c12460668464d2c865787d7720e95
3,653,627
def org_unit_type_filter(queryset, passed_in_org_types):
    """Get specific Organisational units based on a filter."""
    for passed_in_org_type in passed_in_org_types:
        queryset = queryset.filter(org_unit_type_id=passed_in_org_type)
    return queryset
0495cabe121f8d6fdb584538f13764bd81d978c5
3,653,628
def is_str_digit(n: str) -> bool:
    """Check whether the given string is a digit or not."""
    try:
        float(n)
        return True
    except ValueError:
        return False
0e3b4c38cfd9fe2024bde2b63502c74dce307533
3,653,629
import cv2 import random from re import DEBUG def draw_all_poly_detection(im_array, detections, class_names, scale, cfg, threshold=0.2): """ visualize all detections in one image :param im_array: [b=1 c h w] in rgb :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ] :param class_names: list of names in imdb :param scale: visualize the scaled image :return: """ color_white = (255, 255, 255) im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS) # change to bgr im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) if DEBUG: class_names = ['__background__', 'fg'] for j, name in enumerate(class_names): if name == '__background__': continue color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color dets = detections[j] for det in dets: bbox = det[:8] * scale score = det[-1] if score < threshold: continue bbox = map(int, bbox) # draw first point cv2.circle(im, (bbox[0], bbox[1]), 3, (0, 0, 255), -1) for i in range(3): cv2.line(im, (bbox[i * 2], bbox[i * 2 + 1]), (bbox[(i+1) * 2], bbox[(i+1) * 2 + 1]), color=color, thickness=2) cv2.line(im, (bbox[6], bbox[7]), (bbox[0], bbox[1]), color=color, thickness=2) cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10), color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5) return im
4c9e3fc04b9743a687791341f70a51b8f66c7447
3,653,630
import os import tempfile def generate_dswx_layers(input_list, output_file, hls_thresholds = None, dem_file=None, output_interpreted_band=None, output_rgb_file=None, output_infrared_rgb_file=None, output_binary_water=None, output_confidence_layer=None, output_diagnostic_layer=None, output_non_masked_dswx=None, output_shadow_masked_dswx=None, output_landcover=None, output_shadow_layer=None, output_cloud_mask=None, output_dem_layer=None, landcover_file=None, built_up_cover_fraction_file=None, flag_offset_and_scale_inputs=False, scratch_dir='.', product_id=None, flag_debug=False): """Apply shadow layer onto interpreted layer Parameters ---------- input_list: list Input file list output_file: str Output filename hls_thresholds: HlsThresholds (optional) HLS reflectance thresholds for generating DSWx-HLS products dem_file: str (optional) DEM filename output_interpreted_band: str (optional) Output interpreted band filename output_rgb_file: str (optional) Output RGB filename output_infrared_rgb_file: str (optional) Output infrared RGB filename output_binary_water: str (optional) Output binary water filename output_confidence_layer: str (optional) Output confidence layer filename output_diagnostic_layer: str (optional) Output diagnostic layer filename output_non_masked_dswx: str (optional) Output (non-masked) interpreted layer filename output_shadow_masked_dswx: str (optional) Output shadow-masked filename output_landcover: str (optional) Output landcover classification file output_shadow_layer: str (optional) Output shadow layer filename output_cloud_mask: str (optional) Output cloud/cloud-shadow mask filename output_dem_layer: str (optional) Output elevation layer filename landcover_file: str (optional) Output landcover filename built_up_cover_fraction_file: str (optional) Output built-up cover fraction filename flag_offset_and_scale_inputs: bool (optional) Flag indicating if DSWx-HLS should be offsetted and scaled scratch_dir: str (optional) Temporary directory product_id: str (optional) Product ID that will be saved in the output' product's metadata flag_debug: bool (optional) Flag to indicate if execution is for debug purposes. If so, only a subset of the image will be loaded into memory Returns ------- success : bool Flag success indicating if execution was successful """ if hls_thresholds is None: hls_thresholds = parse_runconfig_file() if scratch_dir is None: scratch_dir = '.' 
logger.info('input parameters:') logger.info(' file(s):') for input_file in input_list: logger.info(f' {input_file}') logger.info(f' output_file: {output_file}') logger.info(f' DEM file: {dem_file}') logger.info(f' scratch directory: {scratch_dir}') os.makedirs(scratch_dir, exist_ok=True) image_dict = {} offset_dict = {} scale_dict = {} output_files_list = [] build_vrt_list = [] dem = None shadow_layer = None if product_id is None and output_file: product_id = os.path.splitext(os.path.basename(output_file))[0] elif product_id is None: product_id = 'dswx_hls' dswx_metadata_dict = _get_dswx_metadata_dict(product_id) version = None if not isinstance(input_list, list) or len(input_list) == 1: success = _load_hls_product_v1(input_list, image_dict, offset_dict, scale_dict, dswx_metadata_dict, flag_offset_and_scale_inputs, flag_debug = flag_debug) if success: version = '1.4' else: success = None # If success is None or False: if success is not True: success = _load_hls_product_v2(input_list, image_dict, offset_dict, scale_dict, dswx_metadata_dict, flag_offset_and_scale_inputs, flag_debug = flag_debug) if not success: logger.info(f'ERROR could not read file(s): {input_list}') return False version = '2.0' hls_dataset_name = image_dict['hls_dataset_name'] _populate_dswx_metadata_datasets(dswx_metadata_dict, hls_dataset_name, dem_file=None, landcover_file=None, built_up_cover_fraction_file=None) spacecraft_name = dswx_metadata_dict['SPACECRAFT_NAME'] logger.info(f'processing HLS {spacecraft_name[0]}30 dataset v.{version}') blue = image_dict['blue'] green = image_dict['green'] red = image_dict['red'] nir = image_dict['nir'] swir1 = image_dict['swir1'] swir2 = image_dict['swir2'] qa = image_dict['qa'] geotransform = image_dict['geotransform'] projection = image_dict['projection'] length = image_dict['length'] width = image_dict['width'] sun_azimuth_angle_meta = dswx_metadata_dict['MEAN_SUN_AZIMUTH_ANGLE'].split(', ') sun_zenith_angle_meta = dswx_metadata_dict['MEAN_SUN_ZENITH_ANGLE'].split(', ') if len(sun_azimuth_angle_meta) == 2: sun_azimuth_angle = (float(sun_azimuth_angle_meta[0]) + float(sun_azimuth_angle_meta[1])) / 2.0 else: sun_azimuth_angle = float(sun_azimuth_angle_meta[0]) if len(sun_zenith_angle_meta) == 2: sun_zenith_angle = (float(sun_zenith_angle_meta[0]) + float(sun_zenith_angle_meta[1])) / 2.0 else: sun_zenith_angle = float(sun_zenith_angle_meta[0]) # Sun elevation and zenith angles are complementary sun_elevation_angle = 90 - float(sun_zenith_angle) logger.info(f'Mean Sun azimuth angle: {sun_azimuth_angle}') logger.info(f'Mean Sun elevation angle: {sun_elevation_angle}') if dem_file is not None: # DEM if output_dem_layer is None: dem_cropped_file = tempfile.NamedTemporaryFile( dir=scratch_dir, suffix='.tif').name else: dem_cropped_file = output_dem_layer dem = _relocate(dem_file, geotransform, projection, length, width, scratch_dir, resample_algorithm='cubic', relocated_file=dem_cropped_file) # TODO: # 1. crop DEM with a margin # 2. 
save metadata to DEM layer hillshade = _compute_hillshade(dem_cropped_file, scratch_dir, sun_azimuth_angle, sun_elevation_angle) shadow_layer = _compute_otsu_threshold(hillshade, is_normalized = True) if output_shadow_layer: _save_array(shadow_layer, output_shadow_layer, dswx_metadata_dict, geotransform, projection, description=band_description_dict['SHAD'], output_files_list=build_vrt_list) if landcover_file is not None: if output_landcover is None: relocated_landcover_file = tempfile.NamedTemporaryFile( dir=scratch_dir, suffix='.tif').name else: relocated_landcover_file = output_landcover # Land Cover # TODO output_landcover will be the output of create_landcover_mask() landcover = _relocate(landcover_file, geotransform, projection, length, width, scratch_dir, relocated_file=relocated_landcover_file) if built_up_cover_fraction_file is not None: # Build-up cover fraction built_up_cover_fraction = _relocate(built_up_cover_fraction_file, geotransform, projection, length, width, scratch_dir, relocated_file = 'temp_built_up_cover_fraction.tif') # Set invalid pixels to fill value (255) if not flag_offset_and_scale_inputs: invalid_ind = np.where(blue < -5000) else: invalid_ind = np.where(blue < -0.5) if output_rgb_file: _save_output_rgb_file(red, green, blue, output_rgb_file, offset_dict, scale_dict, flag_offset_and_scale_inputs, geotransform, projection, invalid_ind=invalid_ind, output_files_list=output_files_list) if output_infrared_rgb_file: _save_output_rgb_file(swir1, nir, red, output_infrared_rgb_file, offset_dict, scale_dict, flag_offset_and_scale_inputs, geotransform, projection, invalid_ind=invalid_ind, output_files_list=output_files_list, flag_infrared=True) diagnostic_layer = _compute_diagnostic_tests( blue, green, red, nir, swir1, swir2, hls_thresholds) if output_diagnostic_layer: _save_array(diagnostic_layer, output_diagnostic_layer, dswx_metadata_dict, geotransform, projection, description=band_description_dict['DIAG'], output_files_list=build_vrt_list) interpreted_dswx_band = generate_interpreted_layer(diagnostic_layer) if invalid_ind is not None: interpreted_dswx_band[invalid_ind] = 255 if output_non_masked_dswx: save_dswx_product(interpreted_dswx_band, output_non_masked_dswx, dswx_metadata_dict, geotransform, projection, description=band_description_dict['WTR-1'], scratch_dir=scratch_dir, output_files_list=build_vrt_list) if shadow_layer is not None: shadow_masked_dswx = _apply_shadow_layer( interpreted_dswx_band, shadow_layer) else: shadow_masked_dswx = interpreted_dswx_band if output_shadow_masked_dswx is not None: save_dswx_product(shadow_masked_dswx, output_shadow_masked_dswx, dswx_metadata_dict, geotransform, projection, description=band_description_dict['WTR-2'], scratch_dir=scratch_dir, output_files_list=build_vrt_list) cloud, masked_dswx_band = _compute_mask_and_filter_interpreted_layer( shadow_masked_dswx, qa) if invalid_ind is not None: # Set invalid pixels to mask fill value (255) cloud[invalid_ind] = 255 masked_dswx_band[invalid_ind] = 255 if output_interpreted_band: save_dswx_product(masked_dswx_band, output_interpreted_band, dswx_metadata_dict, geotransform, projection, description=band_description_dict['WTR'], scratch_dir=scratch_dir, output_files_list=build_vrt_list) if output_cloud_mask: save_mask(cloud, output_cloud_mask, dswx_metadata_dict, geotransform, projection, description=band_description_dict['CLOUD'], output_files_list=build_vrt_list) binary_water_layer = _get_binary_water_layer(masked_dswx_band) if output_binary_water: 
_save_binary_water(binary_water_layer, output_binary_water, dswx_metadata_dict, geotransform, projection, description=band_description_dict['BWTR'], output_files_list=build_vrt_list) # TODO: fix CONF layer!!! if output_confidence_layer: _save_binary_water(binary_water_layer, output_confidence_layer, dswx_metadata_dict, geotransform, projection, description=band_description_dict['CONF'], output_files_list=build_vrt_list) # save output_file as GeoTIFF if output_file and not output_file.endswith('.vrt'): save_dswx_product(masked_dswx_band, output_file, dswx_metadata_dict, geotransform, projection, bwtr=binary_water_layer, diag=diagnostic_layer, wtr_1=interpreted_dswx_band, wtr_2=shadow_masked_dswx, shad=shadow_layer, cloud=cloud, dem=dem, scratch_dir=scratch_dir, output_files_list=output_files_list) # save output_file as VRT elif output_file: vrt_options = gdal.BuildVRTOptions(resampleAlg='nearest') gdal.BuildVRT(output_file, build_vrt_list, options=vrt_options) build_vrt_list.append(output_file) logger.info(f'file saved: {output_file}') logger.info('list of output files:') for filename in build_vrt_list + output_files_list: logger.info(filename) return True
eb783b48276e6b62b63665fe84bfa5b5bf2a04cb
3,653,631
import operator
from functools import partial


def range_check_function(bottom, top):
    """Returns a function that checks if bottom <= arg < top, allowing bottom and/or top to be None"""
    if top is None:
        if bottom is None:
            # Can't currently happen (checked before calling this), but let's do something reasonable
            return lambda _: True
        else:
            return partial(operator.le, bottom)
    elif bottom is None:
        return partial(operator.gt, top)
    else:
        def range_f(v):
            return v >= bottom and v < top
        return range_f
95e22a544633f166b275d548fd4a07383e3ea098
3,653,632
def filter_employee(): """ When the client requests a specific employee. Valid queries: ?employeeid=<employeeid> Returns: json representation of product. """ query_parameters = request.args conn = psycopg2.connect(DATABASE_URL, sslmode='require') cursor = conn.cursor() lookup_code = query_parameters.get('employeeid') base_query = "SELECT * FROM employee WHERE" if lookup_code: query = "{} employeeid = '{}'".format(base_query, lookup_code) cursor.execute(query) record_list = cursor.fetchall() conn.close() data_list = parse_employee_info(record_list) return jsonify(data_list) else: conn.close() return "<h1>404</h1><p>The employeeid was not found.</p>"
2238b10ad528a1ce523ff206d9f41f04e369adb4
3,653,633
import chunk


def ParallelLSTDQ(D, env, w, damping=0.001, ncpus=None):
    """
    D : source of samples (s,a,r,s',a')
    env: environment containing k,phi,gamma
    w : weights for the linear policy evaluation
    damping : keeps the result relatively stable
    ncpus : the number of cpus to use
    """
    if ncpus:
        nprocess = ncpus
    else:
        nprocess = cpu_count()

    pool = Pool(nprocess)
    indx = chunk(len(D), nprocess)
    results = []
    for (i, j) in indx:
        r = pool.apply_async(dict_loop, (D[i:j], env, w, 0.0))  # note that damping needs to be zero here
        results.append(r)

    k = len(w)
    A = sp.identity(k, format='csr') * damping
    b = sp_create(k, 1, 'csr')
    for r in results:
        T, t = r.get()
        A = A + T
        b = b + t

    # close out the pool of workers
    pool.close()
    pool.join()

    w, info = solve(A, b, method="spsolve")
    return A, b, w, info
9a9ca1247fccf45c523d64e9dbff2313c6c9572b
3,653,634
def get_value_from_settings_with_default_string(wf, value, default_value):
    """Returns either a value as set in the settings file or a default as specified by caller"""
    try:
        ret = wf.settings[value]['value']
        return str(ret)
    except KeyError:
        return default_value
7a08ac33073b451a6000931c6bd5b41f33b0c486
3,653,635
def jsonify(records):
    """
    Parse asyncpg record response into JSON format
    """
    return [dict(r.items()) for r in records]
618cb538331c4eb637aa03f0ba857da3f2fa4c1c
3,653,636
def smoothing_cross_entropy(logits, labels, vocab_size, confidence, gaussian=False, zero_pad=True): """Cross entropy with label smoothing to limit over-confidence. Args: logits: Tensor of size [batch_size, ?, vocab_size] labels: Tensor of size [batch_size, ?] vocab_size: Tensor representing the size of the vocabulary. confidence: Used to determine on and off values for label smoothing. If `gaussian` is true, `confidence` is the variance to the gaussian distribution. gaussian: Uses a gaussian distribution for label smoothing zero_pad: use 0 as the probabitlity of the padding in the smoothed labels. By setting this, we replicate the numeric calculation of tensor2tensor, which doesn't set the <BOS> token in the vocabulary. Returns: the cross entropy loss. """ with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]): # Low confidence is given to all non-true labels, uniformly. if zero_pad: low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 2) else: low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1) if gaussian and confidence > 0.0: labels = tf.cast(labels, tf.float32) normal_dist = tf.distributions.Normal(loc=labels, scale=confidence) soft_targets = normal_dist.prob( tf.cast(tf.range(vocab_size), tf.float32)\ [:, None, None]) # Reordering soft_targets from [vocab_size, batch_size, ?] # to match logits: [batch_size, ?, vocab_size] soft_targets = tf.transpose(soft_targets, perm=[1, 2, 0]) else: soft_targets = tf.one_hot( tf.cast(labels, tf.int32), depth=vocab_size, on_value=confidence, off_value=low_confidence, dtype=logits.dtype) if zero_pad: soft_targets = tf.concat([tf.expand_dims(\ tf.zeros_like(labels, dtype=tf.float32), 2),\ soft_targets[:, :, 1:]], -1) if hasattr(tf.nn, 'softmax_cross_entropy_with_logits_v2'): cross_entropy_fn = tf.nn.softmax_cross_entropy_with_logits_v2 else: cross_entropy_fn = tf.nn.softmax_cross_entropy_with_logits return cross_entropy_fn( logits=logits, labels=soft_targets)
d0374cb850d25975c5e882335933c18da9647382
3,653,637
from anyway.app_and_db import db def get_db_matching_location_interurban(latitude, longitude) -> dict: """ extracts location from db by closest geo point to location found, using road number if provided and limits to requested resolution :param latitude: location latitude :param longitude: location longitude """ def get_bounding_box(latitude, longitude, distance_in_km): latitude = math.radians(latitude) longitude = math.radians(longitude) radius = 6371 # Radius of the parallel at given latitude parallel_radius = radius * math.cos(latitude) lat_min = latitude - distance_in_km / radius lat_max = latitude + distance_in_km / radius lon_min = longitude - distance_in_km / parallel_radius lon_max = longitude + distance_in_km / parallel_radius rad2deg = math.degrees return rad2deg(lat_min), rad2deg(lon_min), rad2deg(lat_max), rad2deg(lon_max) try: except ModuleNotFoundError: pass distance_in_km = 5 lat_min, lon_min, lat_max, lon_max = get_bounding_box(latitude, longitude, distance_in_km) baseX = lon_min baseY = lat_min distanceX = lon_max distanceY = lat_max polygon_str = "POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))".format( baseX, baseY, distanceX, distanceY ) query_obj = ( db.session.query(AccidentMarkerView) .filter(AccidentMarkerView.geom.intersects(polygon_str)) .filter(AccidentMarkerView.accident_year >= 2014) .filter(AccidentMarkerView.provider_code != BE_CONST.RSA_PROVIDER_CODE) .filter(not_(AccidentMarkerView.road_segment_name == None)) ) markers = pd.read_sql_query(query_obj.statement, query_obj.session.bind) geod = Geodesic.WGS84 # relevant_fields = resolution_dict[resolution] # markers = db.get_markers_for_location_extraction() markers["geohash"] = markers.apply( # pylint: disable=maybe-no-member lambda x: geohash.encode(x["latitude"], x["longitude"], precision=4), axis=1 ) # pylint: disable=maybe-no-member markers_orig = markers.copy() # pylint: disable=maybe-no-member markers = markers.loc[(markers["road1"] != None)] # pylint: disable=maybe-no-member if markers.count()[0] == 0: markers = markers_orig # FILTER BY GEOHASH curr_geohash = geohash.encode(latitude, longitude, precision=4) if markers.loc[markers["geohash"] == curr_geohash].count()[0] > 0: markers = markers.loc[markers["geohash"] == curr_geohash].copy() # CREATE DISTANCE FIELD markers["dist_point"] = markers.apply( lambda x: geod.Inverse(latitude, longitude, x["latitude"], x["longitude"])["s12"], axis=1 ).replace({np.nan: None}) most_fit_loc = ( markers.loc[markers["dist_point"] == markers["dist_point"].min()].iloc[0].to_dict() ) final_loc = {} for field in ["road1", "road_segment_name"]: loc = most_fit_loc[field] if loc not in [None, "", "nan"]: if not (isinstance(loc, np.float64) and np.isnan(loc)): final_loc[field] = loc return final_loc
6e3bd7153cc555954768dd34a5a1b0090510a834
3,653,638
import re def get(settings_obj, key, default=None, callback=None): """ Return a Sublime Text plugin setting value. Parameters: settings_obj - a sublime.Settings object or a dictionary containing settings key - the name of the setting default - the default value to return if the key value is not found. callback - a callback function that, if provided, will be called with the found and default values as parameters. """ # Parameter validation if not isinstance(settings_obj, (dict, sublime.Settings)): raise AttributeError("Invalid settings object") if not isinstance(key, str): raise AttributeError("Invalid callback function") if callback is not None and not hasattr(callback, '__call__'): raise AttributeError("Invalid callback function") setting = settings_obj.get(key, default) final_val = None if isinstance(setting, dict) and "#multiconf#" in setting: reject_item = False for entry in setting["#multiconf#"]: reject_item = False if isinstance(entry, dict) and len(entry) else True k, v = entry.popitem() if reject_item: continue for qual in re.compile(QUALIFIERS).finditer(k): if Qualifications.exists(qual.group(1)): reject_item = not Qualifications.eval_qual(qual.group(1), qual.group(2)) else: reject_item = True if reject_item: break if not reject_item: final_val = v break if reject_item: final_val = default else: final_val = setting return callback(final_val, default) if callback else final_val
b1bab5380cb94fb6493431b8732d9c963e9f1f14
3,653,639
import json def parse_json_confing(config_file): """Parse JSON for config JSON will can look like this: { "request_type": "server", "comboBase": "www.cdn.com" "base": "/base/path", //build directory "js_path": "js", //path relative to base "css_path": "path/to/css", //path relative to base; note: combo loader will try and search/replace images in CSS files "filter": DEBUG|MIN } Otherwise uses the *.ini file to load the server config """ f = open(config_file, 'r') content = f.read() f.close() config = json.loads(content) #parse through options to load files by file name missing_keys = [] for key in required_config_keys: if key not in config: missing_keys.append(key) if missing_keys: raise Exception("Required keys are missing in config :: required are: %s ::: config is missing: %s" % (required_config_keys, missing_keys)) config['request_type'] = REQUEST_TYPES[config['request_type'].lower()] return config
28f7f0cd41f31524450b03e0af806d79c857666e
3,653,640
def site():
    """Main front-end web application"""
    html = render.html("index")
    return html
0b8e144a6c366692c51a3fb5431d73fb9ed0e8c1
3,653,641
def parse_vad_label(line, frame_size: float = 0.032, frame_shift: float = 0.008): """Parse VAD information in each line, and convert it to frame-wise VAD label. Args: line (str): e.g. "0.2,3.11 3.48,10.51 10.52,11.02" frame_size (float): frame size (in seconds) that is used when extarcting spectral features frame_shift (float): frame shift / hop length (in seconds) that is used when extarcting spectral features Returns: frames (List[int]): frame-wise VAD label Examples: >>> label = parse_vad_label("0.3,0.5 0.7,0.9") [0, ..., 0, 1, ..., 1, 0, ..., 0, 1, ..., 1] >>> print(len(label)) 110 NOTE: The output label length may vary according to the last timestamp in `line`, which may not correspond to the real duration of that sample. For example, if an audio sample contains 1-sec silence at the end, the resulting VAD label will be approximately 1-sec shorter than the sample duration. Thus, you need to pad zeros manually to the end of each label to match the number of frames in the feature. E.g.: >>> feature = extract_feature(audio) # frames: 320 >>> frames = feature.shape[1] # here assumes the frame dimention is 1 >>> label = parse_vad_label(vad_line) # length: 210 >>> import numpy as np >>> label_pad = np.pad(label, (0, np.maximum(frames - len(label), 0)))[:frames] """ frame2time = lambda n: n * frame_shift + frame_size / 2 frames = [] frame_n = 0 for time_pairs in line.split(): start, end = map(float, time_pairs.split(",")) assert end > start, (start, end) while frame2time(frame_n) < start: frames.append(0) frame_n += 1 while frame2time(frame_n) <= end: frames.append(1) frame_n += 1 return frames
658a2a00b8b0b2cfdb83b649d2f87fcf23cbb6b4
3,653,642
def preprocess_image(img, img_width, img_height):
    """Preprocesses the image before feeding it into the ML model"""
    x = get_square_image(img)
    x = np.asarray(img.resize((img_width, img_height))).astype(np.float32)
    x_transposed = x.transpose((2, 0, 1))
    x_batchified = np.expand_dims(x_transposed, axis=0)
    return x_batchified
50540e81da95651d22dec83271257657d7978f79
3,653,643
def Pose_2_KUKA(H): """Converts a pose (4x4 matrix) to an XYZABC KUKA target (Euler angles), required by KUKA KRC controllers. :param H: pose :type H: :class:`.Mat` .. seealso:: :class:`.Mat`, :func:`~robodk.TxyzRxyz_2_Pose`, :func:`~robodk.Pose_2_TxyzRxyz`, :func:`~robodk.Pose_2_ABB`, :func:`~robodk.Pose_2_Adept`, :func:`~robodk.Pose_2_Comau`, :func:`~robodk.Pose_2_Fanuc`, :func:`~robodk.Pose_2_KUKA`, :func:`~robodk.Pose_2_Motoman`, :func:`~robodk.Pose_2_Nachi`, :func:`~robodk.Pose_2_Staubli`, :func:`~robodk.Pose_2_UR`, :func:`~robodk.quaternion_2_pose` """ x = H[0, 3] y = H[1, 3] z = H[2, 3] if (H[2, 0]) > (1.0 - 1e-10): p = -pi / 2 r = 0 w = atan2(-H[1, 2], H[1, 1]) elif (H[2, 0]) < (-1.0 + 1e-10): p = pi / 2 r = 0 w = atan2(H[1, 2], H[1, 1]) else: p = atan2(-H[2, 0], sqrt(H[0, 0] * H[0, 0] + H[1, 0] * H[1, 0])) w = atan2(H[1, 0], H[0, 0]) r = atan2(H[2, 1], H[2, 2]) return [x, y, z, w * 180 / pi, p * 180 / pi, r * 180 / pi]
5c15c450b9be728e1c0c8727066485ec0176711c
3,653,644
from typing import Optional


def skip_regenerate_image(request: FixtureRequest) -> Optional[str]:
    """Enable parametrization for the same cli option"""
    return _request_param_or_config_option_or_default(request, 'skip_regenerate_image', None)
5d621202d0b72da53994b217f570bd86ccd5ada2
3,653,645
def parse_config(tool_name, key_col_name, value_col_name): """Parses the "execute" field for the given tool from installation config file. Parameters: tool_name: Tool name to search from file. Raises: STAPLERerror if config file does not exists. STAPLERerror if tool value can not be read from file. STAPLERerror if tool value was an empty string. Returns: String containing the user specified run command, None if no special command has been defined. """ # Return None for the generic_base class, as it should not be in the # config file in any case try: run_command = read_value_from_multi_table(CONFIG_FILE_PATH, tool_name, key_col_name, value_col_name) except STAPLERerror: print 'Error when reading installation configuration file for ' \ 'tool {0}'.format(tool_name) logging.error('Error when reading installation configuration file ' 'for the tool {0}'.format(tool_name)) raise if run_command == 'none': raise NotConfiguredError() if run_command == '': raise STAPLERerror('Error! Empty value for tool {0} was found from ' 'installation configuration file !):\n{1}'.format(tool_name, CONFIG_FILE_PATH)) return run_command
bd80078cbd488bafb8ba9ba46464460a12761b2f
3,653,646
import ntpath


def path_leaf(path):
    """
    Extracts file name from given path

    :param str path: Path to extract the file name from
    :return str: File name
    """
    head, tail = ntpath.split(path)
    return tail or ntpath.basename(head)
98ef27b218fdb5003ac988c42aff163d1067021f
3,653,647
from typing import List def delete_cache_clusters( cluster_ids: List[str], final_snapshot_id: str = None, configuration: Configuration = None, secrets: Secrets = None, ) -> List[AWSResponse]: """ Deletes one or more cache clusters and creates a final snapshot Parameters: cluster_ids: list: a list of one or more cache cluster ids final_snapshot_id: str: an identifier to give the final snapshot """ client = aws_client("elasticache", configuration, secrets) cache_clusters = describe_cache_clusters(cluster_ids, client) results = [] for c in cache_clusters: logger.debug("Deleting Cache Cluster: %s." % c["CacheClusterId"]) params = dict(CacheClusterId=c["CacheClusterId"]) if final_snapshot_id: params["FinalSnapshotIdentifier"] = final_snapshot_id results.append(client.delete_cache_cluster(**params)["CacheCluster"]) return results
6c3e013eaedc0590b6eee7f50a9ce47f92ed57fc
3,653,648
import torch def define_styleGenerator(content_nc: int, style_nc: int, n_c: int, n_blocks=4, norm='instance', use_dropout=False, padding_type='zero', cbam=False, gpu_ids=[]): """ This ResNet applies the encoded style from the style tensor onto the given content tensor. Parameters: ---------- - content_nc (int): number of channels in the content tensor - style_nc (int): number of channels in the style tensor - n_c (int): number of channels used inside the network - n_blocks (int): number of Resnet blocks - norm_layer: normalization layer - use_dropout: (boolean): if use dropout layers - padding_type (str): the name of padding layer in conv layers: reflect | replicate | zero - cbam (boolean): If true, use the Convolution Block Attention Module - gpu_ids: [int]: GPU ids available to this network. Default = [] """ use_gpu = len(gpu_ids) > 0 norm_layer = get_norm_layer(norm_type=norm) if use_gpu: assert(torch.cuda.is_available()) styleGenerator = StyleGenerator(content_nc, style_nc, n_c, n_blocks=n_blocks, norm_layer=norm_layer, use_dropout=use_dropout, padding_type=padding_type, cbam=False) init_weights(styleGenerator, "kaiming", activation='leaky_relu') if len(gpu_ids): return nn.DataParallel(styleGenerator, device_ids=gpu_ids) else: return styleGenerator
ed996a2dbd1d2375a248582db21397ee051b5f25
3,653,649
def answer():
    """ answer """
    # logger
    M_LOG.info("answer")

    if "answer" == flask.request.form["type"]:
        # save answer
        gdct_data["answer"] = {"id": flask.request.form["id"],
                               "type": flask.request.form["type"],
                               "sdp": flask.request.form["sdp"]}

        # return ok
        return flask.Response(status=200)

    # return
    return flask.Response(status=400)
055a590107e30e4cd582e658e2f62fcba975f3dc
3,653,650
def load_requirements():
    """ Helps to avoid storing requirements in more than one file"""
    reqs = parse_requirements('requirements-to-freeze.txt', session=False)
    reqs_list = [str(ir.req) for ir in reqs]
    return reqs_list
4dcde55604cc8fc08a4b57ad1e776612eed18808
3,653,651
import warnings import os import types import six import sys def discover(type=None, regex=None, paths=None): """Find and return available plug-ins This function looks for files within paths registered via :func:`register_plugin_path` and those added to `PYBLISHPLUGINPATH`. It determines *type* - :class:`Selector`, :class:`Validator`, :class:`Extractor` or :class:`Conform` - based on whether it matches it's corresponding regular expression; e.g. "$validator_*^" for plug-ins of type Validator. Arguments: type (str, optional): !DEPRECATED! Only return plugins of specified type. E.g. validators, extractors. In None is specified, return all plugins. Available options are "selectors", validators", "extractors", "conformers", "collectors" and "integrators". regex (str, optional): Limit results to those matching `regex`. Matching is done on classes, as opposed to filenames, due to a file possibly hosting multiple plugins. paths (list, optional): Paths to discover plug-ins from. If no paths are provided, all paths are searched. """ if type is not None: warnings.warn("type argument has been deprecated and does nothing") if regex is not None: warnings.warn("discover(): regex argument " "has been deprecated and does nothing") plugins = dict() # Include plug-ins from registered paths for path in paths or plugin_paths(): path = os.path.normpath(path) if not os.path.isdir(path): continue for fname in os.listdir(path): if fname.startswith("_"): continue abspath = os.path.join(path, fname) if not os.path.isfile(abspath): continue mod_name, mod_ext = os.path.splitext(fname) if not mod_ext == ".py": continue module = types.ModuleType(mod_name) module.__file__ = abspath try: with open(abspath) as f: six.exec_(f.read(), module.__dict__) # Store reference to original module, to avoid # garbage collection from collecting it's global # imports, such as `import os`. sys.modules[abspath] = module except Exception as err: log.debug("Skipped: \"%s\" (%s)", mod_name, err) continue for plugin in plugins_from_module(module): if plugin.__name__ in plugins: log.debug("Duplicate plug-in found: %s", plugin) continue plugin.__module__ = module.__file__ plugins[plugin.__name__] = plugin # Include plug-ins from registration. # Directly registered plug-ins take precedence. for plugin in registered_plugins(): if plugin.__name__ in plugins: log.debug("Duplicate plug-in found: %s", plugin) continue plugins[plugin.__name__] = plugin plugins = list(plugins.values()) sort(plugins) # In-place return plugins
9e680498a1ed6e05c84fe77858431400be889191
3,653,652
def next_permutation(a):
    """Generate the lexicographically next permutation inplace.

    https://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
    Return false if there is no next permutation.
    """
    # Find the largest index i such that a[i] < a[i + 1]. If no such
    # index exists, the permutation is the last permutation
    for i in reversed(range(len(a) - 1)):
        if a[i] < a[i + 1]:
            break  # found
    else:  # no break: not found
        a.reverse()
        return False  # no next permutation

    # Find the largest index j greater than i such that a[i] < a[j]
    j = next(j for j in reversed(range(i + 1, len(a))) if a[i] < a[j])

    # Swap the value of a[i] with that of a[j]
    a[i], a[j] = a[j], a[i]

    # Reverse sequence from a[i + 1] up to and including the final element a[n]
    a[i + 1:] = reversed(a[i + 1:])
    return True
b6246d53b5e0ac0e28aa5afda03d7756657a40bf
3,653,653
from numpy.linalg import norm


def normalize(v):
    """
    Calculate normalized vector
    :param v: input vector
    :return: normalized vector
    """
    return v/norm(v)
0ade14b6136e5f55410f6d4cc3fb5b466fa60566
3,653,654
import re


def replace_hyphen_by_romaji(text):
    """
    Replace long-vowel marks such as "ー" with kana (romaji vowels).
    """
    # error check
    if len(text) < 2:
        return ""

    while "-" in list(text) or "~" in list(text):
        text_ = text
        if (text[0] == "-" or text[0] == "~") and len(text) >= 2:
            text = text[2:]
            continue
        text = re.sub(r"(?P<vowel>[aeiou])[-~][-~]", r"\g<vowel>x\g<vowel>", text)  # "-" spanning 2 characters
        text = re.sub(r"A[-~][-~]", r"Axa", text)
        text = re.sub(r"E[-~][-~]", r"Exe", text)
        text = re.sub(r"O[-~][-~]", r"Oxo", text)
        text = re.sub(r"U[-~][-~]", r"Uxu", text)
        if text_ == text:
            break  # stop once nothing changed

    return text
9e2d7216bbd751f49ed54519f5eaf8d516ae8025
3,653,655
def aucroc_ic50(df, threshold=500):
    """
    Compute AUC ROC for predictions and targets in DataFrame, based on a given threshold

    Parameters
    ----------
    df : pandas.DataFrame with predictions in column "preds" and targets in column "targs" in nM
    threshold: float, binding affinity threshold for binders in nM

    Returns
    -------
    numpy.nan or float
    """
    df = df[~df["preds"].isnull()]
    is_binder = df["targs"] >= threshold
    if is_binder.mean() == 1.0 or is_binder.mean() == 0.0 or np.isnan(is_binder.mean()):
        return np.nan
    else:
        return roc_auc_score(1.0*is_binder, df["preds"])
d4535bc493bcaca45fa9ba739135261b9d514aa2
3,653,656
def infer_getattr(node, context=None): """Understand getattr calls If one of the arguments is an Uninferable object, then the result will be an Uninferable object. Otherwise, the normal attribute lookup will be done. """ obj, attr = _infer_getattr_args(node, context) if ( obj is util.Uninferable or attr is util.Uninferable or not hasattr(obj, "igetattr") ): return util.Uninferable try: return next(obj.igetattr(attr, context=context)) except (StopIteration, InferenceError, AttributeInferenceError): if len(node.args) == 3: # Try to infer the default and return it instead. try: return next(node.args[2].infer(context=context)) except InferenceError as exc: raise UseInferenceDefault from exc raise UseInferenceDefault
593435273bf57430ab96034772ef38694a491813
3,653,657
def get_plugin(molcapsule: 'PyObject *', plug_no: 'int') -> "PyObject *": """get_plugin(molcapsule, plug_no) -> PyObject *""" return _libpymolfile.get_plugin(molcapsule, plug_no)
b66687947619808a410d603df70895845afb4d16
3,653,658
def fake_redis_con(): """ Purpose: Create Fake Redis Connection To Test With Args: N/A Return: fake_redis_con (Pytest Fixture (FakeRedis Connection Obj)): Fake redis connection that simulates redis functionality for testing """ return fakeredis.FakeStrictRedis()
10d8e340e60e3d591473e942b2273871e6dcaebe
3,653,659
import inspect


def verbose(function, *args, **kwargs):
    """Improved verbose decorator to allow functions to override log-level

    Do not call this directly to set global verbosity level, instead use
    set_log_level().

    Parameters
    ----------
    function : function
        Function to be decorated to allow for overriding global verbosity level.

    Returns
    -------
    dec : function
        The decorated function.
    """
    try:
        arg_names = [parameter.name for parameter in
                     inspect.signature(function).parameters.values()
                     if (parameter.kind == parameter.POSITIONAL_OR_KEYWORD)]
    except:
        arg_names = inspect.getargspec(function).args

    if len(arg_names) > 0 and arg_names[0] == 'self':
        default_level = getattr(args[0], 'verbose', None)
    else:
        default_level = None

    if 'verbose' in arg_names:
        verbose_level = args[arg_names.index('verbose')]
    else:
        verbose_level = default_level

    if verbose_level is not None:
        old_level = set_log_level(verbose_level, True)
        # set it back if we get an exception
        try:
            ret = function(*args, **kwargs)
        except:
            set_log_level(old_level)
            raise
        set_log_level(old_level)
        return ret
    else:
        ret = function(*args, **kwargs)
        return ret
7c2b2d8e827b6d60120b764fe964aa7e9c7b3f41
3,653,660
import torch def bittensor_dtype_to_torch_dtype(bdtype): """ Translates between bittensor.dtype and torch.dtypes. Args: bdtype (bittensor.dtype): bittensor.dtype to translate. Returns: dtype: (torch.dtype): translated torch.dtype. """ if bdtype == bittensor.proto.DataType.FLOAT32: dtype = torch.float32 elif bdtype == bittensor.proto.DataType.FLOAT64: dtype = torch.float64 elif bdtype == bittensor.proto.DataType.INT32: dtype = torch.int32 elif bdtype == bittensor.proto.DataType.INT64: dtype = torch.int64 else: raise DeserializationException( 'Unknown bittensor.Dtype or no equivalent torch.dtype for bittensor.dtype = {}' .format(bdtype)) return dtype
b0d6ccae56ed871224c8c45bd8aaff61846c99fa
3,653,661
def read_all(dataset, table): """Read all data from the API, convert to pandas dataframe""" return _read_from_json( CFG.path.replace("data", dataset=dataset, table=table, converter="path") )
d40016f8d8356795b9f6451b165410c25a79627c
3,653,662
def compute_spectrum_welch(sig, fs, avg_type='mean', window='hann', nperseg=None, noverlap=None, f_range=None, outlier_percent=None): """Compute the power spectral density using Welch's method. Parameters ----------- sig : 1d or 2d array Time series. fs : float Sampling rate, in Hz. avg_type : {'mean', 'median'}, optional Method to average across the windows: * 'mean' is the same as Welch's method, taking the mean across FFT windows. * 'median' uses median across FFT windows instead of the mean, to minimize outlier effects. window : str or tuple or array_like, optional, default: 'hann' Desired window to use. See scipy.signal.get_window for a list of available windows. If array_like, the array will be used as the window and its length must be nperseg. nperseg : int, optional Length of each segment, in number of samples. If None, and window is str or tuple, is set to 1 second of data. If None, and window is array_like, is set to the length of the window. noverlap : int, optional Number of points to overlap between segments. If None, noverlap = nperseg // 8. f_range : list of [float, float], optional Frequency range to sub-select from the power spectrum. outlier_percent : float, optional The percentage of outlier values to be removed. Must be between 0 and 100. Returns ------- freqs : 1d array Frequencies at which the measure was calculated. spectrum : 1d or 2d array Power spectral density. Examples -------- Compute the power spectrum of a simulated time series using Welch's method: >>> from neurodsp.sim import sim_combined >>> sig = sim_combined(n_seconds=10, fs=500, ... components={'sim_powerlaw': {}, 'sim_oscillation': {'freq': 10}}) >>> freqs, spec = compute_spectrum_welch(sig, fs=500) """ # Calculate the short time Fourier transform with signal.spectrogram nperseg, noverlap = check_spg_settings(fs, window, nperseg, noverlap) freqs, _, spg = spectrogram(sig, fs, window, nperseg, noverlap) # Throw out outliers if indicated if outlier_percent is not None: spg = discard_outliers(spg, outlier_percent) # Average across windows spectrum = get_avg_func(avg_type)(spg, axis=-1) # Trim spectrum, if requested if f_range: freqs, spectrum = trim_spectrum(freqs, spectrum, f_range) return freqs, spectrum
e7856e370d7783628afdea9777a693c4c72e2dfd
3,653,663
def _function_set_name(f): """ return the name of a function (not the module) @param f function @return name .. versionadded:: 1.1 """ name = f.__name__ return name.split(".")[-1]
e1b73fbc520c7d9745872b0cd19766d42c027d15
3,653,664
from typing import Sequence from pathlib import Path from typing import Optional from typing import Callable from typing import Set def _notes_from_paths( paths: Sequence[Path], wiki_name: str, callback: Optional[Callable[[int, int], None]]) -> Set[TwNote]: """ Given an iterable of paths, compile the notes found in all those tiddlers. :param paths: The paths of the tiddlers to generate notes for. :param wiki_name: The name/id of the wiki these notes are from. :param callback: Optional callable passing back progress. See :func:`find_notes`. :return: A set of all the notes found in the tiddler files passed. """ notes = set() for index, tiddler in enumerate(paths, 0): with open(tiddler, 'rb') as f: tid_text = f.read().decode() tid_name = tiddler.name[:tiddler.name.find(f".{RENDERED_FILE_EXTENSION}")] notes.update(_notes_from_tiddler(tid_text, wiki_name, tid_name)) if callback is not None and not index % 50: callback(index+1, len(paths)) if callback is not None: callback(len(paths), len(paths)) return notes
d522aaf2db500864eba78a4f2bd0fdfbf83051f0
3,653,665
def load_matrix(file_matrix, V): """load matrix :param file_matrix: path of pre-trained matrix (output file) :param V: vocab size :return: matrix(list) """ matrix = [[0 for _ in range(V)] for _ in range(V)] with open(file_matrix) as fp: for line in fp: target_id, context_id_values = line.strip().split("\t") context_id_values = context_id_values.split() for context_id_value in context_id_values: context_id, value = context_id_value.split(":") matrix[int(target_id)][int(context_id)] += float(value) return matrix
0a7aa27638bdc223d9860b9e39aa9b6089e59a0f
3,653,666
def add(*args):
    """Add an arbitrary number of values."""
    return sum(args)
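# Illustrative usage sketch, not part of the original snippet.
print(add(1, 2, 3))      # 6
print(add(*[0.5, 1.5]))  # 2.0; a list can be unpacked into positional arguments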
9bc68771c10b537f0727e76cc07297e7d0311a5d
3,653,667
import pytz
from datetime import datetime, date, timedelta


def get_chart_dates(df, start_date=None, end_date=None, utc=True, auto_start=None, auto_end=None):
    """
    Get dates for chart functions. More info on date string formats at: https://strftime.org/

    Parameters:
        df : The dataframe for the chart, needed to ascertain start and end dates, if none are provided.
        start_date : The start date for the entire series to be contained in the chart (start of max range).
        end_date : The end date for the entire series to be contained in the chart (end of max range).
        auto_start : The start of the default range to display on charts, until a user clicks a different range.
        auto_end : The end of the default range to display on charts, until a user clicks a different range.
    """
    if utc:
        utc_now = pytz.utc.localize(datetime.utcnow())
        utc_now.isoformat()
        utc_td_dmy_str = utc_now.strftime("%d-%m-%Y")
        utc_td_ymd_str = utc_now.strftime('%Y-%m-%d')
        t = utc_now
        t_dmy_str = utc_td_dmy_str
        t_ymd_str = utc_td_ymd_str
    elif not utc:
        now = datetime.now()
        td_dmy_str = now.strftime("%d-%m-%Y")
        td_ymd_str = now.strftime('%Y-%m-%d')
        t = now
        t_dmy_str = td_dmy_str
        t_ymd_str = td_ymd_str

    # End date:
    if end_date == None:
        end = df.index.max()
        chart_end = end.strftime("%d-%m-%Y")
    elif (end_date != None) and (isinstance(end_date, str)):
        end = datetime.strptime(end_date, '%Y-%m-%d')
        chart_end = end.strftime("%d-%m-%Y")
    elif (end_date != None) and (type(end_date) == datetime):
        end = end_date
        chart_end = end.strftime("%d-%m-%Y")
    elif (end_date != None) and (type(end_date) == date):
        end = end_date
        chart_end = end.strftime("%d-%m-%Y")
    elif isinstance(end_date, pd.Timestamp):
        end = pd.to_datetime(end_date)
        chart_end = end.strftime("%d-%m-%Y")

    # Start date:
    if start_date == None:
        start = df.index.min()
        chart_start = start.strftime("%d-%m-%Y")
    elif (start_date != None) and (isinstance(start_date, str)):
        start = datetime.strptime(start_date, '%Y-%m-%d')
        chart_start = start.strftime("%d-%m-%Y")
    elif (start_date != None) and (type(start_date) == datetime):
        start = start_date
        chart_start = start.strftime("%d-%m-%Y")
    elif (start_date != None) and (type(start_date) == date):
        start = start_date
        chart_start = start.strftime("%d-%m-%Y")
    elif isinstance(start_date, pd.Timestamp):
        start = pd.to_datetime(start_date)
        chart_start = start.strftime("%d-%m-%Y")

    # Auto end
    if auto_end == None:
        auto_end = t_ymd_str
    elif auto_end == 'yst':
        at_end = t - timedelta(days=1)
        auto_end = at_end.strftime('%Y-%m-%d')
    elif (auto_end != None) and (isinstance(auto_end, str)):
        at_end = datetime.strptime(auto_end, '%Y-%m-%d')
        auto_end = at_end.strftime('%Y-%m-%d')
    elif (auto_end != None) and (type(auto_end) == datetime):
        at_end = auto_end
        auto_end = at_end.strftime('%Y-%m-%d')
    elif (auto_end != None) and (type(auto_end) == date):
        at_end = auto_end
        auto_end = at_end.strftime('%Y-%m-%d')
    elif isinstance(auto_end, pd.Timestamp):
        at_end = pd.to_datetime(auto_end)
        auto_end = at_end.strftime('%Y-%m-%d')

    # Auto start
    if auto_start == None or auto_start == 'ytd':
        at_st = first_day_of_current_year(time=False, utc=False)
        auto_start = at_st.strftime('%Y-%m-%d')
    elif auto_start == '1yr':
        at_st = t - timedelta(days=365)
        auto_start = at_st.strftime('%Y-%m-%d')
    elif (auto_start != None) and (isinstance(auto_start, str)):
        at_start = datetime.strptime(auto_start, '%Y-%m-%d')
        auto_start = at_start.strftime('%Y-%m-%d')
    elif (auto_start != None) and (type(auto_start) == datetime):
        at_start = auto_start
        auto_start = at_start.strftime('%Y-%m-%d')
    elif (auto_start != None) and (type(auto_start) == date):
        at_start = auto_start
        auto_start = at_start.strftime('%Y-%m-%d')
    elif isinstance(auto_start, pd.Timestamp):
        at_start = pd.to_datetime(auto_start)
        auto_start = at_start.strftime('%Y-%m-%d')

    return chart_start, chart_end, auto_start, auto_end
603b8e2cea59a52104941da7f3526e4c38b94c16
3,653,668
import time import sys from pathlib import Path import os import yaml def run_benchmark_suite(analyser, suite, verbose, debug, timeout, files, bench): """ Run an analyzer (like Mythril) on a benchmark suite. :param analyser: BaseAnalyser child instance :param suite: Name of test suite :param verbose: Verbosity :param debug: Whether debug is on :param timeout: Test execution timeout :param files: When True, prints list of solidity files and exits :param bench: When not None, gives a list of solidity files to filter on :return: """ print("Using {} {}".format(analyser.get_name(), analyser.version)) testsuite_conf = get_benchmark_yaml(project_root_dir, suite, analyser.get_name(), debug) benchmark_files = gather_benchmark_files(code_root_dir, suite, testsuite_conf['benchmark_subdir']) if not benchmark_files: print("No benchmark files found in suite {}".format(suite)) return 1 if bench: benchmark_files = [path for path in benchmark_files if basename(path) in bench] if not benchmark_files: print("No benchmark files found in suite {} after filtering {}" .format(suite, bench)) return 1 out_data = { 'analyzer': analyser.get_name(), 'date': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), 'version': analyser.version, 'suite': testsuite_conf['suite'], } for field in 'benchmark_subdir benchmark_link benchmark_url_dir'.split(): out_data[field] = testsuite_conf[field] # Zero counters unconfigured = invalid_execution = error_execution = 0 ignored_benchmarks = unfound_issues = benchmarks = 0 timed_out = expected = 0 total_time = 0.0 out_data['benchmark_files'] = benchmark_files if files: print("Benchmark suite {} contains {} files:".format(suite, len(benchmark_files))) for bench_name in benchmark_files: print("\t", bench_name) sys.exit(0) print("Running {} benchmark suite".format(suite)) out_data['benchmarks'] = {} for sol_file in benchmark_files: benchmarks += 1 print('-' * 40) print("Checking {}".format(sol_file)) # Generate path to solidity file sol_path = Path(sol_file) test_name = str(sol_path.parent / sol_path.stem) # Read expected data and initialize output variables expected_data = testsuite_conf.get(test_name, None) run_opts = expected_data.get('options', []) bench_data = out_data['benchmarks'][test_name] = {} if expected_data: bench_data['bug_type'] = expected_data.get('bug_type', 'Unknown') bench_data['expected_data'] = expected_data run_time = expected_data.get('run_time', timeout) if expected_data.get('ignore', None): # Test case ignored print('Benchmark "{}" marked for ignoring; reason: {}' .format(test_name, expected_data['reason'])) ignored_benchmarks += 1 bench_data['result'] = 'Ignored' bench_data['elapsed_str'] = 'ignored' continue elif timeout < run_time: # When the code is too long, we skip it in the YAML print('Benchmark "{}" skipped because it is noted to take a long time; ' '{} seconds' .format(test_name, run_time)) ignored_benchmarks += 1 bench_data['result'] = 'Too Long' bench_data['elapsed_str'] = secs_to_human(run_time) continue try: res = analyser.run_test(sol_file, run_opts) except AnalyserError as e: print("{} invocation:\n\t{}\n failed with return code {}.\n\tError: {}" .format(analyser.get_name(), e.cmd, e.returncode, str(e))) invalid_execution += 1 bench_data['elapsed_str'] = 'errored' bench_data['result'] = 'Errored' bench_data['execution_returncode'] = e.returncode continue except AnalyserTimeoutError as e: elapsed_str = secs_to_human(e.elapsed) print('Benchmark "{}" timed out after {}'.format(test_name, elapsed_str)) timed_out += 1 bench_data['elapsed'] = e.elapsed 
bench_data['elapsed_str'] = elapsed_str bench_data['execution_returncode'] = 0 bench_data['result'] = 'Timed Out' continue elapsed_str = secs_to_human(res.elapsed) bench_data['elapsed'] = res.elapsed bench_data['elapsed_str'] = elapsed_str bench_data['execution_returncode'] = 0 total_time += res.elapsed print(elapsed_str) if not expected_data: unconfigured += 1 bench_data['result'] = 'Unconfigured' print('Benchmark "{}" results not configured, ' 'so I cannot pass judgement on this'.format(test_name)) pp.pprint(res.issues) print("=" * 30) if unconfigured > 5: break continue if res.failed: print('Benchmark "{}" errored'.format(test_name)) bench_data['result'] = 'Unconfigured' bench_data['error'] = res.error print(bench_data['error']) error_execution += 1 continue bench_data['issues'] = res.issues if not res.issues: if (not expected_data['has_bug']) or expected_data['has_bug'] == 'benign': print("No problems found and none expected") bench_data['result'] = 'True Negative' expected += 1 continue else: print("No problems found when issues were expected") bench_data['result'] = 'False Negative' error_execution += 1 continue else: if not expected_data['has_bug']: print("Found a problem where none was expected") bench_data['result'] = 'False Positive' error_execution += 1 elif expected_data['has_bug'] == 'benign': print("Found a benign problem") bench_data['result'] = 'Benign' expected += 1 continue # The test has a bug, and analysis terminated normally # finding some sort of problem. Did we detect the right problem? expected_issues = expected_data.get('issues', []) if len(expected_issues) != len(res.issues): print("Expecting to find {} issue(s), got {}" .format(len(expected_issues), len(res.issues))) bench_data['result'] = 'Wrong Vulnerability' error_execution += 1 pp.pprint(res.issues) print("=" * 30) continue unfound_issues = res.compare_issues(test_name, expected_issues) benchmark_success = unfound_issues == 0 bench_data['benchmark_success'] = benchmark_success bench_data['result'] = 'True Positive' if benchmark_success: expected += 1 print('Benchmark "{}" checks out'.format(test_name)) if verbose: for num, issue in enumerate(res.issues): print(" Issue {1}. {2} {0[title]} " "at address {0[address]}:\n\t{0[code]}" .format(issue, bench_data['bug_type'], num)) print('-' * 40) print("\nSummary: {} benchmarks; {} expected results, {} unconfigured, {} aborted abnormally, " "{} unexpected results, {} timed out, {} ignored.\n" .format(benchmarks, expected, unconfigured, invalid_execution, error_execution, timed_out, ignored_benchmarks)) total_time_str = secs_to_human(total_time) out_data['total_time'] = total_time out_data['total_time_str'] = secs_to_human(total_time) print("Total elapsed execution time: {}".format(total_time_str)) for field in """expected unconfigured invalid_execution error_execution timed_out ignored_benchmarks""".split(): out_data[field] = locals()[field] out_data['total_time'] = total_time out_data['benchmark_count'] = benchmarks benchdir = code_root_dir.parent / 'benchdata' / suite os.makedirs(benchdir, exist_ok=True) with open(benchdir / (analyser.get_name() + '.yaml'), 'w') as fp: yaml.dump(out_data, fp)
a6e2833805b8b4034215709851ce3a16cfd1f13d
3,653,669
def Eip1(name, ospaces, index_key=None): """ Return the tensor representation of a Fermion ionization name (string): name of the tensor ospaces (list): list of occupied spaces """ terms = [] for os in ospaces: i = Idx(0, os) sums = [Sigma(i)] tensors = [Tensor([i], name)] operators = [FOperator(i, False)] e1 = Term(1, sums, tensors, operators, [], index_key=index_key) terms.append(e1) return Expression(terms)
3b118106e0c0839549edb5556215241bd3b5f8d4
3,653,670
import logging def load_rtma_data(rtma_data, bbox): """ Load relevant RTMA fields and return them :param rtma_data: a dictionary mapping variable names to local paths :param bbox: the bounding box of the data :return: a tuple containing t2, rh, lats, lons """ gf = GribFile(rtma_data['temp'])[1] lats, lons = gf.latlons() # bbox format: minlat, minlon, maxlat, maxlon i1, i2, j1, j2 = find_region_indices(lats, lons, bbox[0], bbox[2], bbox[1], bbox[3]) t2 = np.ma.array(gf.values())[i1:i2,j1:j2] # temperature at 2m in K td = np.ma.array(GribFile(rtma_data['td'])[1].values())[i1:i2,j1:j2] # dew point in K precipa = np.ma.array(GribFile(rtma_data['precipa'])[1].values())[i1:i2,j1:j2] # precipitation hgt = np.ma.array(GribFile('static/ds.terrainh.bin')[1].values())[i1:i2,j1:j2] logging.info('t2 min %s max %s' % (np.min(t2),np.max(t2))) logging.info('td min %s max %s' % (np.min(td),np.max(td))) logging.info('precipa min %s max %s' % (np.min(precipa),np.max(precipa))) logging.info('hgt min %s max %s' % (np.min(hgt),np.max(hgt))) # compute relative humidity rh = 100*np.exp(17.625*243.04*(td - t2) / (243.04 + t2 - 273.15) / (243.0 + td - 273.15)) return td, t2, rh, precipa, hgt, lats[i1:i2,j1:j2], lons[i1:i2,j1:j2]
1e97228b613dc42fb51c29ace44c306ea81052cb
3,653,671
import traceback import six def serialize_remote_exception(failure_info, log_failure=True): """Prepares exception data to be sent over rpc. Failure_info should be a sys.exc_info() tuple. """ tb = traceback.format_exception(*failure_info) failure = failure_info[1] if log_failure: LOG.error(_("Returning exception %s to caller"), six.text_type(failure)) LOG.error(tb) kwargs = {} if hasattr(failure, 'kwargs'): kwargs = failure.kwargs # NOTE(matiu): With cells, it's possible to re-raise remote, remote # exceptions. Lets turn it back into the original exception type. cls_name = str(failure.__class__.__name__) mod_name = str(failure.__class__.__module__) if (cls_name.endswith(_REMOTE_POSTFIX) and mod_name.endswith(_REMOTE_POSTFIX)): cls_name = cls_name[:-len(_REMOTE_POSTFIX)] mod_name = mod_name[:-len(_REMOTE_POSTFIX)] data = { 'class': cls_name, 'module': mod_name, 'message': six.text_type(failure), 'tb': tb, 'args': failure.args, 'kwargs': kwargs } json_data = jsonutils.dumps(data) return json_data
2ba794797362b7761a0dc6cbf58851a60a50cc0c
3,653,672
import itertools import shlex def combine_arg_list_opts(opt_args): """Helper for processing arguments like impalad_args. The input is a list of strings, each of which is the string passed into one instance of the argument, e.g. for --impalad_args="-foo -bar" --impalad_args="-baz", the input to this function is ["-foo -bar", "-baz"]. This function combines the argument lists by tokenised each string into separate arguments, if needed, e.g. to produce the output ["-foo", "-bar", "-baz"]""" return list(itertools.chain(*[shlex.split(arg) for arg in opt_args]))
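# Illustrative usage sketch, not part of the original snippet, mirroring the
# example already given in the docstring above.
print(combine_arg_list_opts(["-foo -bar", "-baz"]))  # ['-foo', '-bar', '-baz']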
77cfc6fa54201083c2cb058b8a9493b7d020273e
3,653,673
def in_data():
    """In the `in_data` function, the matrix data read from the txt file is processed."""
    points = {}
    i, j = map(int, file.readline().split(' '))
    for l in range(i):
        line = file.readline().split(' ')
        if len(line) == j:
            for colun in range(len(line)):
                if line[colun].find("\n") != -1:
                    line[colun] = line[colun][-2]
                if line[colun] not in '0':
                    points[line[colun]] = (l, colun)
        else:
            raise ValueError('Inconsistent number of columns in line.')
    return points
423b96cda6802fdfb23a36aa486b7e067999a60d
3,653,674
def doom_action_space_extended(): """ This function assumes the following list of available buttons: TURN_LEFT TURN_RIGHT MOVE_FORWARD MOVE_BACKWARD MOVE_LEFT MOVE_RIGHT ATTACK """ space = gym.spaces.Tuple(( Discrete(3), # noop, turn left, turn right Discrete(3), # noop, forward, backward Discrete(3), # noop, strafe left, strafe right Discrete(2), # noop, attack )) return space
27ceab538f9a7102724a81ae1f692340c3b5e2e6
3,653,675
def svn_auth_provider_invoke_first_credentials(*args): """ svn_auth_provider_invoke_first_credentials(svn_auth_provider_t _obj, void provider_baton, apr_hash_t parameters, char realmstring, apr_pool_t pool) -> svn_error_t """ return _core.svn_auth_provider_invoke_first_credentials(*args)
951d2554df8efa4e392668c743f2b3f51cab2f48
3,653,676
def kill_process(device, process="tcpdump", pid=None, sync=True, port=None): """Kill any active process :param device: lan or wan :type device: Object :param process: process to kill, defaults to tcpdump :type process: String, Optional :param pid: process id to kill, defaults to None :type pid: String, Optional :param sync: Marked False if sync should not be executed;defaults to True :type sync: Boolean,optional :param port: port number to kill :type port: int :return: Console output of sync sendline command after kill process :rtype: string """ if pid: device.sudo_sendline("kill %s" % pid) elif port: device.sudo_sendline(r"kill $(lsof -t -i:%s)" % str(port)) else: device.sudo_sendline("killall %s" % process) device.expect(device.prompt) if sync: device.sudo_sendline("sync") retry_on_exception(device.expect, (device.prompt,), retries=5, tout=60) return device.before
be3947e624d1d2e8ca4015480a07bde67475c721
3,653,677
def get_sentence(soup, ets_series, cache, get_verb=False): """ Given an ETS example `ets_series`, find the corresponding fragment, and retrieve the sentence corresponding to the ETS example. """ frg = load_fragment(soup, ets_series.text_segment_id, cache) sentence = frg.find('s', {'n': ets_series.sentence_number}) if get_verb: tokenized, raw_tokens = tokenize_vuamc(sentence, raw=True) # Offset starts from 1 verb = raw_tokens[ets_series['word_offset'] - 1].lower() return tokenized, raw_tokens, verb tokenized, raw_tokens = tokenize_vuamc(sentence, raw=True) return tokenized, raw_tokens
1a39307d973a5fb93fea7b100f03d0797af1f1ef
3,653,678
def PreAuiNotebook(*args, **kwargs): """PreAuiNotebook() -> AuiNotebook""" val = _aui.new_PreAuiNotebook(*args, **kwargs) val._setOORInfo(val) return val
29400857cdca1fa42058d4200111bd7eeae8410b
3,653,679
def get_nsx_security_group_id(session, cluster, neutron_id): """Return the NSX sec profile uuid for a given neutron sec group. First, look up the Neutron database. If not found, execute a query on NSX platform as the mapping might be missing. NOTE: Security groups are called 'security profiles' on the NSX backend. """ nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id) if not nsx_id: # Find security profile on backend. # This is a rather expensive query, but it won't be executed # more than once for each security group in Neutron's lifetime nsx_sec_profiles = secgrouplib.query_security_profiles( cluster, '*', filters={'tag': neutron_id, 'tag_scope': 'q_sec_group_id'}) # Only one result expected # NOTE(salv-orlando): Not handling the case where more than one # security profile is found with the same neutron port tag if not nsx_sec_profiles: LOG.warn(_("Unable to find NSX security profile for Neutron " "security group %s"), neutron_id) return elif len(nsx_sec_profiles) > 1: LOG.warn(_("Multiple NSX security profiles found for Neutron " "security group %s"), neutron_id) nsx_sec_profile = nsx_sec_profiles[0] nsx_id = nsx_sec_profile['uuid'] with session.begin(subtransactions=True): # Create DB mapping nsx_db.add_neutron_nsx_security_group_mapping( session, neutron_id, nsx_id) return nsx_id
0b02a7f90d2e9e9d5917612280ed00ebfcab7f93
3,653,680
def customiseGlobalTagForOnlineBeamSpot(process): """Customisation of GlobalTag for Online BeamSpot - edits the GlobalTag ESSource to load the tags used to produce the HLT beamspot - these tags are not available in the Offline GT, which is the GT presently used in HLT+RECO tests - not loading these tags (i.e. not using this customisation) does not result in a runtime error, but it leads to an HLT beamspot different to the one obtained when running HLT alone """ if hasattr(process, 'GlobalTag'): if not hasattr(process.GlobalTag, 'toGet'): process.GlobalTag.toGet = cms.VPSet() process.GlobalTag.toGet += [ cms.PSet( record = cms.string('BeamSpotOnlineLegacyObjectsRcd'), tag = cms.string('BeamSpotOnlineLegacy') ), cms.PSet( record = cms.string('BeamSpotOnlineHLTObjectsRcd'), tag = cms.string('BeamSpotOnlineHLT') ) ] return process
8d0a8a0fa8e48e597dc4be910c6d9281e5ab4ae2
3,653,681
def path_to_filename(username, path_to_file):
    """
    Converts a path formatted as path/to/file.txt to a filename, i.e. path_to_file.txt
    """
    filename = '{}_{}'.format(username, path_to_file)
    filename = filename.replace('/', '_')
    print(filename)
    return filename
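# Illustrative usage sketch, not part of the original snippet.
name = path_to_filename("alice", "path/to/file.txt")
# prints and returns 'alice_path_to_file.txt'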
a29e98db8ac4cd7f39e0f0e7fc1f76e72f5fa398
3,653,682
from typing import List def _convert_artist_format(artists: List[str]) -> str: """Returns converted artist format""" formatted = "" for x in artists: formatted += x + ", " return formatted[:-2]
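# Illustrative usage sketch, not part of the original snippet.
print(_convert_artist_format(["Daft Punk", "Pharrell Williams"]))
# Daft Punk, Pharrell Williams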
66f8afb0eb09e9a66eaa728c28576bb0e5a496d3
3,653,683
def slerp(val, low, high): """ Spherical interpolation. val has a range of 0 to 1. From Tom White 2016 :param val: interpolation mixture value :param low: first latent vector :param high: second latent vector :return: """ if val <= 0: return low elif val >= 1: return high elif np.allclose(low, high): return low omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high))) so = np.sin(omega) return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
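# Illustrative usage sketch, not part of the original snippet (assumes numpy is
# imported as np, as the function body implies).
import numpy as np
low = np.array([1.0, 0.0])
high = np.array([0.0, 1.0])
mid = slerp(0.5, low, high)                  # halfway along the arc
assert np.allclose(mid, np.sqrt(0.5))        # [0.7071..., 0.7071...]
assert np.isclose(np.linalg.norm(mid), 1.0)  # stays on the unit circle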
499b192a90475fc3b4a888270159e98cbfa449fd
3,653,684
import json async def verify_input_body_is_json( request: web.Request, handler: Handler ) -> web.StreamResponse: """ Middleware to verify that input body is of json format """ if request.can_read_body: try: await request.json() except json.decoder.JSONDecodeError: raise web.HTTPBadRequest(reason="Malformed JSON.") return await handler(request)
7c424b941d3a86e95029f60759b0f47c3d1c44d3
3,653,685
def svn_repos_get_logs4(*args): """ svn_repos_get_logs4(svn_repos_t repos, apr_array_header_t paths, svn_revnum_t start, svn_revnum_t end, int limit, svn_boolean_t discover_changed_paths, svn_boolean_t strict_node_history, svn_boolean_t include_merged_revisions, apr_array_header_t revprops, svn_repos_authz_func_t authz_read_func, svn_log_entry_receiver_t receiver, apr_pool_t pool) -> svn_error_t """ return _repos.svn_repos_get_logs4(*args)
6363e6846e7a1788eef769b529e641c14b4f0525
3,653,686
def linreg_predict(model, X, v=False): """ Prediction with linear regression yhat[i] = E[y|X[i, :]], model] v[i] = Var[y|X[i, :], model] """ if 'preproc' in model: X = preprocessor_apply_to_test(model['preproc'], X) yhat = X.dot(model['w']) return yhat
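# Illustrative usage sketch, not part of the original snippet (assumes numpy as np;
# no 'preproc' key is set, so the preprocessing branch is skipped and only the mean
# prediction X.dot(w) is returned).
import numpy as np
model = {"w": np.array([2.0, -1.0])}
X = np.array([[1.0, 0.0],
              [1.0, 2.0]])
print(linreg_predict(model, X))  # [2. 0.]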
5b326bf06b8061e86c5b4ebc5cf2d5e43cadcd1c
3,653,687
def parse_hostportstr(hostportstr): """ Parse hostportstr like 'xxx.xxx.xxx.xxx:xxx' """ host = hostportstr.split(':')[0] port = int(hostportstr.split(':')[1]) return host, port
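# Illustrative usage sketch, not part of the original snippet.
host, port = parse_hostportstr("127.0.0.1:27017")
# host == '127.0.0.1', port == 27017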
7d67b548728d8cc159a7baa3e5f419bf7cbbc4d3
3,653,688
def sigmoid_grad_input(x_input, grad_output): """sigmoid nonlinearity gradient. Calculate the partial derivative of the loss with respect to the input of the layer # Arguments x_input: np.array of size `(n_objects, n_in)` grad_output: np.array of size `(n_objects, n_in)` dL / df # Output the partial derivative of the loss with respect to the input of the function np.array of size `(n_objects, n_in)` dL / dh """ ################# ### YOUR CODE ### ################# output = [] for x in x_input: one = (1/(1+np.exp(-x))) two = (np.exp(-x)/(1+np.exp(-x))) output.append(one*two) output = np.asarray(output*grad_output) return output
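# Illustrative check sketch, not part of the original snippet (assumes numpy as np).
# The row-wise loop above matches the usual vectorised identity
# sigmoid(x) * (1 - sigmoid(x)) * grad_output.
import numpy as np
x = np.array([[0.0, 1.0], [-1.0, 2.0]])
g = np.ones_like(x)
s = 1.0 / (1.0 + np.exp(-x))
assert np.allclose(sigmoid_grad_input(x, g), s * (1.0 - s) * g)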
f397cdb3c9608fa09c5053e27e57525e2a8e3ba5
3,653,689
def f_is3byte(*args): """f_is3byte(flags_t F, void ?) -> bool""" return _idaapi.f_is3byte(*args)
9fb9f351a4d595c7ecde83492d92911cd646bc0a
3,653,690
def make_desired_disp(vertices, DeformType=DispType.random, num_of_vertices=-1):
    """
    DispType.random: Makes a random displacement field. The first 3 degrees of freedom
    are assumed to be zero in order to fix rotation and translation of the lattice.
    DispType.isotropic: Every point moves towards the origin by an amount proportional
    to the distance from the origin.
    """
    if num_of_vertices < 1:
        num_of_vertices = get_num_of_verts(vertices)
    if DeformType == DispType.random:
        return normalizeVec(npr.rand(2 * num_of_vertices))
    elif DeformType == DispType.isotropic:
        return normalizeVec(vertices.flatten())
    elif DeformType == DispType.explicit_1:
        return np.vstack((np.array([[0.0, 0.0], [0, -2], [-1, -1]]),
                          npr.rand(num_of_vertices - 3, 2))).flatten()
    elif DeformType == DispType.explicit_2:
        return np.vstack((np.array([[0.0, 0.0], [0, 0],
                                    [-0.5 + 1.5 * np.sin(np.pi / 6), 0.3 - 1.5 * np.cos(np.pi / 6)]]),
                          npr.rand(num_of_vertices - 3, 2))).flatten()
90697baa3879f22cb400c1da0923fc611d43a72c
3,653,691
import os import subprocess def unix_sort_ranks(corpus: set, tmp_folder_path: str): """ Function that takes a corpus sorts it with the unix sort -n command and generates the global ranks for each value in the corpus. Parameters ---------- corpus: set The corpus (all the unique values from every column) tmp_folder_path: str The path of the temporary folder that will serve as a cache for the run Returns ------- dict The ranks in the form of k: value, v: the rank of the value """ unsorted_file_path = os.path.join(tmp_folder_path, 'unsorted_file.txt') sorted_file_path = os.path.join(tmp_folder_path, 'sorted_file.txt') with open(unsorted_file_path, 'w') as out: for var in corpus: print(str(var), file=out) with open(sorted_file_path, 'w') as f: if os.name == 'nt': subprocess.call(['sort', unsorted_file_path], stdout=f) else: sort_env = os.environ.copy() sort_env['LC_ALL'] = 'C' subprocess.call(['sort', '-n', unsorted_file_path], stdout=f, env=sort_env) rank = 1 ranks = [] with open(sorted_file_path, 'r') as f: txt = f.read() for var in txt.splitlines(): ranks.append((convert_data_type(var.replace('\n', '')), rank)) rank = rank + 1 return dict(ranks)
03602684aca066fae93caa95b368641561e16c2e
3,653,692
def do_filter(): """Vapoursynth filtering""" opstart_ep10 = 768 ncop = JPBD_NCOP.src_cut ep10 = JPBD_10.src_cut ncop = lvf.rfs(ncop, ep10[opstart_ep10:], [(0, 79), (1035, 1037)]) return ncop
30d605e2267875eaaa4506bc27b0df380a0e48d1
3,653,693
def test_returns_less_than_expected_errors(configured_test_manager): """A function that doesn't return the same number of objects as specified in the stage outputs should throw an OutputSignatureError.""" @stage([], ["test1", "test2"]) def output_stage(record): return "hello world" record = Record(configured_test_manager, None) with pytest.raises(OutputSignatureError): output_stage(record)
6f88de961911f6bc862619e67f2d72a520f2ca90
3,653,694
import pickle def xgb(validate = True): """ Load XGB language detection model. Parameters ---------- validate: bool, optional (default=True) if True, malaya will check model availability and download if not available. Returns ------- LANGUAGE_DETECTION : malaya._models._sklearn_model.LANGUAGE_DETECTION class """ if validate: check_file(PATH_LANG_DETECTION['xgb'], S3_PATH_LANG_DETECTION['xgb']) else: if not check_available(PATH_LANG_DETECTION['xgb']): raise Exception( 'language-detection/xgb is not available, please `validate = True`' ) try: with open(PATH_LANG_DETECTION['xgb']['vector'], 'rb') as fopen: vector = pickle.load(fopen) with open(PATH_LANG_DETECTION['xgb']['model'], 'rb') as fopen: model = pickle.load(fopen) except: raise Exception( "model corrupted due to some reasons, please run malaya.clear_cache('language-detection/xgb') and try again" ) return LANGUAGE_DETECTION(model, lang_labels, vector, mode = 'xgb')
87de42a5957facbc057ecf024334b307df09b19f
3,653,695
import csv import math def get_current_data(csv_file): """ Gathers and returns list of lists of current information based in hourly data from NOAA's National Data Buoy Center archived data. Returned list format is [current depths, current speeds, current directions]. Input parameter is any CSV or text file with the same formatting at the NDBC website. """ current_speed = [] current_dir = [] with open(csv_file) as data_file: reader = csv.reader(data_file, delimiter=' ') next(reader) # skips header line of CSV file next(reader) for row in reader: while '' in row: row.remove('') current_depth = float(row[5]) try: current_current_speed = float(row[7]) except ValueError: current_current_speed = np.nan current_current_dir = 360 - int(row[6]) if math.isclose(current_current_speed, 99.): current_current_speed = np.nan if math.isclose(current_current_dir, -639) or current_current_dir == 'MM': current_current_dir = np.nan current_speed.append(float(current_current_speed)) current_dir.append(float(current_current_dir)) current_data = {'Current Speed': current_speed, 'Current Direction': current_dir} current_data = pd.DataFrame(data=current_data) return current_data, current_depth
f217d66a40466f8bcc590f0cc61fc8c3687b63da
3,653,696
from typing import Optional from typing import Union from typing import Sequence from typing import List def _get_batched_jittered_initial_points( model: Model, chains: int, initvals: Optional[Union[StartDict, Sequence[Optional[StartDict]]]], random_seed: int, jitter: bool = True, jitter_max_retries: int = 10, ) -> Union[np.ndarray, List[np.ndarray]]: """Get jittered initial point in format expected by NumPyro MCMC kernel Returns ------- out: list of ndarrays list with one item per variable and number of chains as batch dimension. Each item has shape `(chains, *var.shape)` """ random_seed = np.random.default_rng(random_seed).integers(2**30, size=chains) assert len(random_seed) == chains initial_points = _init_jitter( model, initvals, seeds=random_seed, jitter=jitter, jitter_max_retries=jitter_max_retries, ) initial_points = [list(initial_point.values()) for initial_point in initial_points] if chains == 1: initial_points = initial_points[0] else: initial_points = [np.stack(init_state) for init_state in zip(*initial_points)] return initial_points
2ba3573f26922cec0cd4a76646bc1d5ad96051b4
3,653,697
import warnings import copy def load_schema(url, resolver=None, resolve_references=False, resolve_local_refs=False): """ Load a schema from the given URL. Parameters ---------- url : str The path to the schema resolver : callable, optional A callback function used to map URIs to other URIs. The callable must take a string and return a string or `None`. This is useful, for example, when a remote resource has a mirror on the local filesystem that you wish to use. resolve_references : bool, optional If `True`, resolve all `$ref` references. resolve_local_refs : bool, optional If `True`, resolve all `$ref` references that refer to other objects within the same schema. This will automatically be handled when passing `resolve_references=True`, but it may be desirable in some cases to control local reference resolution separately. This parameter is deprecated. """ if resolve_local_refs is True: warnings.warn( "The 'resolve_local_refs' parameter is deprecated.", AsdfDeprecationWarning ) if resolver is None: # We can't just set this as the default in load_schema's definition # because invoking get_default_resolver at import time leads to a circular import. resolver = extension.get_default_resolver() # We want to cache the work that went into constructing the schema, but returning # the same object is treacherous, because users who mutate the result will not # expect that they're changing the schema everywhere. return copy.deepcopy( _load_schema_cached(url, resolver, resolve_references, resolve_local_refs) )
b937d56eb7b23a530758327fbf463adb63be4cf4
3,653,698
def fastaDecodeHeader(fastaHeader): """Decodes the fasta header """ return fastaHeader.split("|")
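# Illustrative usage sketch, not part of the original snippet, using a
# UniProt-style header.
print(fastaDecodeHeader("sp|P69905|HBA_HUMAN"))  # ['sp', 'P69905', 'HBA_HUMAN']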
06f0af70765670dafa0b558867e2d9094c3d928b
3,653,699