content: stringlengths 35 to 762k
sha1: stringlengths 40 to 40
id: int64 0 to 3.66M
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES


def serialize_item(item):
    """
    Args:
        item: an XBlock

    Returns:
        fields: a dictionary of an XBlock's field names and values
        block_type: the name of the XBlock's type (i.e. 'course' or 'problem')
    """
    # convert all fields to a dict and filter out parent and children field
    fields = {
        field: field_value.read_from(item)
        for (field, field_value) in item.fields.items()
        if field not in ['parent', 'children']
    }

    course_key = item.scope_ids.usage_id.course_key
    block_type = item.scope_ids.block_type

    # set or reset some defaults
    fields['edited_on'] = str(getattr(item, 'edited_on', ''))
    fields['display_name'] = item.display_name_with_default
    fields['org'] = course_key.org
    fields['course'] = course_key.course
    fields['run'] = course_key.run
    fields['course_key'] = str(course_key)
    fields['location'] = str(item.location)
    fields['block_type'] = block_type
    fields['detached'] = block_type in DETACHED_XBLOCK_TYPES

    if block_type == 'course':
        # prune the checklists field
        if 'checklists' in fields:
            del fields['checklists']

        # record the time this command was run
        fields['time_last_dumped_to_neo4j'] = str(timezone.now())

    return fields, block_type
426e5e83644ca2f1a81491e7e0a65a67cca26f15
1,952
def gen_outfile_name(args):
    """Generate a name for the output file based on the input args.

    Parameters
    ----------
    args : argparse
        argparse object to print
    """
    return args.outfile + gen_identifier(args)
6a91c26de3ae3ec39a2095434ccc18feb9fed699
1,953
def check_vg_tags(game_id):
    """Returns a user's tags."""

    if game_id:
        user_id = session.get('user_id')
        user_query = VgTag.query.join(Tag).filter(Tag.user_id == user_id)
        # Only display user's tags for a specific game.
        vg_tags = user_query.filter(VgTag.game_id == game_id).all()
        return vg_tags
    else:
        return None
1eed3e9a58a21a79ae5502a67bde0c409af71785
1,954
def load_fits(path):
    """
    load the fits file

    Parameters
    ----------
    path: string, location of the fits file

    Output
    ------
    data: numpy array, of stokes images in (row, col, wv, pol)
    header: hdul header object, header of the fits file
    """
    hdul_tmp = fits.open(f'{path}')
    data = np.asarray(hdul_tmp[0].data, dtype=np.float32)
    header = hdul_tmp[0].header
    return data, header
f0040e9ef3c8b2e7e4136f0ef7a7a2f9370a3653
1,955
def get_image_path(cfg, metadata, prefix='diag', suffix='image', metadata_id_list='default',):
    """
    Produce a path to the final location of the image.

    The cfg is the opened global config,
    metadata is the metadata dictionary (for the individual dataset file)
    """
    #####
    if metadata_id_list == 'default':
        metadata_id_list = ['project', 'dataset', 'mip', 'exp', 'ensemble',
                            'field', 'short_name', 'preprocessor',
                            'diagnostic', 'start_year', 'end_year', ]

    path = folder(cfg['plot_dir'])
    if prefix:
        path += prefix + '_'
    # Check that the keys are in the dict.
    intersection = [va for va in metadata_id_list if va in metadata.keys()]
    path += '_'.join([str(metadata[b]) for b in intersection])
    if suffix:
        path += '_' + suffix

    image_extention = get_image_format(cfg)

    if path.find(image_extention) == -1:
        path += image_extention

    logger.info("Image path will be: %s", path)
    return path
0c725311db7b3290923f6206cb2bb4d382644e12
1,956
def ProjectNameToBinding(project_name, tag_value, location=None):
    """Returns the binding name given a project name and tag value.

    Requires binding list permission.

    Args:
      project_name: project name provided, fully qualified resource name
      tag_value: tag value to match the binding name to
      location: region or zone

    Returns:
      binding_name

    Raises:
      InvalidInputError: project not found
    """
    service = ServiceFns['tagBindings']()
    with endpoints.CrmEndpointOverrides(location):
        req = ListResourceFns['tagBindings'](parent=project_name)
        response = service.List(req)
        for bn in response.tagBindings:
            if bn.tagValue == tag_value:
                return bn.name
        raise InvalidInputError(
            'Binding not found for parent [{}], tagValue [{}]'.format(
                project_name, tag_value))
00966f8b74378b905fe5b3c4e5a6716a5d4f71bf
1,957
def degrees_of_freedom(s1, s2, n1, n2):
    """
    Compute the number of degrees of freedom using the Satterthwaite Formula

    @param s1 The unbiased sample variance of the first sample
    @param s2 The unbiased sample variance of the second sample
    @param n1 The number of observations in the first sample
    @param n2 The number of observations in the second sample
    """
    numerator = (s1**2/n1 + s2**2/n2)**2
    denominator = ((s1**2/n1)**2)/(n1-1) + ((s2**2/n2)**2)/(n2-1)
    degrees_of_freedom = numerator/denominator
    return degrees_of_freedom
5f076e33584c61dca4410b7ed47feb0043ec97cb
1,958
def get_range_to_list(range_str):
    """
    Takes a range string (e.g. 123-125) and return the list
    """
    start = int(range_str.split('-')[0])
    end = int(range_str.split('-')[1])
    if start > end:
        print("Your range string is wrong, the start is larger than the end!", range_str)
    return range(start, end+1)
a88d9780ac2eba1d85ae70c1861f6a3c74991e5c
1,960
import base64


def get_saml_assertion(server, session, access_token, id_token=None):
    """
    Exchange access token to saml token to connect to VC

    Sample can be found at
    https://github.com/vmware/vsphere-automation-sdk-python/blob/master/samples/vsphere/oauth/exchange_access_id_token_for_saml.py
    """
    stub_config = StubConfigurationFactory.new_std_configuration(
        get_requests_connector(
            session=session,
            url=HTTP_ENDPOINT.format(server)
        )
    )
    oauth_security_context = create_oauth_security_context(access_token)
    stub_config.connector.set_security_context(oauth_security_context)
    token_exchange = TokenExchange(stub_config)
    exchange_spec = token_exchange.ExchangeSpec(
        grant_type=token_exchange.TOKEN_EXCHANGE_GRANT,
        subject_token_type=token_exchange.ACCESS_TOKEN_TYPE,
        actor_token_type=token_exchange.ID_TOKEN_TYPE,
        requested_token_type=token_exchange.SAML2_TOKEN_TYPE,
        actor_token=id_token,
        subject_token=access_token)
    response = token_exchange.exchange(exchange_spec)
    saml_token = response.access_token

    # convert saml token to saml assertion
    samlAssertion = etree.tostring(
        etree.XML(base64.decodebytes(
            bytes(saml_token, 'utf-8')
        ))
    ).decode('utf-8')
    return samlAssertion
174400720340fb831d6a62728b48555db7349b95
1,961
import html


def display_value(id, value):
    """
    Display a value in a selector-like style.

    Parameters
    ----------
    id: int
        Id of the value to be displayed
    """
    return html.div(
        {
            "class": "py-3 pl-3 w-full border-[1px] sm:w-[48%] md:w-[121px] bg-nav rounded-[3px] md:mr-2 my-4 before:content-[''] before:border-[6px] before:border-[transparent] before:top-1/2 before:right-5 before:-translate-y-0.5 before:absolute xl:w-[14%]",
        },
        html.h3(
            {"value": id},
            value,
        ),
    )
aeb3ceeeb8a2048beb8df7f5d3e6027d90df4739
1,963
def helmholtz_adjoint_double_layer_regular( test_point, trial_points, test_normal, trial_normals, kernel_parameters ): """Helmholtz adjoint double layer for regular kernels.""" wavenumber_real = kernel_parameters[0] wavenumber_imag = kernel_parameters[1] npoints = trial_points.shape[1] dtype = trial_points.dtype factor_real = _np.empty(npoints, dtype=dtype) factor_imag = _np.empty(npoints, dtype=dtype) output_real = _np.empty(npoints, dtype=dtype) output_imag = _np.empty(npoints, dtype=dtype) diff = _np.empty((3, npoints), dtype=dtype) dist = _np.zeros(npoints, dtype=dtype) laplace_grad = _np.zeros(npoints, dtype=dtype) m_inv_4pi = dtype.type(M_INV_4PI) for i in range(3): for j in range(npoints): diff[i, j] = test_point[i] - trial_points[i, j] dist[j] += diff[i, j] * diff[i, j] for j in range(npoints): dist[j] = _np.sqrt(dist[j]) for i in range(3): for j in range(npoints): laplace_grad[j] += diff[i, j] * test_normal[i] for j in range(npoints): laplace_grad[j] *= m_inv_4pi / (dist[j] * dist[j] * dist[j]) factor_real[j] = _np.cos(wavenumber_real * dist[j]) * laplace_grad[j] factor_imag[j] = _np.sin(wavenumber_real * dist[j]) * laplace_grad[j] if wavenumber_imag != 0: for j in range(npoints): factor_real[j] *= _np.exp(-wavenumber_imag * dist[j]) factor_imag[j] *= _np.exp(-wavenumber_imag * dist[j]) for j in range(npoints): output_real[j] = (-1 - wavenumber_imag * dist[j]) * factor_real[ j ] - wavenumber_real * dist[j] * factor_imag[j] output_imag[j] = wavenumber_real * dist[j] * factor_real[j] + factor_imag[j] * ( -1 - wavenumber_imag * dist[j] ) return output_real + 1j * output_imag
6b640e2b7b02e124d893452b8437bfdf6f4af1ec
1,964
def crt(s):
    """
    Solve the system given by x == v (mod k), where (k, v) goes over
    all key-value pairs of the dictionary s.
    """
    x, n = 0, 1
    for q, r in s.items():
        x += n * ((r-x) * inverse(n, q) % q)
        n *= q
    return x
6bcd489f9096cb780c935dd30ea90663d91f854f
1,966
def create_new_tf_session(**kwargs):
    """Get default session or create one with a given config"""
    sess = tf.get_default_session()
    if sess is None:
        sess = make_session(**kwargs)
        sess.__enter__()
    assert tf.get_default_session()
    return sess
1520f330fe7939c997588cf3d8c63265610baa23
1,967
import typing
import re


def MaybeGetHexShaOfLastExportedCommit(
    repo: git.Repo, head_ref: str = "HEAD") -> typing.List[str]:
  """Get the SHA1 of the most recently exported commit.

  Args:
    repo: The repo to iterate over.
    head_ref: The starting point for iteration, e.g. the commit closest to
      head.

  Returns:
    The hex SHA1 of the last exported commit, else None.
  """
  export_re = re.compile(r'\n\[Exported from ([a-fA-F0-9]{40})\]')
  try:
    for commit in repo.iter_commits(head_ref):
      if '\n[Exported from ' in commit.message:
        match = export_re.search(commit.message)
        assert match
        return match.group(1)
  except git.GitCommandError:
    # Raised if no HEAD, i.e. no commits.
    pass
  return None
1d6afe688567ffe245e9aabe753c90e6baf22bfe
1,968
def get_inchi(ID):
    """This function accepts a UNIQUE-ID and returns the InChI string of a certain compound"""
    inchi = df_cpd['INCHI'][ID]
    return inchi
2420a73c2a5e21348c6efde7cd6bcde0cc0c0c00
1,969
from typing import Optional


def pad_to_multiple(array: Array,
                    factor: int,
                    axis: int,
                    mode: Optional[str] = 'constant',
                    constant_values=0) -> Array:
  """Pads `array` on a given `axis` to be a multiple of `factor`.

  Padding will be concatenated to the end of the axis only, not the beginning.
  If the length along `axis` is already a multiple of `factor`, this is
  effectively a no-op.

  Args:
    array: Array with rank >= 1 to pad.
    factor: Positive integer factor to pad for.
    axis: A valid axis in `array` to pad.
    mode: The padding mode to use according to `jnp.pad`. Defaults to
      'constant'. See `jax.numpy.pad` documentation for more.
    constant_values: For 'constant' mode, the pad value to use within
      `jnp.pad`. Defaults to 0.

  Returns:
    The padded Array result.
  """
  array = jnp.asarray(array)

  if factor < 1:
    raise ValueError(f'`factor` must be positive but got {factor}.')

  rank = array.ndim
  if axis < -rank or axis >= rank:
    raise ValueError(
        f'`axis` ({axis}) out of bounds for `array` rank ({rank}).')

  axis_len = array.shape[axis]
  pad_len = -axis_len % factor
  pad_width = [(0, 0)] * rank
  pad_width[axis] = (0, pad_len)

  kwargs = {}
  if mode == 'constant':
    kwargs['constant_values'] = constant_values

  return jnp.pad(array=array, pad_width=pad_width, mode=mode, **kwargs)
5164e124dc270a47ef8f8b1512cdefe796904791
1,971
import math


def define_request(
    dataset,
    query=None,
    crs="epsg:4326",
    bounds=None,
    bounds_crs="EPSG:3005",
    sortby=None,
    pagesize=10000,
):
    """Define the getfeature request parameters required to download a dataset

    References:
    - http://www.opengeospatial.org/standards/wfs
    - http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
    - http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
    """
    # validate the table name and find out how many features it holds
    table = validate_name(dataset)
    n = bcdata.get_count(table, query=query)
    wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
    geom_column = wfs.get_schema("pub:" + table)["geometry_column"]

    # DataBC WFS getcapabilities says that it supports paging,
    # and the spec says that responses should include 'next URI'
    # (section 7.7.4.4.1)....
    # But I do not see any next uri in the responses. Instead of following
    # the paged urls, for datasets with >10k records, just generate urls
    # based on number of features in the dataset.
    chunks = math.ceil(n / pagesize)

    # if making several requests, we need to sort by something
    if chunks > 1 and not sortby:
        sortby = get_sortkey(table)

    # build the request parameters for each chunk
    param_dicts = []
    for i in range(chunks):
        request = {
            "service": "WFS",
            "version": "2.0.0",
            "request": "GetFeature",
            "typeName": table,
            "outputFormat": "json",
            "SRSNAME": crs,
        }
        if sortby:
            request["sortby"] = sortby
        # build the CQL based on query and bounds
        # (the bbox param shortcut is mutually exclusive with CQL_FILTER)
        if query and not bounds:
            request["CQL_FILTER"] = query
        if bounds:
            b0, b1, b2, b3 = [str(b) for b in bounds]
            bnd_query = f"bbox({geom_column}, {b0}, {b1}, {b2}, {b3}, '{bounds_crs}')"
            if not query:
                request["CQL_FILTER"] = bnd_query
            else:
                request["CQL_FILTER"] = query + " AND " + bnd_query
        if chunks > 1:
            request["startIndex"] = i * pagesize
            request["count"] = pagesize
        param_dicts.append(request)

    return param_dicts
215b39a606bfa7fc6736e8b2f61bf9c298412b36
1,973
from typing import List
from typing import Tuple

import torch


def get_bert_input(
    examples: List[tuple],
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert input list to torch tensor.

    Args:
        examples: (input_id_list, )

    Returns:
        attention_mask, input_ids_tensor, token_type_ids_tensor
    """
    input_ids = examples[0]
    token_type_ids = examples[1]
    max_seq_len = min(max(len(input_id) for input_id in input_ids), MAX_SEQ_LEN)
    input_ids_tensor = torch.zeros((len(input_ids), max_seq_len), dtype=torch.long)
    token_type_ids_tensor = torch.zeros_like(input_ids_tensor)
    attention_mask = torch.ones_like(input_ids_tensor)
    for i, input_id in enumerate(input_ids):
        cur_seq_len = len(input_id)
        if cur_seq_len <= max_seq_len:
            input_ids_tensor[i, :cur_seq_len] = torch.tensor(input_id, dtype=torch.long)
            token_type_ids_tensor[i, :cur_seq_len] = torch.tensor(
                token_type_ids[i], dtype=torch.long
            )
            attention_mask[i, cur_seq_len:] = 0
        else:
            input_ids_tensor[i] = torch.tensor(
                input_id[: max_seq_len - 1] + [102], dtype=torch.long
            )
            token_type_ids_tensor[i] = torch.tensor(
                token_type_ids[i][:max_seq_len], dtype=torch.long
            )
    return attention_mask, input_ids_tensor, token_type_ids_tensor
954d0990d5cd5f28d588c472f7d7d48ecc4b3eb2
1,974
import io
import traceback


def _format_exception(e: BaseException):
    """
    Shamelessly stolen from stdlib's logging module.
    """
    with io.StringIO() as sio:
        traceback.print_exception(e.__class__, e, e.__traceback__, None, sio)
        return sio.getvalue().strip()
d80f60634a9862ca282b1c7ccf63ae8e945ffdc9
1,975
import json


def batch_deploy(blueprint_id,
                 parent_deployments,
                 group_id=None,
                 new_deployment_ids=None,
                 inputs=None,
                 labels=None,
                 **_):
    """
    Create deployments for a batch from a single blueprint.

    :param blueprint_id: The blueprint, which has already been uploaded.
    :type blueprint_id: str
    :param parent_deployments: A list of parent deployments.
    :type parent_deployments: list
    :param group_id: the new group ID.
    :type group_id: str
    :param new_deployment_ids: a list of new deployment names.
    :type new_deployment_ids: list
    :param inputs: A list of inputs to the new deployments.
    :type inputs: list
    :param labels: A list of labels to the new deployments.
    :type labels: list
    :return: group_id
    :rtype: str
    """
    if not isinstance(parent_deployments, list):
        # If someone sends a list in the CLI,
        # it will not be properly formatted.
        try:
            parent_deployments = json.loads(parent_deployments)
        except json.JSONDecodeError:
            raise NonRecoverableError(
                'The parent_deployments parameter is not properly formatted. '
                'Proper format is a list, a {t} was provided: {v}.'.format(
                    t=type(parent_deployments), v=parent_deployments))

    group_id = group_id or generate_group_id_from_blueprint(
        blueprint_id)
    new_deployment_ids = new_deployment_ids or \
        generate_deployment_ids_from_group_id(group_id, parent_deployments)
    inputs = generate_inputs_from_deployments(inputs, parent_deployments)
    labels = labels or generate_labels_from_inputs(inputs)

    create_deployments(
        group_id, blueprint_id, new_deployment_ids, inputs, labels)
    return group_id
8128e39c94bfc15a5b75d3a88274720b52d8d900
1,976
import json def compute_task_def(build, settings, fake_build): """Returns a swarming task definition for the |build|. Args: build (model.Build): the build to generate the task definition for. build.proto.infra and build.proto.input.properties must be initialized. settings (service_config_pb2.SettingsCfg): global settings. fake_build (bool): False if the build is not going to be actually created in buildbucket. This is used by led that only needs the definition of the task that *would be* used for a new build like this. Returns a task_def dict. Corresponds to JSON representation of https://cs.chromium.org/chromium/infra/luci/appengine/swarming/swarming_rpcs.py?q=NewTaskRequest&sq=package:chromium&g=0&l=438 """ assert isinstance(build, model.Build), type(build) assert isinstance(fake_build, bool), type(fake_build) assert build.proto.HasField('infra') assert build.proto.input.HasField('properties') assert isinstance(settings, service_config_pb2.SettingsCfg) sw = build.proto.infra.swarming task = { 'name': 'bb-%d-%s' % (build.proto.id, build.builder_id), 'tags': _compute_tags(build, settings), 'priority': str(sw.priority), 'task_slices': _compute_task_slices(build, settings), } if build.proto.number: # pragma: no branch task['name'] += '-%d' % build.proto.number if sw.task_service_account: # pragma: no branch # Don't pass it if not defined, for backward compatibility. task['service_account'] = sw.task_service_account if not fake_build: # pragma: no branch | covered by swarmbucketapi_test.py task['pubsub_topic'] = 'projects/%s/topics/swarming' % ( app_identity.get_application_id() ) task['pubsub_userdata'] = json.dumps( { 'build_id': build.proto.id, 'created_ts': utils.datetime_to_timestamp(utils.utcnow()), 'swarming_hostname': sw.hostname, }, sort_keys=True, ) return task
7071960148ed391b42a4b7ad1e4ed4e6d0c10713
1,977
def draw_bs_pairs(x, y, func, size=1):
    """Perform pairs bootstrap for replicates."""

    # Set up array of indices to sample from: inds
    inds = np.arange(len(x))

    # Initialize replicates
    bs_replicates = np.empty(size)

    # Generate replicates
    for i in range(size):
        bs_inds = np.random.choice(inds, len(inds))
        bs_x, bs_y = x[bs_inds], y[bs_inds]
        bs_replicates[i] = func(bs_x, bs_y)

    return bs_replicates
f0b05241f567570dd96ed97340d5075b8ccb5a7b
1,979
def has_hole(feature):
    """
    Detects the number of holes in a shapely polygon or multipolygon.

    Parameters
    ----------
    feature : shapely Polygon or Multipolygon
        polygon to be analyzed for holes

    Returns
    -------
    int
        number of holes
    """
    if feature.geom_type == 'Polygon':
        num_holes = len(feature.interiors)
    elif feature.geom_type == 'MultiPolygon':
        num_holes = np.sum([len(x.interiors) for x in feature])
    return num_holes
e854d7a4902e66ec95479816662a145e184ee8af
1,980
def linder_table(file=None, **kwargs):
    """Load Linder Model Table

    Function to read in isochrone models from Linder et al. 2019.
    Returns an astropy Table.

    Parameters
    ----------
    age : float
        Age in Myr. If set to None, then an array of ages from the file
        is used to generate dictionary. If set, chooses the closest age
        supplied in table.
    file : string
        Location and name of COND file. See isochrones stored at
        https://phoenix.ens-lyon.fr/Grids/.
        Default is model.AMES-Cond-2000.M-0.0.JWST.Vega
    """
    # Default file to read and load
    if file is None:
        base_dir = conf.PYNRC_PATH + 'linder/isochrones/'
        file = base_dir + 'BEX_evol_mags_-3_MH_0.00.dat'

    with open(file) as f:
        content = f.readlines()

    content = [x.strip('\n') for x in content]

    cnames = content[2].split(',')
    cnames = [name.split(':')[1] for name in cnames]
    ncol = len(cnames)

    content_arr = []
    for line in content[4:]:
        arr = np.array(line.split()).astype(np.float)
        if len(arr) > 0:
            content_arr.append(arr)
    content_arr = np.array(content_arr)

    # Convert to Astropy Table
    tbl = Table(rows=content_arr, names=cnames)
    return tbl
ff6b187009c8bbcef8ae604095c289429863907e
1,981
def json_redirect(request, url, **kwargs):
    """
    Returns a JSON response for redirecting to a new URL. This is very
    specific to this project and depends on the JavaScript supporting
    the result that is returned from this method.
    """
    if not request.is_ajax():
        raise PermissionDenied("Must be an AJAX request.")
    return JsonResponse({'url': url}, **kwargs)
7fbafcfc400c733badc26fcb97bc3a61f4c49f74
1,982
def unauthenticatedClient():
    """Returns an API client with no authenticated user"""
    return APIClient()
b821a7c1e11a398eee691ca43be54d5aca00d213
1,983
import re


def get_known_disk_attributes(model):
    """Get known NVMe/SMART attributes (model specific), returns str."""
    known_attributes = KNOWN_DISK_ATTRIBUTES.copy()

    # Apply model-specific data
    for regex, data in KNOWN_DISK_MODELS.items():
        if re.search(regex, model):
            for attr, thresholds in data.items():
                if attr in known_attributes:
                    known_attributes[attr].update(thresholds)
                else:
                    known_attributes[attr] = thresholds

    # Done
    return known_attributes
39ece3213996b201d1109d7787bcd8fed859235b
1,985
def get_one_exemplar_per_class_proximity(proximity):
    """
    unpack proximity object into X, y and random_state for picking exemplars.

    ----
    Parameters
    ----
    proximity : Proximity object
        Proximity like object containing the X, y and random_state variables
        required for picking exemplars.

    ----
    Returns
    ----
    result : function
        function choosing one exemplar per class
    """
    return get_one_exemplar_per_class(proximity.X, proximity.y,
                                      proximity.random_state)
eeb46d07a757d6b06432369f26f5f2391d9b14cd
1,986
def annotation_layers(state):
    """Get all annotation layer names in the state

    Parameters
    ----------
    state : dict
        Neuroglancer state as a JSON dict

    Returns
    -------
    names : list
        List of layer names
    """
    return [l["name"] for l in state["layers"] if l["type"] == "annotation"]
98dee6b821fbfe2dd449859400c2166ba694025f
1,987
def describe_bvals(bval_file) -> str:
    """Generate description of dMRI b-values."""
    # Parse bval file
    with open(bval_file, "r") as file_object:
        raw_bvals = file_object.read().splitlines()

    # Flatten list of space-separated values
    bvals = [
        item for sublist in [line.split(" ") for line in raw_bvals] for item in sublist
    ]
    bvals = sorted([int(v) for v in set(bvals)])
    bvals = [num_to_str(v) for v in bvals]
    bval_str = list_to_str(bvals)
    bval_str = "b-values of {} acquired".format(bval_str)
    return bval_str
1d19c71d9422a37f425c833df52d9b1936195660
1,988
def weight_update4(weights, x_white, bias1, lrate1, b_exp): """ Update rule for infomax This function recieves parameters to update W1 * Input weights : unmixing matrix (must be a square matrix) x_white: whitened data bias1: current estimated bias lrate1: current learning rate b_exp : experiment * Output weights : updated mixing matrix bias: updated bias lrate1: updated learning rate """ NCOMP, NVOX = (x_white.shape) block1 = (int(np.floor(np.sqrt(NVOX / 3)))) last1 = (int(np.fix((NVOX/block1-1)*block1+1))) if not b_exp : permute1 = permutation(NVOX) else : permute1 = range(NVOX) for start in range(0, last1, block1): if start + block1 < NVOX: tt2 = (start + block1 ) else: tt2 = (NVOX) block1 = (NVOX - start) unmixed = (np.dot(weights, x_white[:, permute1[start:tt2]]) + bias1) logit = 1 / (1 + np.exp(-unmixed)) weights = (weights + lrate1 * np.dot( block1 * np.eye(NCOMP) + np.dot( (1-2*logit), unmixed.T), weights)) bias1 = (bias1 + lrate1 * (1-2*logit).sum(axis=1).reshape(bias1.shape)) # Checking if W blows up if (np.isnan(weights)).any() or np.max(np.abs(weights)) > MAX_WEIGHT: # ("Weight is outside the range. Restarting.") weights = (np.eye(NCOMP)) bias1 = (np.zeros((NCOMP, 1))) error = 1 if lrate1 > 1e-6 and \ matrix_rank(x_white) < NCOMP: a = 1 # ("Data 1 is rank defficient" # ". I cannot compute " + # str(NCOMP) + " components.") return (None, None, None, 1) if lrate1 < 1e-6: a = 1 # ("Weight matrix may" # " not be invertible...") return (None, None, None, 1) break else: error = 0 return (weights, bias1, lrate1, error)
6c2d5c6610724787b4e8c8fb42569265e4b13d76
1,989
def Dijkstra(graph, source):
    """
    Dijkstra's algorithm for shortest path between two vertices on a graph.

    Arguments
    ---------
    graph  -- directed graph; object of Graph class
    source -- start vertex

    >>> graph = Graph()
    >>> graph.addVertex("A")
    >>> conns = [ ("A", "B"), ("A", "C"), ("B", "C"), ("C", "D") ]
    >>> for va, vb in conns:
    ...     graph.addConn(va, vb)
    >>> dists = Dijkstra(graph, 'A')
    >>> dists['D']
    2
    """
    dist = {}
    pq = pQ.BinaryHeap()
    for node in graph:
        if node != source:
            dist[node] = float('inf')
        else:
            dist[node] = 0
        pq.insert((dist[node], node))

    while not pq.isEmpty():
        current = pq.delMin()
        for next_node in graph.getConns(current[1]):
            new_dist = current[0] + 1
            if new_dist < dist[next_node]:
                dist[next_node] = new_dist
                pq.editHeap(next_node, (dist[next_node], next_node))

    return dist
9585c13c5504cdbff62494c2d5d97655c2281c34
1,990
def annealing_epsilon(episode: int, min_e: float, max_e: float, target_episode: int) -> float:
    """Return a linearly annealed epsilon

    Epsilon will decrease over time until it reaches `target_episode`

       (epsilon)
           |
    max_e  ---|\
           |  \
           |   \
           |    \
    min_e  ---|____\_______________(episode)
                   |
                   target_episode

    slope = (min_e - max_e) / (target_episode)
    intercept = max_e

    e = slope * episode + intercept

    Args:
        episode (int): Current episode
        min_e (float): Minimum epsilon
        max_e (float): Maximum epsilon
        target_episode (int): epsilon becomes the `min_e` at `target_episode`

    Returns:
        float: epsilon between `min_e` and `max_e`
    """
    slope = (min_e - max_e) / (target_episode)
    intercept = max_e

    return max(min_e, slope * episode + intercept)
fab650085f271f1271025e23f260eb18e645a9ba
1,991
import jsonschema


def ExtendWithDefault(validator_class):
  """Takes a validator and makes it set default values on properties.

  Args:
    validator_class: A class to add our overridden validators to

  Returns:
    A validator_class that will set default values and ignore required fields
  """
  validate_properties = validator_class.VALIDATORS['properties']

  def SetDefaultsInProperties(validator, user_schema, user_properties,
                              parent_schema):
    SetDefaults(validator, user_schema or {}, user_properties, parent_schema,
                validate_properties)

  return jsonschema.validators.extend(
      validator_class, {PROPERTIES: SetDefaultsInProperties,
                        REQUIRED: IgnoreKeyword})
42ab80b2c52e474a354589eb4c6041450cf23fd2
1,992
def coach_input_line(call, school, f):
    """
    Returns a properly formatted line about a coach.
    :param call: (String) The beginning of the line, includes the gender, sport, and school abbreviation.
    :param school: (String) The longform name of the school.
    :param f: (String) The input line from the user.
    :return: (String) A properly formatted line with all necessary information about a coach.
    """
    f = f.split("\t")
    newCall = f[2].split(" ")
    for item in newCall:
        call += item[0].lower()
    print(call)
    print(f[2])
    return f"{call}\t{school}'s {coachformat(f[2])}, {f[0]} {f[1]},\t{f[0]} {f[1]},\t{f[1]}\n"
762127ac058949af890c2ef7f19b924642cc4c39
1,993
def pad_seq(seq, max_length, PAD=0):
    """
    :param seq: list of int,
    :param max_length: int,
    :return seq: list of int,
    """
    seq += [PAD for i in range(max_length - len(seq))]
    return seq
bb61677bc658e22b317e3d5fb10f7c85a84200d0
1,994
def complex_domain(spectrogram):
    """
    Complex Domain.

    Parameters
    ----------
    spectrogram : :class:`Spectrogram` instance
        :class:`Spectrogram` instance.

    Returns
    -------
    complex_domain : numpy array
        Complex domain onset detection function.

    References
    ----------
    .. [1] Juan Pablo Bello, Chris Duxbury, Matthew Davies and Mark Sandler,
           "On the use of phase and energy for musical onset detection in the
           complex domain",
           IEEE Signal Processing Letters, Volume 11, Number 6, 2004.

    """
    # take the sum of the absolute changes
    return np.asarray(np.sum(np.abs(_complex_domain(spectrogram)), axis=1))
10248ca5bb291326018934d654b2fee6a8a972d0
1,995
import torch
from torch.autograd import Variable


def toOneHot(action_space, actions):
    """
    If action_space is "Discrete", return a one hot vector, otherwise just
    return the same `actions` vector.
    actions: [batch_size, 1] or [batch_size, n, 1]
    If action space is continuous, just return the same action vector.
    """
    # One hot encoding buffer that you create out of the loop and just keep reusing
    if action_space.__class__.__name__ == "Discrete":
        nr_actions = action_space.n
        actions_onehot_dim = list(actions.size())
        actions_onehot_dim[-1] = nr_actions

        actions = actions.view(-1, 1).long()
        action_onehot = torch.FloatTensor(actions.size(0), nr_actions)

        return_variable = False
        if isinstance(actions, Variable):
            actions = actions.data
            return_variable = True

        # In your for loop
        action_onehot.zero_()
        if actions.is_cuda:
            action_onehot = action_onehot.cuda()
        action_onehot.scatter_(1, actions, 1)
        if return_variable:
            action_onehot = Variable(action_onehot)
        action_onehot.view(*actions_onehot_dim)
        return action_onehot
    else:
        return actions.detach()
bad47c1f55795d16bdcd67aac67b4ae40a40363c
1,996
def find_triangle(n):
    """Find the first triangle number with N divisors."""
    t, i = 1, 1
    while True:
        i += 1
        t += i
        if len(divisors(t)) > n:
            return t
b74e0e8fd869b4d9a9ae1fe83299f32eaa848e9a
1,997
import bs4
import requests


def get_main_page_soup(home_url):
    """parse main page soup"""
    user_agent = 'Mozilla / 5.0 (Windows NT 10.0; Win64; x64) AppleWebKit / 537.36(KHTML, ' \
                 'like Gecko) Chrome / 64.0.3282.140 Safari / 537.36 Edge / 18.17763 '
    headers = {'User-agent': user_agent}

    # request to javbus
    res = requests.get(home_url, headers=headers, timeout=20)
    res.raise_for_status()

    # init beautiful soup
    soup = bs4.BeautifulSoup(res.text, 'lxml')
    return soup
6100fa9b669ee498dea354418b3816bbc46b3b26
1,998
def gen_task4() -> np.ndarray:
    """Task 4: main corner of a triangle."""
    canv = blank_canvas()
    r, c = np.random.randint(GRID-2, size=2, dtype=np.int8)
    syms = rand_syms(6)  # 6 symbols for triangle
    # Which orientation? We'll create 4
    rand = np.random.rand()
    if rand < 0.25:  # top left
        rows, cols = [r, r, r, r+1, r+1, r+2], [c, c+1, c+2, c, c+1, c]
    elif rand < 0.50:  # top right
        rows, cols = [r, r, r, r+1, r+1, r+2], [c+2, c, c+1, c+1, c+2, c+2]
    elif rand < 0.75:  # bottom left
        rows, cols = [r+2, r, r+1, r+1, r+2, r+2], [c, c, c, c+1, c+1, c+2]
    else:  # bottom right
        rows, cols = [r+2, r, r+1, r+1, r+2, r+2], [c+2, c+2, c+1, c+2, c, c+1]
    canv[rows, cols] = syms
    return [4, syms[0]], canv
d367af38a74fd57eb86d001103a1f8656b395209
1,999
def pytest_funcarg__testname(request):
    """
    The testname as string, or ``None``, if no testname is known.

    This is the parameter added by the test generation hook, or ``None`` if no
    parameter was set, because test generation didn't add a call for this test.
    """
    return getattr(request, 'param', None)
87444cda36635b21c27d260835f96670d6b2d215
2,000
def notes_to_editor_view(notes):
    """Convert notes object content to more readable view

    Args:
        notes (list): list of note object

    Returns:
        list: list of note object
    """
    for note in notes:
        note.content = to_editor(note.content)

    return notes
44dfa40fb0bf3c5c3c2aafb2731583b6e13d8853
2,002
def normalization(arr, normalize_mode, norm_range=[0, 1]):
    """
    Helper function: Normalizes the image based on the specified mode and range

    Args:
        arr: numpy array
        normalize_mode: either "whiten", "normalize_clip", or "normalize" representing the type of normalization to use
        norm_range: (Optional) Specifies the range for the numpy array values

    Returns:
        A normalized array based on the specifications
    """
    # reinitiating the batch_size dimension
    if normalize_mode == "whiten":
        return whiten(arr)
    elif normalize_mode == "normalize_clip":
        return normalize_clip(arr, norm_range=norm_range)
    elif normalize_mode == "normalize":
        return minmax_normalize(arr, norm_range=norm_range)
    else:
        raise NotImplementedError("Please use the supported modes.")
8400419db77c2f76ba63999ecae89eb3fbdfae6d
2,003
def draw_mask(img, mask, col, alpha=0.4, show_border=True, border_thick=0):
    """Visualizes a single binary mask."""
    was_pil = isinstance(img, (Image.Image))
    img = np.array(img)
    img = img.astype(np.float32)
    idx = np.nonzero(mask)

    img[idx[0], idx[1], :] *= 1.0 - alpha
    img[idx[0], idx[1], :] += alpha * col

    if border_thick:
        contours, hierarchy = cv2.findContours(
            mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(img, contours, -1, _WHITE, border_thick, cv2.LINE_AA)

    img = img.astype(np.uint8)
    return Image.fromarray(img) if was_pil else img
047bfc2f26ed38c28ff31f46746542a5d56182c4
2,004
def build_md_page(page_info: parser.PageInfo) -> str:
  """Given a PageInfo object, return markdown for the page.

  Args:
    page_info: Must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
      `parser.ModulePageInfo`.

  Returns:
    Markdown for the page

  Raises:
    ValueError: if `page_info` is an instance of an unrecognized class
  """
  if isinstance(page_info, parser.ClassPageInfo):
    return ClassPageBuilder(page_info).build()

  if isinstance(page_info, parser.FunctionPageInfo):
    return FunctionPageBuilder(page_info).build()

  if isinstance(page_info, parser.ModulePageInfo):
    return ModulePageBuilder(page_info).build()

  if isinstance(page_info, parser.TypeAliasPageInfo):
    return TypeAliasPageBuilder(page_info).build()

  raise ValueError(f'Unknown Page Info Type: {type(page_info)}')
86ed4f8e1b9b733f45e827c65b067295a9a2ff06
2,005
from typing import Optional


def transpose(data: NodeInput, input_order: NodeInput, name: Optional[str] = None) -> Node:
    """Return a node which transposes the data in the input tensor.

    @param data: The input tensor to be transposed
    @param input_order: Permutation of axes to be applied to the input tensor
    @return Transpose node
    """
    return _get_node_factory_opset1().create("Transpose", as_nodes(data, input_order))
bc84792893352cdd235efd9e33fdc53cadd6521f
2,006
def find_opposite_reader(card_reader_list, find):
    """Returns the card reader on the opposite side of the door for the card reader in find"""
    for c in card_reader_list:
        if c.room_a == find.room_b and c.room_b == find.room_a:
            return c
    raise (Exception("No reader on opposite side found"))
8a70b9b35174be62f3ca816f385b4c29a6ebebe8
2,007
def tag_from_clark(name):
    """Get a human-readable variant of the XML Clark notation tag ``name``.

    For a given name using the XML Clark notation, return a human-readable
    variant of the tag name for known namespaces. Otherwise, return the name
    as is.
    """
    match = CLARK_TAG_REGEX.match(name)
    if match and match.group("namespace") in NAMESPACES_REV:
        args = {"ns": NAMESPACES_REV[match.group("namespace")],
                "tag": match.group("tag")}
        return "%(ns)s:%(tag)s" % args
    return name
948ea17b017926353a37d2ceab031751146e445a
2,008
def build_k_indices(y, k_fold, seed):
    """
    Randomly partitions the indices of the data set into k groups

    Args:
        y: labels, used for indexing
        k_fold: number of groups after the partitioning
        seed: the random seed value

    Returns:
        k_indices: an array of k sub-indices that are randomly partitioned
    """
    num_rows = y.shape[0]
    interval = int(num_rows / k_fold)
    np.random.seed(seed)
    indices = np.random.permutation(num_rows)
    k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]
    return np.array(k_indices)
3d5684ef59bc1ac0abeca243c394499258be5b54
2,009
def get_parent(obj, default=_marker):
    """Returns the container the object was traversed via.

    Returns None if the object is a containment root.
    Raises TypeError if the object doesn't have enough context to get the
    parent.
    """
    if IRoot.providedBy(obj):
        return None

    parent = aq_parent(aq_inner(obj))
    if parent is not None:
        return parent

    if default != _marker:
        return default

    raise TypeError("Not enough context information to get parent", obj)
a6c53ddd4a8bfb81f211737edf1da12688a3f4e2
2,010
import numpy


def MRR(logits, target):
    """
    Compute mean reciprocal rank.
    :param logits: 2d array [batch_size x rel_docs_per_query]
    :param target: 2d array [batch_size x rel_docs_per_query]
    :return: mean reciprocal rank [a float value]
    """
    assert logits.shape == target.shape
    sorted, indices = numpy.sort(logits, 1)[::-1], numpy.argsort(-logits, 1)
    reciprocal_rank = 0
    for i in range(indices.shape[0]):
        for j in range(indices.shape[1]):
            if target[i, indices[i, j]] == 1:
                reciprocal_rank += 1.0 / (j + 1)
                break

    return reciprocal_rank / indices.shape[0]
eb9249bf0e3942aeb01b148a0db28c3e5f9dd00a
2,011
def range(starts, limits=None, deltas=1, dtype=None, name=None, row_splits_dtype=dtypes.int64): """Returns a `RaggedTensor` containing the specified sequences of numbers. Each row of the returned `RaggedTensor` contains a single sequence: ```python ragged.range(starts, limits, deltas)[i] == tf.range(starts[i], limits[i], deltas[i]) ``` If `start[i] < limits[i] and deltas[i] > 0`, then `output[i]` will be an empty list. Similarly, if `start[i] > limits[i] and deltas[i] < 0`, then `output[i]` will be an empty list. This behavior is consistent with the Python `range` function, but differs from the `tf.range` op, which returns an error for these cases. Examples: >>> tf.ragged.range([3, 5, 2]).to_list() [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]] >>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list() [[0, 1, 2], [], [8, 9, 10, 11]] >>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list() [[0, 2], [], [8, 10]] The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. The vector inputs must all have the same size. Scalar inputs are broadcast to match the size of the vector inputs. Args: starts: Vector or scalar `Tensor`. Specifies the first entry for each range if `limits` is not `None`; otherwise, specifies the range limits, and the first entries default to `0`. limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for each range. deltas: Vector or scalar `Tensor`. Specifies the increment for each range. Defaults to `1`. dtype: The type of the elements of the resulting tensor. If not specified, then a value is chosen based on the other args. name: A name for the operation. row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` tensor. One of `tf.int32` or `tf.int64`. Returns: A `RaggedTensor` of type `dtype` with `ragged_rank=1`. """ row_splits_dtype = dtypes.as_dtype(row_splits_dtype) if limits is None: starts, limits = 0, starts with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name: starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts') limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits') deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas') # infer dtype if not explicitly provided if dtype is None: starts, limits, deltas = _infer_matching_dtype( [starts, limits, deltas], [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]) result = gen_ragged_math_ops.ragged_range( starts, limits, deltas, Tsplits=row_splits_dtype, name=name) return ragged_tensor.RaggedTensor.from_row_splits( result.rt_dense_values, result.rt_nested_splits, validate=False)
177c956844596b5125c288db8859a38ecf4e8b80
2,012
def ecef2enuv(u, v, w, lat0, lon0, deg=True):
    """
    for VECTOR i.e. between two points

    input
    -----
    x,y,z  [meters] target ECEF location [0,Infinity)
    """
    if deg:
        lat0 = radians(lat0)
        lon0 = radians(lon0)

    t = cos(lon0) * u + sin(lon0) * v
    uEast = -sin(lon0) * u + cos(lon0) * v
    wUp = cos(lat0) * t + sin(lat0) * w
    vNorth = -sin(lat0) * t + cos(lat0) * w

    return uEast, vNorth, wUp
b9b6adb9232407043927cdbc0c2cec4f0b9b50a2
2,013
def interpolate_ray_dist(ray_dists, order='spline'): """ interpolate ray distances :param [float] ray_dists: :param str order: degree of interpolation :return [float]: >>> vals = np.sin(np.linspace(0, 2 * np.pi, 20)) * 10 >>> np.round(vals).astype(int).tolist() [0, 3, 6, 8, 10, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -8, -6, -3, 0] >>> vals[3:7] = -1 >>> vals[16:] = -1 >>> vals_interp = interpolate_ray_dist(vals, order=3) >>> np.round(vals_interp).astype(int).tolist() [0, 3, 6, 9, 10, 10, 8, 7, 5, 2, -2, -5, -7, -9, -10, -10, -10, -8, -4, 1] >>> vals_interp = interpolate_ray_dist(vals, order='spline') >>> np.round(vals_interp).astype(int).tolist() [0, 3, 6, 8, 9, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -9, -7, -5, -3] >>> vals_interp = interpolate_ray_dist(vals, order='cos') >>> np.round(vals_interp).astype(int).tolist() [0, 3, 6, 8, 10, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -8, -6, -3, 0] """ x_space = np.arange(len(ray_dists)) ray_dists = np.array(ray_dists) missing = ray_dists == -1 x_train = x_space[ray_dists != -1] x_train_ext = np.hstack((x_train - len(x_space), x_train, x_train + len(x_space))) y_train = ray_dists[ray_dists != -1] y_train_ext = np.array(y_train.tolist() * 3) if isinstance(order, int): # model = pipeline.make_pipeline(preprocessing.PolynomialFeatures(order), # linear_model.Ridge()) # model.fit(x_space[ray_dists != -1], ray_dists[ray_dists != -1]) # ray_dists[ray_dists == -1] = model.predict(x_space[ray_dists == -1]) z = np.polyfit(x_train, y_train, order) fn_interp = np.poly1d(z) ray_dists[missing] = fn_interp(x_space[missing]) elif order == 'spline': uinterp_us = interpolate.InterpolatedUnivariateSpline(x_train_ext, y_train_ext) ray_dists[missing] = uinterp_us(x_space[missing]) elif order == 'cos': def _fn_cos(x, t): return x[0] + x[1] * np.sin(x[2] + x[3] * t) def _fn_cos_residual(x, t, y): return _fn_cos(x, t) - y x0 = np.array([np.mean(y_train), (y_train.max() - y_train.min()) / 2., 0, len(x_space) / np.pi]) lsm_res = optimize.least_squares(_fn_cos_residual, x0, gtol=1e-1, # loss='soft_l1', f_scale=0.1, args=(x_train, y_train)) ray_dists[missing] = _fn_cos(lsm_res.x, x_space[missing]) return ray_dists
f1ef1906fd2871e995355a7dd8818a946eefe1e3
2,014
def distance(left, right, pairwise=pairwise['prod'], distance_function=None):
    """
    Calculate the distance between two *k*-mer profiles.

    :arg left, right: Profiles to calculate distance between.

    :return: The distance between `left` and `right`.
    :rtype: float
    """
    if not distance_function:
        return multiset(left, right, pairwise)
    return distance_function(left, right)
1be9b2777cf58bf52e2e33d6c39ed3655edc2354
2,015
def _rec_get_all_imports_exports(fips_dir, proj_dir, result) : """recursively get all imported projects, their exported and imported modules in a dictionary object: project-1: url: git-url (not valid for first, top-level project) exports: header-dirs: [ ] conditional-header-dirs: dir: cmake-if condition string lib-dirs: [ ] defines: def-key: def-val ... modules : mod: dir mod: dir ... imports: name: git: [git-url] branch: [optional: branch or tag] cond: [optional: cmake-if condition string conditionally including the dependency] name: ... ... ... :param fips_dir: absolute fips directory :param proj_dir: absolute project directory :param result: in/out current result :returns: bool success, and modified result dictionary """ success = True ws_dir = util.get_workspace_dir(fips_dir) proj_name = util.get_project_name_from_dir(proj_dir) if proj_name not in result : imports = get_imports(fips_dir, proj_dir) exports = get_exports(proj_dir) for dep_proj_name in imports : if dep_proj_name not in result : dep_proj_dir = util.get_project_dir(fips_dir, dep_proj_name) dep_url = imports[dep_proj_name]['git'] success, result = _rec_get_all_imports_exports(fips_dir, dep_proj_dir, result) # break recursion on error if not success : return success, result result[proj_name] = {} result[proj_name]['proj_dir'] = proj_dir result[proj_name]['imports'] = imports result[proj_name]['exports'] = exports # done return success, result
66c0d25d27559e6841bcfced49646f5a711bfeb3
2,016
from typing import Optional from typing import Sequence def get_database_cluster(name: Optional[str] = None, tags: Optional[Sequence[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseClusterResult: """ Provides information on a DigitalOcean database cluster resource. ## Example Usage ```python import pulumi import pulumi_digitalocean as digitalocean example = digitalocean.get_database_cluster(name="example-cluster") pulumi.export("databaseOutput", example.uri) ``` :param str name: The name of the database cluster. """ __args__ = dict() __args__['name'] = name __args__['tags'] = tags if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('digitalocean:index/getDatabaseCluster:getDatabaseCluster', __args__, opts=opts, typ=GetDatabaseClusterResult).value return AwaitableGetDatabaseClusterResult( database=__ret__.database, engine=__ret__.engine, host=__ret__.host, id=__ret__.id, maintenance_windows=__ret__.maintenance_windows, name=__ret__.name, node_count=__ret__.node_count, password=__ret__.password, port=__ret__.port, private_host=__ret__.private_host, private_network_uuid=__ret__.private_network_uuid, private_uri=__ret__.private_uri, region=__ret__.region, size=__ret__.size, tags=__ret__.tags, uri=__ret__.uri, urn=__ret__.urn, user=__ret__.user, version=__ret__.version)
edc9d4e0264e90a1491a809c40e2cf2961699d80
2,017
def tesla_loadhook(h, *args, **kwargs):
    """
    Converts a load hook into an application processor.

        >>> app = auto_application()
        >>> def f(*args, **kwargs): "something done before handling request"
        ...
        >>> app.add_processor(loadhook(f, *args, **kwargs))
    """
    def processor(handler):
        h(*args, **kwargs)
        return handler()

    return processor
65743cd9220ddef40294cde0f4f6566ae9235772
2,020
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): #pragma: no cover """ Force a string to be unicode. If strings_only is True, don't convert (some) non-string-like objects. Originally copied from the Django source code, further modifications have been made. Original copyright and license: Copyright (c) Django Software Foundation and individual contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Django nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ if strings_only and is_protected_type(s): return s if not isinstance(s, str,): if hasattr(s, '__unicode__'): s = str(s) else: try: s = str(str(s), encoding, errors) except UnicodeEncodeError: if not isinstance(s, Exception): raise # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII data without special # handling to display as a string. We need to handle this # without raising a further exception. We do an # approximation to what the Exception's standard str() # output should be. s = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in s]) elif not isinstance(s, str): # Note: We use .decode() here, instead of unicode(s, encoding, # errors), so that if s is a SafeString, it ends up being a # SafeUnicode at the end. s = s.decode(encoding, errors) return s
61992707364bfbb3e714bb52005a417387f8d7de
2,021
def extractYoloInfo(yolo_output_format_data):
    """ Extract box, objectness, class from yolo output format data """
    box = yolo_output_format_data[..., :6]
    conf = yolo_output_format_data[..., 6:7]
    category = yolo_output_format_data[..., 7:]
    return box, conf, category
ff28a5ce5490c61722ca06b0e09b9bd85ee7e111
2,022
def bbox_diou(bboxes1, bboxes2): """ Complete IoU @param bboxes1: (a, b, ..., 4) @param bboxes2: (A, B, ..., 4) x:X is 1:n or n:n or n:1 @return (max(a,A), max(b,B), ...) ex) (4,):(3,4) -> (3,) (2,1,4):(2,3,4) -> (2,3) """ bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3] bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3] bboxes1_coor = tf.concat( [ bboxes1[..., :2] - bboxes1[..., 2:] * 0.5, bboxes1[..., :2] + bboxes1[..., 2:] * 0.5, ], axis=-1, ) bboxes2_coor = tf.concat( [ bboxes2[..., :2] - bboxes2[..., 2:] * 0.5, bboxes2[..., :2] + bboxes2[..., 2:] * 0.5, ], axis=-1, ) left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2]) right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:]) inter_section = tf.maximum(right_down - left_up, 0.0) inter_area = inter_section[..., 0] * inter_section[..., 1] union_area = bboxes1_area + bboxes2_area - inter_area iou = tf.math.divide_no_nan(inter_area, union_area) enclose_left_up = tf.minimum(bboxes1_coor[..., :2], bboxes2_coor[..., :2]) enclose_right_down = tf.maximum( bboxes1_coor[..., 2:], bboxes2_coor[..., 2:] ) enclose_section = enclose_right_down - enclose_left_up c_2 = enclose_section[..., 0] ** 2 + enclose_section[..., 1] ** 2 center_diagonal = bboxes2[..., :2] - bboxes1[..., :2] rho_2 = center_diagonal[..., 0] ** 2 + center_diagonal[..., 1] ** 2 diou = iou - tf.math.divide_no_nan(rho_2, c_2) return diou
f32e4a289f437494fd738c1128d6e7c7a8e02c7e
2,023
def showp1rev(context, mapping):
    """Integer. The repository-local revision number of the changeset's
    first parent, or -1 if the changeset has no parents. (DEPRECATED)"""
    ctx = context.resource(mapping, b'ctx')
    return ctx.p1().rev()
2c843d5476a8e5b43fa8ac31351de633c5fa3d6c
2,024
def erp_pretax(t, ma, st, ra, par):
    """ early retirement pension (efterløn) pretax"""

    # initialize
    ERP = np.zeros(1)

    # pre two year period
    if par.T_erp <= t < par.T_two_year:
        if ra == 1:
            priv = priv_pension(ma, st, par)
            ERP[:] = np.maximum(0, par.ERP_high - 0.6*0.05*np.maximum(0, priv - par.ERP_low))

    # two year period
    elif par.T_two_year <= t < par.T_oap:

        # two year rule is satisfied
        if ra == 0:
            ERP[:] = par.ERP_2

        # two year rule not satisfied
        elif ra == 1:
            priv = priv_pension(ma, st, par)
            ERP[:] = np.maximum(0, par.ERP_high - 0.6*0.05*np.maximum(0, priv - par.ERP_low))

    # return
    return ERP
d9a3142236aa942f8c86db1c484e57e4fc7ee278
2,025
def add_missing_cmd(command_list):
    """Adds missing cmd tags to the given command list."""
    # E.g.: given:
    #   ['a', '0', '0', '0', '0', '0', '0', '0',
    #    '0', '0', '0', '0', '0', '0', '0']
    # Converts to:
    #   [['a', '0', '0', '0', '0', '0', '0', '0'],
    #    ['a', '0', '0', '0', '0', '0', '0', '0']]
    # And returns a string that joins these elements with spaces.
    cmd_tag = command_list[0]
    args = command_list[1:]

    final_cmds = []
    for arg_batch in grouper(args, NUM_ARGS[cmd_tag]):
        final_cmds.append([cmd_tag] + list(arg_batch))

    if not final_cmds:
        # command has no args (e.g.: 'z')
        final_cmds = [[cmd_tag]]

    return final_cmds
190884575d0110f06088b9be70008da56c279344
2,027
def replace_umlauts(s: str) -> str:
    """
    Replace special symbols with the letters with umlauts (ä, ö and ü)

    :param s: string with the special symbols (::)
    :return: edited string
    """
    out = s.replace('A::', 'Ä').replace('O::', 'Ö').replace('U::', 'Ü') \
        .replace('a::', 'ä').replace('o::', 'ö').replace('u::', 'ü')
    return out
8fad1f1017a3fd860d7e32fd191dd060b75a7bb8
2,028
def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None,
                       flow_class=Flow, allocate=True):
    """
    Build a :class:`Flow` for band structure calculations.

    Args:
        workdir: Working directory.
        scf_input: Input for the GS SCF run.
        nscf_input: Input for the NSCF run (band structure run).
        dos_inputs: Input(s) for the NSCF run (dos run).
        manager: :class:`TaskManager` object used to submit the jobs
                 Initialized from manager.yml if manager is None.
        flow_class: Flow subclass
        allocate: True if the flow should be allocated before returning.

    Returns:
        :class:`Flow` object
    """
    flow = flow_class(workdir, manager=manager)
    work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
    flow.register_work(work)

    # Handy aliases
    flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks

    if allocate:
        flow.allocate()

    return flow
f3515fdfa8c719c8b91a8f76a04d468e545d6f23
2,029
def resnet_50(num_classes, data_format='channels_first', pruning_method=None):
  """Returns the ResNet model for a given size and number of output classes."""
  return resnet_50_generator(
      block_fn=bottleneck_block_,
      lst_layers=[3, 4, 6, 3],
      num_classes=num_classes,
      pruning_method=pruning_method,
      data_format=data_format)
4962f9a4cf4aaaf0052941279c8156e29b2cb639
2,031
import json import base64 def read_amuselabs_data(s): """ Read in an amuselabs string, return a dictionary of data """ # Data might be base64'd or not try: data = json.loads(s) except json.JSONDecodeError: s1 = base64.b64decode(s) data = json.loads(s1) ret = {} # metadata # technically these can be codewords but i've never seen one kind = "crossword" width, height = data['w'], data['h'] ret['metadata'] = { 'width': width , 'height': height , 'kind': kind , 'author': data.get('author') , 'title': data.get('title') , 'copyright': data.get('copyright') , 'noClueCells': True # no notepad? } # grid grid = [] box = data['box'] cellInfos = data.get('cellInfos', []) # Reshape cellInfos to make lookup easier markup = {} for c in cellInfos: markup[(c['x'], c['y'])] = c for y in range(height): for x in range(width): cell = {'x': x, 'y': y, 'value': None} if box[x][y] == '\x00': cell['isBlock'] = True else: cell['solution'] = box[x][y] style = {} if markup.get((x, y)): thisMarkup = markup[(x, y)] if thisMarkup.get('isCircled'): style['shapebg'] = 'circle' if thisMarkup.get('isVoid'): cell['isBlock'] = False cell['isVoid'] = True bar_string = '' for letter, side in {'B': 'bottom', 'R': 'right'}.items(): if thisMarkup.get(f'{side}Wall'): bar_string += letter if bar_string: style['barred'] = bar_string cell['style'] = style grid.append(cell) ret['grid'] = grid # clues placed_words = data['placedWords'] across_words = [word for word in placed_words if word['acrossNotDown']] down_words = [word for word in placed_words if not word['acrossNotDown']] # sorting is probably unnecessary across_words = sorted(across_words, key=lambda x: (x['y'], x['x'])) down_words = sorted(down_words, key=lambda x: (x['y'], x['x'])) across_clues = [{'number': str(x['clueNum']), 'clue': x['clue']['clue']} for x in across_words] down_clues = [{'number': str(x['clueNum']), 'clue': x['clue']['clue']} for x in down_words] ret['clues'] = [{'title': 'Across', 'clues': across_clues}, {'title': 'Down', 'clues': down_clues}] return ret
f9c2fb2807d1003261bec7b58e4ba025aac65a6a
2,032
def calinski_harabasz(dataset_values: DatasetValues):
    """Calinski, T.; Harabasz, J. (1974). A dendrite method for cluster analysis.
    Communications in Statistics - Theory and Methods, v.3, n.1, p.1-27.

    The objective is to maximize the value [0, +Inf]"""
    if dataset_values.K == 1:
        return 0

    return calinski_harabasz_score(dataset_values.data, dataset_values.cluster_labels)
c8231971350d22d1067056c53838f0536ae03e77
2,033
from re import match, IGNORECASE


def parse_version(version):
    """
    input version string of the form:
        'Major.Minor.Patch+CommitHash'
    like:
        '0.1.5+95ffef4'
        ------ or ------
        '0.1.0'

    returns version_info tuple of the form:
        (major, minor, patch, hash)
    like:
        (0, 1, 5, '95ffef4')
        -------- or --------
        (0, 1, 0, '')
    """
    matches = match(
        '(?P<major>[0-9]+)\.(?P<minor>[0-9]+)\.(?P<patch>[0-9]+)(g(?P<hash>[a-z0-9]*))?',
        version,
        IGNORECASE
    )
    if matches:
        major = int(matches.group('major'))
        minor = int(matches.group('minor'))
        patch = int(matches.group('patch'))
        hash = matches.group('hash') or ''
        return (major, minor, patch, hash)
    else:
        raise ValueError(
            "Version string, '%s' could not be parsed. It should be of the form: "
            "'Major.Minor.Patch+CommitHash'." % version)
cc9b326e498991092a494458d4f98cce7bbb28f9
2,034
def _location_sensitive_score(W_query, W_fil, W_keys): """Impelements Bahdanau-style (cumulative) scoring function. This attention is described in: J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Ben- gio, “Attention-based models for speech recognition,” in Ad- vances in Neural Information Processing Systems, 2015, pp. 577–585. ############################################################################# hybrid attention (content-based + location-based) f = F * α_{i-1} energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a)) ############################################################################# Args: W_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features. W_location: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]' W_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs. Returns: A '[batch_size, max_time]' attention score (energy) """ # Get the number of hidden units from the trailing dimension of keys dtype = W_query.dtype num_units = W_keys.shape[-1].value or array_ops.shape(W_keys)[-1] v_a = tf.get_variable( "attention_variable_projection", shape=[num_units], dtype=dtype, initializer=tf.contrib.layers.xavier_initializer(), trainable=True, ) print(v_a) b_a = tf.get_variable( "attention_bias", shape=[num_units], dtype=dtype, initializer=tf.zeros_initializer(), ) return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fil + b_a), [2])
f3daa106f6ac819ef5037a221e2cd768d6810642
2,035
from StreamDeck.DeviceManager import DeviceManager  # python-elgato-streamdeck package


def get_streamdecks():
    """
    Retrieves all connected streamdecks
    """
    streamdecks = DeviceManager().enumerate()
    return streamdecks
f649fe4404ec6be71cdb4f9cd5805738e1d0b823
2,036
import six


def clean_string(text):
    """
    Remove Lucene reserved characters from query string
    """
    if isinstance(text, six.string_types):
        return text.translate(UNI_SPECIAL_CHARS).strip()
    return text.translate(None, STR_SPECIAL_CHARS).strip()
5387d76d4dc47997eac751538670cc426d854449
2,037
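The translation tables referenced above are not part of the snippet; a minimal sketch of what they could look like, covering only the single-character subset of Lucene's reserved characters:

# Hypothetical definitions -- the real UNI_SPECIAL_CHARS / STR_SPECIAL_CHARS live elsewhere in the module.
_LUCENE_CHARS = '+-!(){}[]^"~*?:\\/'
UNI_SPECIAL_CHARS = {ord(c): None for c in _LUCENE_CHARS}  # mapping form for unicode str.translate
STR_SPECIAL_CHARS = _LUCENE_CHARS                          # deletechars form for Python 2 bytes.translate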
def convert_single_example(example_index, example, label_size, max_seq_length,
                           tokenizer, max_qa_length):
    """Loads a data file into a list of `InputBatch`s."""

    # RACE is a multiple choice task. To perform this task using AlBERT,
    # we will use the formatting proposed in "Improving Language
    # Understanding by Generative Pre-Training" and suggested by
    # @jacobdevlin-google in this issue
    # https://github.com/google-research/bert/issues/38.
    #
    # Each choice will correspond to a sample on which we run the
    # inference. For a given RACE example, we will create the 4
    # following inputs:
    # - [CLS] context [SEP] choice_1 [SEP]
    # - [CLS] context [SEP] choice_2 [SEP]
    # - [CLS] context [SEP] choice_3 [SEP]
    # - [CLS] context [SEP] choice_4 [SEP]
    # The model will output a single value for each input. To get the
    # final decision of the model, we will run a softmax over these 4
    # outputs.
    if isinstance(example, classifier_utils.PaddingInputExample):
        return classifier_utils.InputFeatures(
            example_id=0,
            input_ids=[[0] * max_seq_length] * label_size,
            input_mask=[[0] * max_seq_length] * label_size,
            segment_ids=[[0] * max_seq_length] * label_size,
            label_id=0,
            is_real_example=False)
    else:
        context_tokens = tokenizer.tokenize(example.context_sentence)
        if example.start_ending is not None:
            start_ending_tokens = tokenizer.tokenize(example.start_ending)

        all_input_tokens = []
        all_input_ids = []
        all_input_mask = []
        all_segment_ids = []
        for ending in example.endings:
            # We create a copy of the context tokens in order to be
            # able to shrink it according to ending_tokens
            context_tokens_choice = context_tokens[:]
            if example.start_ending is not None:
                ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
            else:
                ending_tokens = tokenizer.tokenize(ending)
            # Modifies `context_tokens_choice` and `ending_tokens` in
            # place so that the total length is less than the
            # specified length.  Account for [CLS], [SEP], [SEP] with "- 3"
            ending_tokens = ending_tokens[- max_qa_length:]

            if len(context_tokens_choice) + len(ending_tokens) > max_seq_length - 3:
                context_tokens_choice = context_tokens_choice[: (
                    max_seq_length - 3 - len(ending_tokens))]
            tokens = ["[CLS]"] + context_tokens_choice + (
                ["[SEP]"] + ending_tokens + ["[SEP]"])
            segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (
                len(ending_tokens) + 1)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding = [0] * (max_seq_length - len(input_ids))
            input_ids += padding
            input_mask += padding
            segment_ids += padding

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            all_input_tokens.append(tokens)
            all_input_ids.append(input_ids)
            all_input_mask.append(input_mask)
            all_segment_ids.append(segment_ids)

        label = example.label
        if example_index < 5:
            tf.logging.info("*** Example ***")
            tf.logging.info("id: {}".format(example.example_id))
            for choice_idx, (tokens, input_ids, input_mask, segment_ids) in \
                    enumerate(zip(all_input_tokens, all_input_ids, all_input_mask, all_segment_ids)):
                tf.logging.info("choice: {}".format(choice_idx))
                tf.logging.info("tokens: {}".format(" ".join(tokens)))
                tf.logging.info(
                    "input_ids: {}".format(" ".join(map(str, input_ids))))
                tf.logging.info(
                    "input_mask: {}".format(" ".join(map(str, input_mask))))
                tf.logging.info(
                    "segment_ids: {}".format(" ".join(map(str, segment_ids))))
                tf.logging.info("label: {}".format(label))

        return classifier_utils.InputFeatures(
            example_id=example.example_id,
            input_ids=all_input_ids,
            input_mask=all_input_mask,
            segment_ids=all_segment_ids,
            label_id=label
        )
385f5f2801a41e0216e8a8c22d089e986bb55588
2,038
from typing import Optional, Tuple

import numpy as np
from numpy.typing import NDArray  # assumed source of the NDArray alias used in the signature


def _single_optimal_block(x: NDArray) -> Tuple[float, float]:
    """
    Compute the optimal window length for a single series

    Parameters
    ----------
    x : ndarray
        The data to use in the optimal window estimation

    Returns
    -------
    stationary : float
        Estimated optimal window length for stationary bootstrap
    circular : float
        Estimated optimal window length for circular bootstrap
    """
    nobs = x.shape[0]
    eps = x - x.mean(0)
    b_max = np.ceil(min(3 * np.sqrt(nobs), nobs / 3))
    kn = max(5, int(np.log10(nobs)))
    m_max = int(np.ceil(np.sqrt(nobs))) + kn
    # Find first collection of kn autocorrelations that are insignificant
    cv = 2 * np.sqrt(np.log10(nobs) / nobs)
    acv = np.zeros(m_max + 1)
    abs_acorr = np.zeros(m_max + 1)
    opt_m: Optional[int] = None
    for i in range(m_max + 1):
        v1 = eps[i + 1 :] @ eps[i + 1 :]
        v2 = eps[: -(i + 1)] @ eps[: -(i + 1)]
        cross_prod = eps[i:] @ eps[: nobs - i]
        acv[i] = cross_prod / nobs
        abs_acorr[i] = np.abs(cross_prod) / np.sqrt(v1 * v2)
        if i >= kn:
            if np.all(abs_acorr[i - kn : i] < cv) and opt_m is None:
                opt_m = i - kn
    m = 2 * max(opt_m, 1) if opt_m is not None else m_max
    m = min(m, m_max)
    g = 0.0
    lr_acv = acv[0]
    for k in range(1, m + 1):
        lam = 1 if k / m <= 1 / 2 else 2 * (1 - k / m)
        g += 2 * lam * k * acv[k]
        lr_acv += 2 * lam * acv[k]
    d_sb = 2 * lr_acv ** 2
    d_cb = 4 / 3 * lr_acv ** 2
    b_sb = ((2 * g ** 2) / d_sb) ** (1 / 3) * nobs ** (1 / 3)
    b_cb = ((2 * g ** 2) / d_cb) ** (1 / 3) * nobs ** (1 / 3)
    b_sb = min(b_sb, b_max)
    b_cb = min(b_cb, b_max)
    return b_sb, b_cb
7de0221ddc654d4f9e8ddd56d65f688c096a7784
2,040
import numpy as np


def predict(params, X):
    """
    Using the learned parameters, predicts a class for each example in X

    Arguments:
    parameters -- python dictionary containing your parameters
    X -- input data of size (n_x, m)

    Returns
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    # Computes probabilities using forward propagation, and classifies
    # to 0/1 using 0.5 as the threshold.
    A2, cache = forward_propagation(X, params)
    predictions = np.round(A2)

    return predictions
c647114ad415b2ae6c75f2fe2e207bf279775131
2,041
from django.http import HttpResponse


def response(request):
    """
    Return a response object
    :param request:
    :return:
    """
    json_str = '{"name": "张三", "age": 18}'  # the payload as a whole is a plain string

    response = HttpResponse(json_str, content_type="application/json", status=200)
    response["dev"] = "aGrass0825"  # add a custom header to the response
    return response
a44b35682ff8f5de168711730a10056653319512
2,042
import tensorflow as tf


def nest_to_flat_dict(nest):
    """Convert a nested structure into a flat dictionary.

    Args:
        nest: A nested structure.

    Returns:
        flat_dict: A dictionary with strings keys that can be converted back into
            the original structure via `flat_dict_to_nest`.
    """
    flat_sequence = tf.nest.flatten(nest)
    return {str(k): v for k, v in enumerate(flat_sequence)}
f74308fc4f7c0b97d6524faea65915263a8ced9b
2,043
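A quick illustration of the flattening order, assuming TensorFlow 2.x is installed and the helper above is in scope (dict entries are flattened by sorted key):

nested = {'a': 1, 'b': [2, 3]}
assert nest_to_flat_dict(nested) == {'0': 1, '1': 2, '2': 3}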
from matplotlib.pyplot import subplots  # assumed: matplotlib's pyplot.subplots


def plot_with_front(gen, front, title, fname):
    """
    plot with front: Print the generation gen and front, highlighting front
    as the pareto front on the graph.

    Parameters:
        gen: The generation to plot.
        front: The pareto front extracted from generation gen
        title: Plot Title
        fname: path to output file for plot image.
    """
    fig, ax = subplots()
    plot_inds(ax, gen, 'Non-Dominant')
    plot_inds(ax, front, 'Dominant')
    ax.set_title(title)
    ax.legend()
    fig.savefig(fname)
    return [fig, ax]
6556a22c6484e4c96f79a14a770cca934f50e274
2,046
def find_closest_positive_divisor(a, b):
    """Return non-trivial integer divisor (bh) of (a) closest to (b) in abs(b - bh) such that a % bh == 0"""
    assert a > 0 and b > 0
    if a <= b:
        return a
    for k in range(0, a - b + 1):
        bh = b + k
        if bh > 1 and a % bh == 0:
            return bh
        bh = b - k
        if bh > 1 and a % bh == 0:
            return bh
    return a
1a68e1767680f82db232095806adfe1c27fb956e
2,047
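Spot checks for the divisor search above (no assumptions beyond the function itself):

assert find_closest_positive_divisor(12, 4) == 4   # 4 already divides 12
assert find_closest_positive_divisor(12, 5) == 6   # nearest non-trivial divisor of 12
assert find_closest_positive_divisor(7, 3) == 7    # primes fall back to a itself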
def simplify_stl_names(decl):
    """Take common STL/Standard Library names and simplify them to help make
    the stack trace look more readable and less like the graphics in the
    matrix.
    """
    p = simplify_template_call(decl)
    if p == []:
        return decl
    return p[0] + '<' + ', '.join(p[1:-1]) + '>::' + p[-1]
53ea9c18e47ce4a7d922db74efdc45646441ea49
2,048
from typing import Callable, Optional, Sequence, Tuple, Union

import numpy as np


def sample_switching_models(
    models: Sequence,
    usage_seq: Sequence,
    X: Union[None, Sequence, Callable] = None,
    initial_conditions: Optional[Tuple[Sequence, Sequence]] = None,
    return_input: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """ Sample from a non-stationary stochastic process that switches between
    different ARMA models at given times.

    This function sets the models' `history_` attribute appropriately to
    ensure consistency across time.

    Parameters
    ----------
    models
        Sequence of models to use.
    usage_seq
        Sequence identifying the model to use at each time step. Models are
        labeled from `0` to `len(models) - 1`.
    X
        If given, this overrides the input source for the models. If it is a
        sequence, it should be at least as long as `len(usage_seq)`.
    initial_conditions
        A tuple, `(initial_y, initial_x)`, of recent samples of the output and
        input sequences used to seed the simulation. If these are not
        provided, they are assumed equal to zero.
    return_input
        If true, returns both output and input. If false (the default),
        returns only the output.

    Returns a sequence `Y` of generated samples. If `return_input` is true,
    returns a tuple `(Y, X)` of generated output samples and input samples.
    If the `X` parameter was used and was a sequence, the output `X` simply
    mirrors the input.
    """
    # check the inputs
    if len(models) == 0:
        raise ValueError("No models given.")

    if np.min(usage_seq) < 0 or np.max(usage_seq) >= len(models):
        raise ValueError("Invalid entry in usage_seq vector.")

    # handle vector X
    if X is not None and not callable(X):
        if len(X) < len(usage_seq):
            raise ValueError("Not enough input values in X.")

        X_ret = X
        X = sources.Stream(X)
        have_X_ret = True
    else:
        X_ret = np.zeros(len(usage_seq))
        have_X_ret = False

    # handle default initial conditions
    if initial_conditions is None:
        initial_conditions = ([], [])

    # generate the samples
    Y_ret = np.zeros(len(usage_seq))
    usage_rle = rle_encode(usage_seq)
    ptr = 0
    for model_id, n_samples in usage_rle:
        model = models[model_id]

        # ensure proper history
        if ptr >= model.p:
            history_y = np.copy(Y_ret[ptr - model.p : ptr])
        else:
            n_left = model.p - ptr
            if len(initial_conditions[0]) >= n_left:
                history_y = np.hstack((initial_conditions[0][-n_left:], Y_ret[:ptr]))
            else:
                history_y = np.hstack(
                    (
                        np.zeros(n_left - len(initial_conditions[0])),
                        initial_conditions[0],
                        Y_ret[:ptr],
                    )
                )
        if ptr >= model.q:
            history_x = np.copy(X_ret[ptr - model.q : ptr])
        else:
            n_left = model.q - ptr
            if len(initial_conditions[1]) >= n_left:
                history_x = np.hstack((initial_conditions[1][-n_left:], X_ret[:ptr]))
            else:
                history_x = np.hstack(
                    (
                        np.zeros(n_left - len(initial_conditions[1])),
                        initial_conditions[1],
                        X_ret[:ptr],
                    )
                )

        model.history_ = (history_y, history_x)

        # generate and store the samples from this model
        crt_y, crt_x = model.transform(n_samples, X=X, return_input=True)
        Y_ret[ptr : ptr + n_samples] = crt_y
        if not have_X_ret:
            X_ret[ptr : ptr + n_samples] = crt_x

        ptr += n_samples

    if return_input:
        return Y_ret, X_ret
    else:
        return Y_ret
472e20968fe835b01da57c4a0abab376c006094b
2,049
import numpy as np


def eval_per_class(c_dets, c_truths, overlap_thresh=0.5, eval_phrase=False):
    """ Evaluation for each class.

    Args:
        c_dets: A dictionary of all detection results.
        c_truths: A dictionary of all ground-truth annotations.
        overlap_thresh: A float of the threshold used in IoU matching.

    Returns:
        scores_all: A list of numpy float array collecting the confidence
            scores of both true positives and false positives in each image.
        tp_fp_labels_all: A list of numpy float array collecting the true
            positives (=1) and false positives (=0) labels in each image.
        num_gt_all: An integer of the total number of valid ground-truth boxes.
    """
    num_gt_all = sum([len(c_truths[l]) for l in c_truths])

    scores_all = []
    tp_fp_labels_all = []
    img_keys = []
    for key in c_dets:
        img_keys.append(key)

        img_det = c_dets[key]
        num_det = len(img_det)
        scores = np.array([det['score'] for det in img_det])
        tp_fp_labels = np.zeros(num_det, dtype=bool)

        if key not in c_truths or all(scores < 0):
            # detections not in ground truth or detections have negative image
            # level label, classified as false positives
            scores_all.append(scores)
            tp_fp_labels_all.append(tp_fp_labels)
            continue

        img_gt = c_truths[key]
        if eval_phrase:
            ious = np.array([[IoU(d['rect'], g['rect']) for g in img_gt]
                             for d in img_det])
        else:
            ious = np.array([[min(IoU(d['subject_rect'], g['subject_rect']),
                                  IoU(d['object_rect'], g['object_rect']))
                              for g in img_gt] for d in img_det])

        if ious.shape[1] > 0:
            max_overlap_gt_ids = np.argmax(ious, axis=1)
            is_gt_box_detected = np.zeros(ious.shape[1], dtype=bool)
            for i in range(num_det):
                gt_id = max_overlap_gt_ids[i]
                if ious[i, gt_id] >= overlap_thresh:
                    if not is_gt_box_detected[gt_id]:
                        tp_fp_labels[i] = True
                        is_gt_box_detected[gt_id] = True

        # if ious.shape[1] > 0:
        #     max_overlap_gt_ids = np.argsort(-1 * ious, axis=1)
        #     is_gt_box_detected = np.zeros(ious.shape[1], dtype=bool)
        #     for i in range(num_det):
        #         for gt_id in max_overlap_gt_ids[i, :]:
        #             if ious[i, gt_id] >= overlap_thresh:
        #                 if not is_gt_box_detected[gt_id]:
        #                     tp_fp_labels[i] = True
        #                     is_gt_box_detected[gt_id] = True
        #                     break
        #             else:
        #                 break

        # num_gt = len(img_gt)
        # if ious.shape[1] > 0:
        #     max_overlap_det_ids = np.argsort(-1 * ious, axis=0)
        #     is_det_box_used = np.zeros(ious.shape[0], dtype=bool)
        #     for i in range(num_gt):
        #         for det_id in max_overlap_det_ids[:, i]:
        #             if ious[det_id, i] >= overlap_thresh:
        #                 if not is_det_box_used[det_id]:
        #                     tp_fp_labels[det_id] = True
        #                     is_det_box_used[det_id] = True
        #                     break
        #             else:
        #                 break

        scores_all.append(scores)
        tp_fp_labels_all.append(tp_fp_labels)

    return scores_all, tp_fp_labels_all, num_gt_all, img_keys
7884255c6fb45d6cb01b88edd5017d134f0344b0
2,050
# assumed: standard Pyomo modeling objects used below
from pyomo.environ import (Var, Param, Expression, Constraint,
                           NonNegativeReals, PercentFraction)


def define_components(mod):
    """
    Adds components to a Pyomo abstract model object to describe unit
    commitment for projects. Unless otherwise stated, all power capacity is
    specified in units of MW and all sets and parameters are mandatory.

    -- Commit decision, limits, and headroom --

    CommitProject[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable of
    how much capacity (MW) from each project to commit in each timepoint. By
    default, this operates in continuous mode. Include the
    project.unitcommit.discrete module to force this to operate with discrete
    unit commitment.

    proj_max_commit_fraction[(proj, t) in PROJ_DISPATCH_POINTS] describes the
    maximum commit level as a fraction of available capacity (capacity that
    is built and expected to be available for commitment; derated by annual
    expected outage rate). This has limited use cases, but could be used to
    simulate outages (scheduled or non-scheduled) in a production-cost
    simulation. This optional parameter has a default value of 1.0, indicating
    that all available capacity can be committed. If you wish to have discrete
    unit commitment, I advise overriding the default behavior and specifying a
    more discrete treatment of outages.

    proj_min_commit_fraction[(proj, t) in PROJ_DISPATCH_POINTS] describes the
    minimum commit level as a fraction of available capacity. This is useful
    for describing must-run plants that ensure reliable grid operations, and
    for forcing hydro plants to operate at some minimal level to maintain
    streamflow. This can also be used to specify baseload plants that must be
    run year-round. This optional parameter will default to
    proj_max_commit_fraction for generation technologies marked baseload and
    0 for all other generators.

    CommitLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that
    describes the minimum capacity that must be committed. This is derived
    from installed capacity and proj_min_commit_fraction.

    CommitUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that
    describes the maximum capacity available for commitment. This is derived
    from installed capacity and proj_max_commit_fraction.

    Enforce_Commit_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and
    Enforce_Commit_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are
    constraints that limit CommitProject to the upper and lower bounds
    defined above.

        CommitLowerLimit <= CommitProject <= CommitUpperLimit

    CommitSlackUp[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that
    describes the amount of additional capacity available for commitment:
    CommitUpperLimit - CommitProject

    CommitSlackDown[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that
    describes the amount of committed capacity that could be taken offline:
    CommitProject - CommitLowerLimit

    -- Startup and Shutdown --

    The capacity started up or shutdown is completely determined by the
    change in CommitProject from one hour to the next, but we can't calculate
    these directly within the linear program because linear programs don't
    have if statements. Instead, we'll define extra decision variables that
    are tightly constrained. Since startup incurs costs and shutdown does
    not, the linear program will not simultaneously set both of these to
    non-zero values.

    Startup[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable
    describing how much additional capacity was brought online in a given
    timepoint. Committing additional capacity incurs startup costs for fossil
    plants from fuel requirements as well as additional O&M costs.

    Shutdown[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable
    describing how much committed capacity to take offline in a given
    timepoint.

    Commit_Startup_Shutdown_Consistency[(proj, t) in PROJ_DISPATCH_POINTS] is
    a constraint that forces consistency between commitment decision from one
    hour to the next with startup and shutdown.

    g_startup_fuel[g in FUEL_BASED_GEN] describes fuel requirements of
    starting up additional generation capacity expressed in units of
    MMBTU / MW. This optional parameter has a default value of 0.

    proj_startup_fuel[proj in FUEL_BASED_PROJECTS] is the same as
    g_startup_fuel except on a project basis. This optional parameter
    defaults to g_startup_fuel.

    g_startup_om[g in GENERATION_TECHNOLOGIES] describes operations and
    maintenance costs incurred from starting up additional generation
    capacity expressed in units of $base_year / MW. This could represent
    direct maintenance requirements or some overall depreciation rate from
    accelerated wear and tear. This optional parameter has a default value
    of 0.

    proj_startup_om[proj in PROJECTS] is the same as g_startup_om except on a
    project basis. This optional parameter defaults to g_startup_om.

    Total_Startup_OM_Costs[t in TIMEPOINTS] is an expression for passing
    total startup O&M costs to the sys_cost module.

    -- Dispatch limits based on committed capacity --

    g_min_load_fraction[g] describes the minimum loading level of a
    generation technology as a fraction of committed capacity. Many fossil
    plants - especially baseload - have a minimum run level which should be
    stored here. Note that this is only applied to committed capacity. This
    is an optional parameter that defaults to 1 for generation technologies
    marked baseload and 0 for all other generators. This parameter is only
    relevant when considering unit commitment so it is defined here rather
    than the gen_tech module.

    proj_min_cap_factor[(proj, t) in PROJ_DISPATCH_POINTS] describes the
    minimum loading level for each project and timepoint as a fraction of
    committed capacity. This is an optional parameter that defaults to
    g_min_load_fraction, which in turn defaults to 0. You may wish to vary
    this by timepoint to establish minimum flow rates for hydropower, to
    specify thermal demand for a cogeneration project, or specify must-run
    reliability constraints in a geographically or temporally detailed model.
    This could also be used to constrain dispatch of distributed solar
    resources that cannot be curtailed by the system operator.

    DispatchLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] and
    DispatchUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] are expressions
    that define the lower and upper bounds of dispatch. Lower bounds are
    calculated as CommitProject * proj_min_cap_factor, and upper bounds are
    calculated relative to committed capacity and renewable resource
    availability.

    Enforce_Dispatch_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and
    Enforce_Dispatch_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are
    constraints that limit DispatchProj to the upper and lower bounds defined
    above.

        DispatchLowerLimit <= DispatchProj <= DispatchUpperLimit

    DispatchSlackUp[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that
    describes the amount of additional committed capacity available for
    dispatch: DispatchUpperLimit - DispatchProj

    DispatchSlackDown[(proj, t) in PROJ_DISPATCH_POINTS] is an expression
    that describes the amount by which dispatch could be lowered, that is how
    much downramp potential each project has in each timepoint:
    DispatchProj - DispatchLowerLimit

    """

    # Commitment decision, bounds and associated slack variables
    mod.CommitProject = Var(
        mod.PROJ_DISPATCH_POINTS,
        within=NonNegativeReals)
    mod.proj_max_commit_fraction = Param(
        mod.PROJ_DISPATCH_POINTS,
        within=PercentFraction,
        default=lambda m, proj, t: 1.0)
    mod.proj_min_commit_fraction = Param(
        mod.PROJ_DISPATCH_POINTS,
        within=PercentFraction,
        default=lambda m, proj, t: (
            m.proj_max_commit_fraction[proj, t]
            if proj in m.BASELOAD_PROJECTS
            else 0.0))
    mod.CommitLowerLimit = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.ProjCapacityTP[proj, t] * m.proj_availability[proj] *
            m.proj_min_commit_fraction[proj, t]))
    mod.CommitUpperLimit = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.ProjCapacityTP[proj, t] * m.proj_availability[proj] *
            m.proj_max_commit_fraction[proj, t]))
    mod.Enforce_Commit_Lower_Limit = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, proj, t: (
            m.CommitLowerLimit[proj, t] <= m.CommitProject[proj, t]))
    mod.Enforce_Commit_Upper_Limit = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, proj, t: (
            m.CommitProject[proj, t] <= m.CommitUpperLimit[proj, t]))
    mod.CommitSlackUp = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.CommitUpperLimit[proj, t] - m.CommitProject[proj, t]))
    mod.CommitSlackDown = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.CommitProject[proj, t] - m.CommitLowerLimit[proj, t]))

    # Startup & Shutdown
    mod.Startup = Var(
        mod.PROJ_DISPATCH_POINTS,
        within=NonNegativeReals)
    mod.Shutdown = Var(
        mod.PROJ_DISPATCH_POINTS,
        within=NonNegativeReals)
    mod.Commit_Startup_Shutdown_Consistency = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, pr, t: (
            m.CommitProject[pr, m.tp_previous[t]] +
            m.Startup[pr, t] - m.Shutdown[pr, t] ==
            m.CommitProject[pr, t]))
    mod.g_startup_fuel = Param(mod.FUEL_BASED_GEN, default=0.0)
    mod.g_startup_om = Param(mod.GENERATION_TECHNOLOGIES, default=0.0)
    mod.proj_startup_fuel = Param(
        mod.FUEL_BASED_PROJECTS,
        default=lambda m, pr: m.g_startup_fuel[m.proj_gen_tech[pr]])
    mod.proj_startup_om = Param(
        mod.PROJECTS,
        default=lambda m, pr: m.g_startup_om[m.proj_gen_tech[pr]])
    # Startup costs need to be divided over the duration of the
    # timepoint because it is a one-time expenditure in units of $
    # but cost_components_tp requires an hourly cost rate in $ / hr.
    mod.Total_Startup_OM_Costs = Expression(
        mod.TIMEPOINTS,
        initialize=lambda m, t: sum(
            m.proj_startup_om[proj] * m.Startup[proj, t] / m.tp_duration_hrs[t]
            for (proj, t2) in m.PROJ_DISPATCH_POINTS
            if t == t2))
    mod.cost_components_tp.append('Total_Startup_OM_Costs')

    # Dispatch limits relative to committed capacity.
    mod.g_min_load_fraction = Param(
        mod.GENERATION_TECHNOLOGIES,
        within=PercentFraction,
        default=lambda m, g: 1.0 if m.g_is_baseload[g] else 0.0)
    mod.proj_min_load_fraction = Param(
        mod.PROJ_DISPATCH_POINTS,
        default=lambda m, pr, t: m.g_min_load_fraction[m.proj_gen_tech[pr]])
    mod.DispatchLowerLimit = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, pr, t: (
            m.CommitProject[pr, t] * m.proj_min_load_fraction[pr, t]))

    def DispatchUpperLimit_expr(m, pr, t):
        if pr in m.VARIABLE_PROJECTS:
            return m.CommitProject[pr, t] * m.prj_max_capacity_factor[pr, t]
        else:
            return m.CommitProject[pr, t]
    mod.DispatchUpperLimit = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=DispatchUpperLimit_expr)

    mod.Enforce_Dispatch_Lower_Limit = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, proj, t: (
            m.DispatchLowerLimit[proj, t] <= m.DispatchProj[proj, t]))
    mod.Enforce_Dispatch_Upper_Limit = Constraint(
        mod.PROJ_DISPATCH_POINTS,
        rule=lambda m, proj, t: (
            m.DispatchProj[proj, t] <= m.DispatchUpperLimit[proj, t]))
    mod.DispatchSlackUp = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.DispatchUpperLimit[proj, t] - m.DispatchProj[proj, t]))
    mod.DispatchSlackDown = Expression(
        mod.PROJ_DISPATCH_POINTS,
        initialize=lambda m, proj, t: (
            m.DispatchProj[proj, t] - m.DispatchLowerLimit[proj, t]))
4ad0aae0df9a3953309138dfbc138f944efba74e
2,051
from functools import partial

import pandas as pd
import statsmodels.api as sm


def adjustwithin(df, pCol, withinCols, method='holm'):
    """Apply multiplicity adjustment to a "stacked" pd.DataFrame, adjusting
    within groups defined by combinations of unique values in withinCols

    Parameters
    ----------
    df : pd.DataFrame
        Stacked DataFrame with one column of pvalues
        and other columns to define groups for adjustment.
    pCol : str
        Column containing pvalues.
    withinCols : list
        Columns used to define subgroups/families for adjustment.
    method : str
        An adjustment method for sm.stats.multipletests.
        Use 'holm' for Holm-Bonferroni FWER-adj and
        'fdr_bh' for Benjamini and Hochberg FDR-adj

    Returns
    -------
    adjSeries : pd.Series
        Same shape[0] as df containing adjusted pvalues/adjpvalues."""

    def _transformFunc(ser, method):
        nonNan = ~ser.isnull()
        if nonNan.sum() >= 1:
            rej, adjp, alphas, alphab = sm.stats.multipletests(
                ser.loc[nonNan].values, method=method)
            out = ser.copy(deep=True)
            out.loc[nonNan] = adjp
            return out
        else:
            return ser

    if not len(withinCols) == 0:
        gby = df[[pCol] + withinCols].groupby(withinCols)
        adjDf = gby.transform(partial(_transformFunc, method=method))
        # adjDf = df.drop(pCol, axis=1).join(adjDf)
    else:
        adjDf = pd.Series(adjustnonnan(df.loc[:, pCol], method=method),
                          index=df.index, name='adjusted-pvalue')
    return adjDf
4040c53def07ce5353c111036887b5df4666684c
2,052
from urllib.parse import urlparse, parse_qsl


def parse_url_query_params(url, fragment=True):
    """Parse url query params

    :param fragment: bool: flag is used for parsing oauth url
    :param url: str: url string
    :return: dict
    """
    parsed_url = urlparse(url)
    if fragment:
        url_query = parse_qsl(parsed_url.fragment)
    else:
        url_query = parse_qsl(parsed_url.query)
    # login_response_url_query can have multiple keys
    url_query = dict(url_query)
    return url_query
252d2ccfb2fb15db041e97908c982dae9bf3c1ef
2,053
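Example calls for the helper above, covering both the OAuth-style fragment and a plain query string (the URLs are made up):

assert parse_url_query_params('https://app.example/cb#access_token=abc&state=xyz') == \
    {'access_token': 'abc', 'state': 'xyz'}
assert parse_url_query_params('https://app.example/cb?page=2&sort=name', fragment=False) == \
    {'page': '2', 'sort': 'name'}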
import math

import torch


def sample_random_lightdirs(num_rays, num_samples, upper_only=False):
    """Randomly sample directions in the unit sphere.

    Args:
        num_rays: int or tensor shape dimension. Number of rays.
        num_samples: int or tensor shape dimension. Number of samples per ray.
        upper_only: bool. Whether to sample only on the upper hemisphere.

    Returns:
        lightdirs: [R, S, 3] float tensor. Random light directions sampled from
            the unit sphere for each sampled point.
    """
    if upper_only:
        min_z = 0
    else:
        min_z = -1

    phi = torch.rand(num_rays, num_samples) * (2 * math.pi)  # [R, S]
    cos_theta = torch.rand(num_rays, num_samples) * (1 - min_z) + min_z  # [R, S]
    theta = torch.acos(cos_theta)  # [R, S]

    x = torch.sin(theta) * torch.cos(phi)
    y = torch.sin(theta) * torch.sin(phi)
    z = torch.cos(theta)

    lightdirs = torch.cat((x[..., None], y[..., None], z[..., None]), dim=-1)  # [R, S, 3]

    return lightdirs
7f7657ff66d0cffea6892dffdf49ba6b52b9def9
2,054
import numpy as np


def gaussgen(sigma):
    """
    Function to generate Gaussian kernels, in 1D, 2D and 3D.
    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015

    :param sigma: Sigma for use in generating Gaussian kernel (see defaults in generate_FSL_structure_tensor)
    :return: Gaussian kernel with dimensions of sigma.
    """
    halfsize = np.ceil(3 * max(sigma))
    x = np.arange(-halfsize, halfsize + 1)  # was: range(np.single(-halfsize), np.single(halfsize + 1))
    dim = len(sigma)

    if dim == 1:
        x = x.astype(float)
        k = np.exp(-x ** 2 / (2 * sigma[0] ** 2))  # was: sigma ^ 2 (bitwise XOR on the sequence)
    elif dim == 2:
        [X, Y] = np.meshgrid(x, x)
        X = X.astype(float)
        Y = Y.astype(float)
        k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2))
    elif dim == 3:
        [X, Y, Z] = np.meshgrid(x, x, x)
        X = X.transpose(0, 2, 1)  # Obtained through vigorous testing (see below...)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        X = X.astype(float)
        Y = Y.astype(float)
        Z = Z.astype(float)
        k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2)) * np.exp(
            -Z ** 2 / (2 * sigma[2] ** 2))
    else:
        print('Only supports up to dimension 3')

    return np.divide(k, np.sum(np.abs(k)))
7673e3fb8ddbb7bbb646331a24380581a7af9617
2,055
import types
from typing import List, Text


def metrics_specs_from_keras(
    model_name: Text,
    model_loader: types.ModelLoader,
) -> List[config.MetricsSpec]:
    """Returns metrics specs for metrics and losses associated with the model."""
    model = model_loader.construct_fn()
    if model is None:
        return []

    metric_names = []
    metrics = []
    if hasattr(model, 'loss_functions'):
        # Legacy keras metrics separate the losses from the metrics and store them
        # under loss_functions. The first name in metric_names is always 'loss'
        # followed by the loss_function names (prefixed by output_name if multiple
        # outputs) and then followed by the metric names (also prefixed by output
        # name). Note that names in loss_functions will not have any output name
        # prefixes (if used) while the metrics will so we need to use the names in
        # metric_names for matching with outputs not the names in the functions.
        metric_names = model.metrics_names
        metrics.extend(model.loss_functions)
        metrics.extend(model.metrics)
        if len(metric_names) > len(metrics) and metric_names[0] == 'loss':
            metric_names = metric_names[1:]
    elif hasattr(model, 'compiled_loss') and hasattr(model, 'compiled_metrics'):
        # In the new keras metric setup the metrics include the losses (in the form
        # of a metric type not a loss type) and the metrics_names align with the
        # names in the metric classes. The metrics itself contains compiled_loss,
        # compiled_metrics, and custom metrics (added via add_metric). Since we
        # only care about compiled metrics we use these APIs instead. Note that the
        # overall loss metric is an average of the other losses which doesn't take
        # y_true, y_pred as inputs so it can't be calculated via standard inputs so
        # we remove it.
        metrics.extend(model.compiled_loss.metrics[1:])
        metrics.extend(model.compiled_metrics.metrics)
        metric_names = [m.name for m in metrics]

    specs = []

    # Need to check if model.output_names exists because the keras Sequential
    # model doesn't always contain output_names (b/150510258).
    if hasattr(model, 'output_names') and len(model.output_names) > 1:
        unmatched_metrics = {m for m in metrics}
        for output_name in model.output_names:
            per_output_metrics = []
            for (name, metric) in zip(metric_names, metrics):
                if name.startswith(output_name + '_'):
                    per_output_metrics.append(metric)
                    unmatched_metrics.remove(metric)
            if per_output_metrics:
                specs.extend(
                    metric_specs.specs_from_metrics(
                        metrics=per_output_metrics,
                        model_names=[model_name],
                        output_names=[output_name],
                        include_example_count=False,
                        include_weighted_example_count=False))
        metrics = list(unmatched_metrics)

    if metrics:
        specs.extend(
            metric_specs.specs_from_metrics(
                metrics=metrics,
                model_names=[model_name],
                include_example_count=False,
                include_weighted_example_count=False))

    return specs
fd471d20782507e983abec5610115e83c59ed7e0
2,056
def __main__(recipe, params):
    """
    Main code: should only call recipe and params (defined from main)

    :param recipe:
    :param params:
    :return:
    """
    # ----------------------------------------------------------------------
    # Main Code
    # ----------------------------------------------------------------------
    # This is just a test
    if 'TEXT' in params['INPUTS']:
        if params['INPUTS']['TEXT'] not in ['None', None, '']:
            WLOG(params, '', params['INPUTS']['TEXT'])
    # ----------------------------------------------------------------------
    # End of main code
    # ----------------------------------------------------------------------
    return core.return_locals(params, locals())
3e9fc1006457be759e1e0b05f36c00297f0c5f4c
2,057
from math import log, pi


def AICrss(n, k, rss):
    """Calculate the Akaike Information Criterion value, using:

    - n: number of observations
    - k: number of parameters
    - rss: residual sum of squares
    """
    return n * log((2 * pi) / n) + n + 2 + n * log(rss) + 2 * k
988345930a8544d2979b99d6400198d3a59fa85c
2,058
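The return value above is algebraically the least-squares form of AIC, n*ln(2*pi*RSS/n) + n + 2*(k + 1); a quick sanity check, assuming AICrss is in scope:

from math import log, pi

n, k, rss = 100, 3, 12.5
assert abs(AICrss(n, k, rss) - (n * log(2 * pi * rss / n) + n + 2 * (k + 1))) < 1e-9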
from typing import Optional

# assumed: python-jose provides the jwt module and these exceptions
from jose import jwt
from jose.exceptions import ExpiredSignatureError, JWTClaimsError, JWTError


def get_username_from_access_token(token: str, secret_key: str, algorithm: str) -> Optional[str]:
    """
    Decodes a token and returns the "sub" (= username) of the decoded token

    :param token: JWT access token
    :param secret_key: The secret key that should be used for token decoding
    :param algorithm: The algorithm that should be used for token decoding (like HS256)
    :return: Username
    """
    try:
        payload = jwt.decode(token, secret_key, algorithms=[algorithm])
        username: str = payload.get("sub")
        if not username:
            raise credentials_exception
        return username
    except (JWTError, ExpiredSignatureError, JWTClaimsError):
        raise credentials_exception
461ce205b43961af25c77af4d3902d1342bba32a
2,061
def date_handler(obj):
    """make datetime object json serializable.

    Notes
    -----
    Taken from here:
        https://tinyurl.com/yd84fqlw
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    else:
        raise TypeError
741867e05e1b5f3e9d0e042b3b1576fb61ab0219
2,063
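Typical use is as the `default` hook of `json.dumps`; a small check assuming the handler above is in scope:

import json
from datetime import datetime

payload = {'created': datetime(2020, 1, 2, 3, 4, 5)}
assert json.dumps(payload, default=date_handler) == '{"created": "2020-01-02T03:04:05"}'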
def has_type(typestmt, names):
    """Return type with name if `type` has name as one of its base types,
    and name is in the `names` list. otherwise, return None."""
    if typestmt.arg in names:
        return typestmt
    for t in typestmt.search('type'):  # check all union's member types
        r = has_type(t, names)
        if r is not None:
            return r
    typedef = getattr(typestmt, 'i_typedef', None)
    if typedef is not None and getattr(typedef, 'i_is_circular', None) is False:
        t = typedef.search_one('type')
        if t is not None:
            return has_type(t, names)
    return None
d534331df62f76efdcbb93be52eb57ee600a7783
2,064
from astropy.table import Table


def generic_ecsv(file_name, column_mapping=None, **kwargs):
    """
    Read a spectrum from an ECSV file, using generic_spectrum_from_table_loader()
    to try to figure out which column is which.
    The ECSV columns must have units, as `generic_spectrum_from_table_loader`
    depends on this to determine the meaning of the columns.  For manual
    control over the column to spectrum mapping, use the ASCII loader.

    Parameters
    ----------
    file_name: str
        The path to the ECSV file.
    column_mapping : dict
        A dictionary describing the relation between the ECSV file columns
        and the arguments of the `Spectrum1D` class, along with unit
        information. The dictionary keys should be the ECSV file column names
        while the values should be a two-tuple where the first element is the
        associated `Spectrum1D` keyword argument, and the second element is
        the unit for the ECSV file column::

            column_mapping = {'FLUX': ('flux', 'Jy')}

    Returns
    -------
    data: Spectrum1D
        The spectrum that is represented by the data in this table.
    """
    table = Table.read(file_name, format='ascii.ecsv')
    if column_mapping is None:
        return generic_spectrum_from_table(table, **kwargs)

    return spectrum_from_column_mapping(table, column_mapping)
0c9ac3a8d31a449e698907e02ad4715868844403
2,065
def parse_valuation_line(s, encoding=None):
    """
    Parse a line in a valuation file.

    Lines are expected to be of the form::

      noosa => n
      girl => {g1, g2}
      chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}

    :param s: input line
    :type s: str
    :param encoding: the encoding of the input string, if it is binary
    :type encoding: str
    :return: a pair (symbol, value)
    :rtype: tuple
    """
    if encoding is not None:
        s = s.decode(encoding)
    pieces = _VAL_SPLIT_RE.split(s)
    symbol = pieces[0]
    value = pieces[1]
    # check whether the value is meant to be a set
    if value.startswith('{'):
        value = value[1:-1]
        tuple_strings = _TUPLES_RE.findall(value)
        # are the set elements tuples?
        if tuple_strings:
            set_elements = []
            for ts in tuple_strings:
                ts = ts[1:-1]
                element = tuple(_ELEMENT_SPLIT_RE.split(ts))
                set_elements.append(element)
        else:
            set_elements = _ELEMENT_SPLIT_RE.split(value)
        value = set(set_elements)
    return symbol, value
aebd7ca9e4e321069a04536f281230b5cd23cceb
2,066
from datetime import datetime

import pandas as pd
import requests
from bs4 import BeautifulSoup


def scrape_dailykos(keywords=KEYWORDS):
    """
    Scrapes news article titles from dailykos.com
    """
    dk_request = requests.get('https://www.dailykos.com')
    dk_homepage = dk_request.content
    dk_soup = BeautifulSoup(dk_homepage, 'html.parser')
    dk_tags = dk_soup.find_all('div', class_='cell-wrapper')
    dk_links = ['https://www.dailykos.com' + tag.find('a')['href'] for tag in dk_tags]
    dk_links = [link for link in dk_links
                if any(keyword in link for keyword in keywords)]

    # get article titles and dates
    dk_titles = []
    dk_dates = []

    for link in dk_links:
        # prep article content
        article = requests.get(link)
        article_content = article.content
        soup_article = BeautifulSoup(article_content, 'html5lib')

        # get article title
        dk_titles.append(soup_article.find('title').get_text())

        # get publication date
        date = str(soup_article.find('span', class_='timestamp'))
        dk_dates.append(date[len(date) - 21:-7])

    # format dates (`datetime` here is the class imported above)
    dk_dates = [datetime.strptime(date, '%B %d, %Y').strftime('%Y-%m-%d')
                for date in dk_dates]

    # assembling data
    dailykos_data = pd.DataFrame.from_dict({
        'publisher': 'dailykos',
        'date': dk_dates,
        'link': dk_links,
        'article_title': dk_titles
    })

    dailykos_data.drop_duplicates(inplace=True)

    return dailykos_data
a6b5cbffce87f75c7561bc8939247f80bb10ae11
2,067
import pandas as pd


def parse_rows(m: utils.Matrix[str]) -> pd.DataFrame:
    """Parse rows to DataFrame, expecting specific columns and types."""
    if len(m) < 2:
        logger.error('More than one line expected in {}'.format(str(m)))
        return pd.DataFrame()

    # parse data rows and add type casting
    cols = len(m[0])
    df = pd.DataFrame([row for row in m[1:] if len(row) == cols], columns=m[0])
    pairs = (('Market Value', utils.str_to_float),
             ('Weight (%)', utils.str_to_float),
             ('Notional Value', utils.str_to_float),
             ('Shares', utils.str_to_int),
             ('Price', utils.str_to_float),
             ('FX Rate', utils.str_to_float),
             ('Accrual Date', utils.parse_date_name))
    for col, f in pairs:
        try:
            df[col] = df[col].apply(f)
        except Exception as e:
            logger.error('Error when casting {}: {}'.format(col, e))

    return df
46749bccf7af71256e1f1d490e1a2f241ed0c4d9
2,068
import base64
import struct


def tiny_id(page_id):
    """Return *tiny link* ID for the given page ID."""
    return base64.b64encode(struct.pack('<L', int(page_id)).rstrip(b'\0'),
                            altchars=b'_-').rstrip(b'=').decode('ascii')
1a37b814ff9845949c3999999b61f79b26dacfdc
2,069
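A quick check of the encoding above (page ID packed little-endian, trailing NULs stripped, URL-safe base64 without padding), assuming tiny_id is in scope:

import struct

assert struct.pack('<L', 12345).rstrip(b'\x00') == b'90'
assert tiny_id(12345) == 'OTA'   # base64(b'90') == 'OTA=' minus the padding
assert tiny_id(1) == 'AQ'        # b'\x01' -> 'AQ==' -> 'AQ'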