content : string (lengths 35 to 762k)
sha1 : string (length 40)
id : int64 (0 to 3.66M)
from pathlib import Path
import os
import logging

import numpy as np
from sklearn.datasets import fetch_openml


def mnist_10K_cluster(dataset_dir: Path) -> bool:
    """
    Abstract: The MNIST database of handwritten digits with 784 features.
    It can be split into a training set of the first 60,000 examples and a test set of 10,000 examples.
    Source: Yann LeCun, Corinna Cortes, Christopher J.C. Burges
    http://yann.lecun.com/exdb/mnist/

    Clustering task. n_classes = 10.
    mnist x clustering dataset (10000, 785)
    """
    dataset_name = 'mnist_10K_cluster'
    os.makedirs(dataset_dir, exist_ok=True)

    nrows_train, dtype = 10000, np.float32
    X, y = fetch_openml(name='mnist_784', return_X_y=True, as_frame=True, data_home=dataset_dir)
    y = y.astype(int)
    logging.info(f'{dataset_name} is loaded, started parsing...')

    x_train = np.ascontiguousarray(X.values[:nrows_train, 1:], dtype=dtype)
    y_train = np.ascontiguousarray(y.values[:nrows_train], dtype=dtype)

    filename = f'{dataset_name}.npy'
    data = np.concatenate((x_train, y_train[:, None]), axis=1)
    np.save(os.path.join(dataset_dir, filename), data)
    logging.info(f'dataset {dataset_name} is ready.')
    return True
1500387fca6b8b6528fb391649567fa8fe616197
3,652,500
def molefraction_2_pptv(n):
    """Convert mixing ratio units from mole fraction to parts per thousand by volume (pptv)

    INPUTS
    n: mole fraction (moles per mole air)

    OUTPUTS
    q: mixing ratio in parts per thousand by volume (pptv)
    """
    # - start with COS mixing ratio n as mole fraction:
    #   (n mol COS) / (mol air)
    # convert to mixing ratio as volumetric fraction
    #   = (n * 6.023 * 10^23 molecules COS) / (6.023 * 10^23 molecules air)
    #   = (q molecules COS) / (1000 molecules air)
    # q is mixing ratio in pptv, n is mole fraction
    # solve for q --> 1000n = q
    # therefore pptv = 1000 * mole fraction
    q = 1e3 * n
    return q
a6a26267f45fb70c346e86421c427bd155bfa65a
3,652,501
import warnings

import numpy as np


def is_valid_y(y, warning=False, throw=False, name=None):
    """
    """
    y = np.asarray(y, order='c')
    valid = True
    try:
        if len(y.shape) != 1:
            if name:
                raise ValueError(('Condensed distance matrix \'%s\' must '
                                  'have shape=1 (i.e. be one-dimensional).') % name)
            else:
                raise ValueError('Condensed distance matrix must have shape=1 '
                                 '(i.e. be one-dimensional).')
        n = y.shape[0]
        d = int(np.ceil(np.sqrt(n * 2)))
        if (d * (d - 1) / 2) != n:
            if name:
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, i.e. '
                                  'there must be a k such that '
                                  '(k \\choose 2)=n)!') % name)
            else:
                raise ValueError('Length n of condensed distance matrix must '
                                 'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k \\choose 2)=n)!')
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e))
        valid = False
    return valid
6c3f56c8b931b325d521805902b526054f21e22d
3,652,502
import json

import yaml


def yaml_parse(yamlstr):
    """Parse a yaml string"""
    try:
        # PyYAML doesn't support json as well as it should, so if the input
        # is actually just json it is better to parse it with the standard
        # json parser.
        return json.loads(yamlstr)
    except ValueError:
        yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)
        return yaml.safe_load(yamlstr)
8586e1e39ae9f0933b6552531d72cb3a6516f615
3,652,503
import collections def csl_item_from_pubmed_article(article): """ article is a PubmedArticle xml element tree https://github.com/citation-style-language/schema/blob/master/csl-data.json """ csl_item = collections.OrderedDict() if not article.find("MedlineCitation/Article"): raise NotImplementedError("Unsupported PubMed record: no <Article> element") title = article.findtext("MedlineCitation/Article/ArticleTitle") if title: csl_item["title"] = title volume = article.findtext("MedlineCitation/Article/Journal/JournalIssue/Volume") if volume: csl_item["volume"] = volume issue = article.findtext("MedlineCitation/Article/Journal/JournalIssue/Issue") if issue: csl_item["issue"] = issue page = article.findtext("MedlineCitation/Article/Pagination/MedlinePgn") if page: csl_item["page"] = page journal = article.findtext("MedlineCitation/Article/Journal/Title") if journal: csl_item["container-title"] = journal journal_short = article.findtext("MedlineCitation/Article/Journal/ISOAbbreviation") if journal_short: csl_item["container-title-short"] = journal_short issn = article.findtext("MedlineCitation/Article/Journal/ISSN") if issn: csl_item["ISSN"] = issn date_parts = extract_publication_date_parts(article) if date_parts: csl_item["issued"] = {"date-parts": [date_parts]} authors_csl = list() authors = article.findall("MedlineCitation/Article/AuthorList/Author") for author in authors: author_csl = collections.OrderedDict() given = author.findtext("ForeName") if given: author_csl["given"] = given family = author.findtext("LastName") if family: author_csl["family"] = family authors_csl.append(author_csl) if authors_csl: csl_item["author"] = authors_csl for id_type, key in ("pubmed", "PMID"), ("pmc", "PMCID"), ("doi", "DOI"): xpath = f"PubmedData/ArticleIdList/ArticleId[@IdType='{id_type}']" value = article.findtext(xpath) if value: csl_item[key] = value.lower() if key == "DOI" else value abstract = article.findtext("MedlineCitation/Article/Abstract/AbstractText") if abstract: csl_item["abstract"] = abstract csl_item["URL"] = f"https://www.ncbi.nlm.nih.gov/pubmed/{csl_item['PMID']}" csl_item["type"] = "article-journal" return csl_item
889bb8bbbafd85607936db7caeb33140c4e356fb
3,652,504
import dateutil.parser


def image(resource: celtypes.MapType) -> celtypes.Value:
    """
    Reach into C7N to get the image details for this EC2 or ASG resource.

    Minimally, the creation date is transformed into a CEL timestamp.
    We may want to slightly generalize this to json_to_cell() the entire Image object.

    The following may be usable, but it seems too complex:

    ::

        C7N.filter.prefetch_instance_images(C7N.policy.resources)
        image = C7N.filter.get_instance_image(resource["ImageId"])
        return json_to_cel(image)

    ..  todo:: Refactor C7N

        Provide the :py:class:`InstanceImageBase` mixin in a :py:class:`CELFilter` class.
        We want to have the image details in the new :py:class:`CELFilter` instance.
    """
    # Assuming the :py:class:`CELFilter` class has this method extracted from the legacy filter.
    # Requires the policy already did this: C7N.filter.prefetch_instance_images([resource]) to
    # populate the cache.
    image = C7N.filter.get_instance_image(resource)

    if image:
        creation_date = image["CreationDate"]
        image_name = image["Name"]
    else:
        creation_date = "2000-01-01T01:01:01.000Z"
        image_name = ""

    return json_to_cel(
        {"CreationDate": dateutil.parser.isoparse(creation_date), "Name": image_name}
    )
f5125f6b5afb070d7aa7767a4acdb4afc82c90b8
3,652,505
import numpy as np


def unphase_uvw(ra, dec, uvw):
    """
    Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.

    This code expects phased uvws or positions in the same frame that ra/dec
    are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.

    Parameters
    ----------
    ra : float
        Right ascension of phase center.
    dec : float
        Declination of phase center.
    uvw : ndarray of float
        Phased uvws or positions relative to the array center,
        shape (Nlocs, 3).

    Returns
    -------
    unphased_uvws : ndarray of float
        Unphased uvws or positions relative to the array center,
        shape (Nlocs, 3).
    """
    if uvw.ndim == 1:
        uvw = uvw[np.newaxis, :]

    return _utils._unphase_uvw(
        np.float64(ra),
        np.float64(dec),
        np.ascontiguousarray(uvw, dtype=np.float64),
    )
a6e3d1371ed612bd1ece08fc6fabd4ee77241603
3,652,506
import uuid


def sender_msg_to_array(msg):
    """
    Parse a list argument as returned by L{array_to_msg} function of this module,
    and returns the numpy array contained in the message body.

    @param msg: a list as returned by L{array_to_msg} function
    @rtype: numpy.ndarray
    @return: The numpy array contained in the message
    """
    [_dtype, _shape, _bin_msg] = msg_to_array(msg[2:])
    _uuid = uuid.UUID(bytes=msg[0])
    _data_name = msg[1].decode()
    return (_uuid, _data_name, _dtype, _shape, _bin_msg)
c959a535a4f47c86f9520167fa59dc8eecc70071
3,652,507
def find_shortest_path(node):
    """Finds shortest path from node to its neighbors"""
    next_node, next_min_cost = node.get_min_cost_neighbor()
    if str(next_node) != str(node):
        return find_shortest_path(next_node)
    else:
        return node
4fa3979ff665b5cf8df423ff9b3fbaa880d62a73
3,652,508
from .features import Sequence, get_nested_type def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True): """Cast an array to the arrow type that corresponds to the requested feature type. For custom features like Audio or Image, it takes into account the "cast_storage" methods they defined to enable casting from other arrow types. Args: array (pa.Array): the PyArrow array to cast feature (FeatureType): the target feature type allow_number_to_str (bool, default ``True``): Whether to allow casting numbers to strings. Defaults to True. Raises: pa.ArrowInvalidError: if the arrow data casting fails TypeError: if the target type is not supported according, e.g. - if a field is missing = if casting from numbers to strings and allow_number_to_str is False Returns: pa.Array: the casted array """ _c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str) if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "cast_storage"): return feature.cast_storage(array) elif pa.types.is_struct(array.type): # feature must be a dict or Sequence(subfeatures_dict) if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } if isinstance(feature, dict) and set(field.name for field in array.type) == set(feature): arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) if isinstance(feature, list): return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature[0])) elif isinstance(feature, Sequence): if feature.length > -1: if feature.length * len(array) == len(array.values): return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length) else: return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature.feature)) elif pa.types.is_fixed_size_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) if isinstance(feature, list): return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature[0])) elif isinstance(feature, Sequence): if feature.length > -1: if feature.length * len(array) == len(array.values): return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length) else: offsets_arr = pa.array(range(len(array) + 1), pa.int32()) return pa.ListArray.from_arrays(offsets_arr, _c(array.values, feature.feature)) if pa.types.is_null(array.type): return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str) elif not isinstance(feature, (Sequence, dict, list, tuple)): return array_cast(array, feature(), allow_number_to_str=allow_number_to_str) raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
28c3275445a79e869b8dfe5ec49522ff10ca6842
3,652,509
from flask import jsonify, request  # assumed: request.get_json()/jsonify usage implies a Flask view


def check_key_match(config_name):
    """
    Check key matches

    @param config_name: Name of WG interface
    @type config_name: str

    @return: Return dictionary with status
    """
    data = request.get_json()
    private_key = data['private_key']
    public_key = data['public_key']
    return jsonify(f_check_key_match(private_key, public_key, config_name))
00a3e78e403a54b16558e21e2c6d095560f272d0
3,652,510
from django.contrib import messages  # standard Django imports implied by usage
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect


def delete_user_group(request, group_id, *args, **kwargs):
    """This one is not really deleting the group object, rather setting the active
    status to False (delete) which can be later restored (undelete)."""
    try:
        hydroshare.set_group_active_status(request.user, group_id, False)
        messages.success(request, "Group delete was successful.")
    except PermissionDenied:
        messages.error(request, "Group delete errors: You don't have permission to delete"
                                " this group.")
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
acad59484befdc5448031343aad47989e9c67d64
3,652,511
from typing import List


def _generate_room_square(dungen: DungeonGenerator, room_data: RoomConceptData) -> RoomConcept:
    """
    Generate a square-shaped room.
    """
    map_width = dungen.map_data.width
    map_height = dungen.map_data.height

    # ensure not bigger than the map
    room_width = min(dungen.rng.randint(room_data.min_width, room_data.max_width), map_width)
    room_height = min(dungen.rng.randint(room_data.min_height, room_data.max_height), map_height)

    # populate area with floor categories
    tile_categories: List[List[TileCategoryType]] = []
    for x in range(room_width):
        tile_categories.append([])
        for y in range(room_height):
            tile_categories[x].append(TileCategory.FLOOR)

    # convert to room
    room = RoomConcept(tile_categories=tile_categories, design="square", key=room_data.key)

    return room
d147f64aed8491ce9b4714f61b64f971683d913e
3,652,512
import json import os import torch def query(request): """ 响应前端返回的数据并进行相应的推荐 :param request: :return: """ content = {} if request.method=='POST': datatype = json.loads(request.body.decode('utf-8')) #得到前端返回的数据 province_all = datatype['all'] current_loc = datatype['currentLocation'] # 得到当前的省份,以这份省份为基准点算出经纬度 if province_all == 'true': #如果判断是全国的话,就是会直接将全国的数据返回 provinces_loc = province # provinces_loc_sorted = province else: provinces_loc = datatype['regions'] # 得到所需要的大学 distance = [] for i in range(len(provinces_loc)): distance.append(cal_distance(current_loc, provinces_loc[i])) provinces_loc_sorted = [x['pos'] for y, x in sorted(zip(distance, provinces_loc))] # 排序后的省份,按照距离来进行排序 colleges = [] rank = (datatype['rank']) #获得排名 category = datatype['category'] #获得类别 if len(rank) == 0 or len(category) == 0 or len(provinces_loc) == 0 :#如果有一个是空则返回500状态码 return JsonResponse({ 'status_code':500 }) else: rank = float(rank)#将rank变成float型来方便判断 temp=[] data={} col_majors = {} if category=='理科': #判断是否为理科 pdir = os.path.dirname(os.getcwd()) # 获取父目录 file = os.path.join(pdir, 'python14\江苏理科字典.pt') predicted_data = torch.load(file) #导入理科的预测的排名 for province_loc in provinces_loc_sorted: colleges_carrier = models.Colleges.objects.filter(provinceID=province.index(province_loc))#找到学校 for college in colleges_carrier: major_carrier = models.Majors.objects.filter(provinceID=4,collegeID=college.collegeID, categoryID=2)#找到专业 if len(major_carrier)!= 0: majors_ranks = predicted_data[college.collegeName]#从预测数据中找到rank for major_rank in majors_ranks: possibility = cal_possibility(float(rank), float(major_rank['rank']), float(major_rank['cov'])) # 计算相应的概率 if possibility>=0.2:#设置了一个推荐的阈值来控制推荐数量 data['major'] = major_rank['major'] data['rank'] = major_rank['rank'] data['possibility'] = round(possibility*5,3) temp.append(data.copy()) if len(temp) !=0:#如果不为空则可以代表有推荐的学校和专业 col_majors[college.collegeName]=temp[:] colleges.append(college.collegeName) temp.clear() if category == '文科': pdir = os.path.dirname(os.getcwd()) # 获取父目录 file = os.path.join(pdir, 'python14\江苏文科字典.pt') predicted_data = torch.load(file) for province_loc in provinces_loc_sorted: colleges_carrier = models.Colleges.objects.filter(provinceID=province.index(province_loc)) for college in colleges_carrier: major_carrier = models.Majors.objects.filter(provinceID=4,collegeID=college.collegeID,categoryID=1) if len(major_carrier)!=0: majors_ranks = predicted_data[college.collegeName] for major_rank in majors_ranks: possibility = cal_possibility(float(rank), float(major_rank['rank']), float(major_rank['cov'])) if possibility>=0.2: data['major'] = major_rank['major'] data['rank'] = major_rank['rank'] data['possibility'] = round(possibility*5,3) temp.append(data.copy()) if len(temp) != 0: col_majors[college.collegeName] = temp[:] colleges.append(college.collegeName) temp.clear() content={ 'colleges':colleges, 'status_code':200, 'col_majors':col_majors } return JsonResponse(content)
c44b9d18d564dca1b544429b8d4dba1a7ab8a568
3,652,513
def str_is_float(value):
    """Test if a string can be parsed into a float.

    :returns: True or False
    """
    try:
        _ = float(value)
        return True
    except ValueError:
        return False
08f2e30f134479137052fd821e53e050375cd11e
3,652,514
import sys from pathlib import Path import os import requests import fnmatch import tempfile import shutil import json def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False): #for bert-based-uncased, url is https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json #also, cache_dir is the following: /Users/msanatkar/.cache/torch/transformers """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE #TRANSFORMERS_CACHE is equal to /Users/msanatkar/.cache/torch/transformers if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) #sys.version_info[0] returns the Python version if sys.version_info[0] == 2 and not isinstance(cache_dir, str): cache_dir = str(cache_dir) if not os.path.exists(cache_dir): os.makedirs(cache_dir) #cache_dir is equal to /Users/msanatkar/.cache/torch/transformers # Get eTag to add to filename, if it exists. if url.startswith("s3://"): #url for BERT starts with https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json so it doesn't satisfy this of condition etag = s3_etag(url, proxies=proxies) else: try: response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout) #head method make a head request to a webpage and returns the HTTP header if response.status_code != 200: #response code 200 refers to an OK response and no error etag = None else: etag = response.headers.get("ETag") #The ETag HTTP response header is an identifier for a specific version of a resource. It lets caches be more efficient and save bandwidth, #as a web server does not need to resend a full response if the content has not changed #ETage for bert-base-uncased is 74d4f96fdabdd865cbdbe905cd46c1f1 except (EnvironmentError, requests.exceptions.Timeout): etag = None if sys.version_info[0] == 2 and etag is not None: etag = etag.decode('utf-8') filename = url_to_filename(url, etag) #etag for bert-base-uncased is 74d4f96fdabdd865cbdbe905cd46c1f1 and url is the following: #https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json #filaname will be a str that is concatenation of the hashcode of the urlpath and the hashcode of the etag str # get cache path to put the file cache_path = os.path.join(cache_dir, filename) #cache_dir is equal to /Users/msanatkar/.cache/torch/transformers #cache_path for bert-base-uncased is the following: #/Users/msanatkar/.cache/torch/transformers/4dad0251492946e18ac39290fcfe91b89d370fee250efe9521476438fe8ca185.bf3b9ea126d8c0001ee8a1e8b92229871d06d36d8808208cc2449280da87785c # If we don't have a connection (etag is None) and can't identify the file # try to get the last downloaded one if not os.path.exists(cache_path) and etag is None: #here, in this if condition, we are saying if cache_path doesn't exist which means that we never downloaded this json config file in .cache before #and if we do not have access to the internet wihch is confirmed by etag being None, then, we try to find to get the latest downloaded one matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*') #os.listdir will return all the files and directories in cache_dir which is /Users/msanatkar/.cache/torch/transormers #in above, fnmatch returns a sublist of files returned by listdir that matches the hash-based filename corresponding to this config json file 
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files)) #in above, we only choose those files that do not end with ".json". It seems that for every encoder model, there exist two files in .cache #one of them is a josn file which will be the json config file describing the architecture of that model and the other one does not end with #json that must contain the weigths of the network if matching_files: cache_path = os.path.join(cache_dir, matching_files[-1]) if resume_download: #resume_download is for those cases that for some reason the downloading process of the files was interupted before and here we want to resume the #download instead of starting from scratch incomplete_path = cache_path + '.incomplete' @contextmanager def _resumable_file_manager(): with open(incomplete_path,'a+b') as f: yield f os.remove(incomplete_path) temp_file_manager = _resumable_file_manager if os.path.exists(incomplete_path): resume_size = os.stat(incomplete_path).st_size else: resume_size = 0 else: temp_file_manager = tempfile.NamedTemporaryFile #here, temp_file_manager will be a temporary file that later on when the download is complete can be moved to the actual cache folder resume_size = 0 #in below, we download the config file either if we didn't downlaod it before or the option force_download is True. Note: we never enable #force_download because we are not crazy! if not os.path.exists(cache_path) or force_download: # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): #for huggingface files, they don't start with s3 if resume_download: logger.warn('Warning: resumable downloads are not implemented for "s3://" urls') s3_get(url, temp_file, proxies=proxies) else: #http_get downloads the file and writes its content into temp_file http_get(url, temp_file, proxies=proxies, resume_size=resume_size)#resume_size will be zero if we didn't enable resume option #here, url refer to a json config file .json # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() #flush method ensures that all the buffered data, are written into file # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, 'wb') as cache_file: shutil.copyfileobj(temp_file, cache_file) #I believe cache_path here doesn't end with .json. In particular, if you look into .cache/torch/transformers, then there are bunch #of different resources which all of them have similar names hash(model_name).hash(url) with no .json suffix. Some of these files are #simply json config files of models and the other could be other resources like the weigths files. The json files inside the cache folder #reperesent the url path of the resource as well as the etag version. below, you can find how this json meta file is created! 
logger.info("creating metadata file for %s", cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w') as meta_file: output_string = json.dumps(meta) if sys.version_info[0] == 2 and isinstance(output_string, str): output_string = unicode(output_string, 'utf-8') # The beauty of python 2 meta_file.write(output_string) logger.info("removing temp file %s", temp_file.name) return cache_path
d2c52e32bf344ccc4466478f15418e954bbf35db
3,652,515
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Ruckus Unleashed from a config entry."""
    try:
        ruckus = await hass.async_add_executor_job(
            Ruckus,
            entry.data[CONF_HOST],
            entry.data[CONF_USERNAME],
            entry.data[CONF_PASSWORD],
        )
    except ConnectionError as error:
        raise ConfigEntryNotReady from error

    coordinator = RuckusUnleashedDataUpdateCoordinator(hass, ruckus=ruckus)

    await coordinator.async_refresh()
    if not coordinator.last_update_success:
        raise ConfigEntryNotReady

    hass.data[DOMAIN][entry.entry_id] = {
        COORDINATOR: coordinator,
        UNDO_UPDATE_LISTENERS: [],
    }

    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )

    return True
567af55b3f46c2b5e2ef5204dbe85fabb87c9a74
3,652,516
def get_user_plugins_grouped(get_allowed_plugin_uids_func, get_registered_plugins_grouped_func, registry, user, sort_items=True): """Get user plugins grouped. :param callable get_allowed_plugin_uids_func: :param callable get_registered_plugins_grouped_func: :param fobi.base.BaseRegistry registry: Subclass of ``fobi.base.BaseRegistry`` instance. :param django.contrib.auth.models.User user: :param bool sort_items: :return dict: """ ensure_autodiscover() if not RESTRICT_PLUGIN_ACCESS or getattr(user, 'is_superuser', False): return get_registered_plugins_grouped_func() registered_plugins = {} allowed_plugin_uids = get_allowed_plugin_uids_func(user) for uid, plugin in registry._registry.items(): if uid in allowed_plugin_uids: if PY3: plugin_name = force_text(plugin.name, encoding='utf-8') plugin_group = force_text(plugin.group, encoding='utf-8') else: plugin_name = force_text( plugin.name, encoding='utf-8' ).encode('utf-8') plugin_group = force_text( plugin.group, encoding='utf-8' ).encode('utf-8') if plugin_group not in registered_plugins: registered_plugins[plugin_group] = [] registered_plugins[plugin_group].append((uid, plugin_name)) if sort_items: for key, prop in registered_plugins.items(): prop.sort() return registered_plugins
f355738b99f503568a35945e1008f84145569b62
3,652,517
import numpy as np


def calc_randnm7(reg_dict, mlx75027):
    """
    Calculate the RANDNM7 register value

    Parameters
    ----------
    reg_dict : dict
        The dictionary that contains all the register information
    mlx75027 : bool
        Set to True if using the MLX75027 sensor, False if using the MLX75026 sensor.

    Returns
    ----------
    randnm7 : int
        The randnm7 register value
    """
    # print("calc_randnm7()")
    speed = calc_speed(reg_dict, mlx75027)
    hmax = calc_hmax(reg_dict, mlx75027, speed=speed)

    pretime_enabled = np.any(reg_dict["Px_PREHEAT"][2] | reg_dict["Px_PREMIX"][2])

    if pretime_enabled:
        px_pretime = calc_pretime(reg_dict, mlx75027)
        # As noted in 7.12. can be calculated as:
        # 1070 + HMAX * FLOOR( ((Px_PRETIME(in us) - 11.13) / HMAX) * 120), with Px_PRETIME >= 11.13
        if px_pretime >= 11.13:
            randnm7 = 1070 + hmax * np.floor(((px_pretime - 11.13) / hmax) * 120)
        else:
            randnm7 = 1070
    else:
        randnm7 = 1070

    return int(randnm7)
898c4662f045fbcbe655870a8d5642de92debcaf
3,652,518
import numpy as np


def get_orientation(pose, ori):
    """Generate an orientation vector from yaw/pitch/roll angles in radians."""
    yaw, pitch, roll = pose
    c1 = np.cos(-yaw)
    s1 = np.sin(-yaw)
    c2 = np.cos(-pitch)
    s2 = np.sin(-pitch)
    c3 = np.cos(-roll)
    s3 = np.sin(-roll)

    Ryaw = np.array([[c1, s1, 0],
                     [-s1, c1, 0],
                     [0, 0, 1]])
    Rpitch = np.array([[c2, 0, -s2],
                       [0, 1, 0],
                       [s2, 0, c2]])
    Rroll = np.array([[1, 0, 0],
                      [0, c3, s3],
                      [0, -s3, c3]])

    R = np.dot(Ryaw, np.dot(Rpitch, Rroll))
    n = np.dot(R, ori)
    return n
d00cc9befde7afd28b66c572116fb1234e109367
3,652,519
import copy

from PIL import ImageDraw


def draw_deformation(source_image, grid, grid_size=12):
    """
    source_image: PIL image object
    grid: the sampling grid
    grid_size: the size of drawing grid
    """
    im = copy.deepcopy(source_image)
    d = ImageDraw.Draw(im)
    H, W = source_image.size
    dist = int(H / grid_size)
    for i in range(grid_size):
        step = int(dist * i)
        d.line(list(zip((grid[0, step, :, 0].numpy() + 1) / 2 * H,
                        (grid[0, step, :, 1].numpy() + 1) / 2 * H)), fill=255, width=1)
        d.line(list(zip((grid[0, :, step, 0].numpy() + 1) / 2 * H,
                        (grid[0, :, step, 1].numpy() + 1) / 2 * H)), fill=255, width=1)
    return im
ec9c6b90e89221789ecba55e2c2360ccd24f9c8c
3,652,520
import socket


def dial_socket(port, host='localhost'):
    """
    Connect to the socket created by the server instance
    on the specified host and port
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    return sock
555c90f05cdf0feda97d5db160dd048542e03376
3,652,521
def analyseClassificationCoefficients(X: pd.DataFrame, y: pd.Series, D_learning_results: pd.DataFrame, outputPath: str) -> dict: """ This function evaluates the importance coefficients of the input features of a model Args: X (pd.DataFrame): Input pandas dataFrame. y (pd.Series): Input pandas series sith target label. D_learning_results (pd.DataFrame): Results dataframe obstained from a grid search (analytics.learning.grids). outputPath (str): Output filename path to save the results. Returns: dict: DESCRIPTION. """ output_figures = {} # define the confusion matrix x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0) for index, row in D_learning_results.iterrows(): y_pred = row['MODEL'].predict(x_test) cm = confusion_matrix(y_test, y_pred) # plot the confusion matrix fig = plt.figure(figsize=(9, 9)) ax = fig.gca() sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square=True, cmap='Blues_r') plt.ylabel('Actual label') plt.xlabel('Predicted label') ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0])) ax.set_xticklabels(labels=row['MODEL'].classes_, rotation=45) ax.set_yticklabels(labels=row['MODEL'].classes_, rotation=45) all_sample_title = 'Accuracy Score: {0}'.format(np.round(row['SCORE_TEST'], 2)) plt.title(f"Model: {row['MODEL_NAME']}, {all_sample_title}", size=15) output_figures[f"{row['MODEL_NAME']}_confusionMatrix"] = fig # analyse output for QDA if row['MODEL_NAME'] == 'quadratic_discriminant_analysis': # Print the mean for each class # create a dataframe with one row for each feature of X features_list = list(X.columns) # extract coefficients riprendere da qui fig = plt.figure(figsize=(12, 10)) means = row['MODEL'].means_ means_scaled = scale(means) plt.imshow(means_scaled, cmap='bwr') ax = fig.gca() # set xticks ax.set_xticks(range(0, len(features_list))) ax.set_xticklabels(features_list, rotation=90) # set yticks ax.set_yticks(range(0, len(row['MODEL'].classes_))) ax.set_yticklabels(row['MODEL'].classes_, rotation=45) plt.colorbar() plt.xlabel('Feature name') plt.ylabel('Classes') plt.title('QDA means per class') output_figures[f"{row['MODEL_NAME']}_means"] = fig # analyse output for LDA elif row['MODEL_NAME'] == 'linear_discriminant_analysis': # Print coefficients # create a dataframe with one row for each feature of X features_list = list(X.columns) # extract coefficients riprendere da qui fig = plt.figure(figsize=(12, 10)) coefficients = row['MODEL'].coef_ coefficients_scaled = scale(coefficients) plt.imshow(coefficients_scaled, cmap='bwr') ax = fig.gca() # set xticks ax.set_xticks(range(0, len(features_list))) ax.set_xticklabels(features_list, rotation=90) # set yticks ax.set_yticks(range(0, len(row['MODEL'].classes_))) ax.set_yticklabels(row['MODEL'].classes_, rotation=45) plt.colorbar() plt.xlabel('Feature name') plt.ylabel('Classes') plt.title('LDA coefficients') output_figures[f"{row['MODEL_NAME']}_coefficients"] = fig # analyse output for logistic regression elif row['MODEL_NAME'] == 'logistic_regression': # Print coefficients # create a dataframe with one row for each feature of X features_list = list(X.columns) # extract coefficients riprendere da qui fig = plt.figure(figsize=(12, 10)) coefficients = row['MODEL'].coef_ coefficients_scaled = scale(coefficients) plt.imshow(coefficients_scaled, cmap='bwr') ax = fig.gca() # set xticks ax.set_xticks(range(0, len(features_list))) ax.set_xticklabels(features_list, rotation=90) # set yticks ax.set_yticks(range(0, len(row['MODEL'].classes_))) 
ax.set_yticklabels(row['MODEL'].classes_, rotation=45) plt.colorbar() plt.xlabel('Feature name') plt.ylabel('Classes') plt.title('Logistic regression coefficients') output_figures[f"{row['MODEL_NAME']}_coefficients"] = fig elif row['MODEL_NAME'] == 'naive bayes': # Print coefficients # create a dataframe with one row for each feature of X features_list = list(X.columns) # print variance fig = plt.figure(figsize=(12, 10)) coefficients = row['MODEL'].sigma_ coefficients_scaled = scale(coefficients) plt.imshow(coefficients_scaled, cmap='bwr') ax = fig.gca() # set xticks ax.set_xticks(range(0, len(features_list))) ax.set_xticklabels(features_list, rotation=90) # set yticks ax.set_yticks(range(0, len(row['MODEL'].classes_))) ax.set_yticklabels(row['MODEL'].classes_, rotation=45) plt.colorbar() plt.xlabel('Feature name') plt.ylabel('Classes') plt.title('Naive bayes sigma') output_figures[f"{row['MODEL_NAME']}_sigma"] = fig # print mean fig = plt.figure(figsize=(12, 10)) coefficients = row['MODEL'].theta_ coefficients_scaled = scale(coefficients) plt.imshow(coefficients_scaled, cmap='bwr') ax = fig.gca() # set xticks ax.set_xticks(range(0, len(features_list))) ax.set_xticklabels(features_list, rotation=90) # set yticks ax.set_yticks(range(0, len(row['MODEL'].classes_))) ax.set_yticklabels(row['MODEL'].classes_, rotation=45) plt.colorbar() plt.xlabel('Feature name') plt.ylabel('Classes') plt.title('Naive bayes theta') output_figures[f"{row['MODEL_NAME']}_theta"] = fig elif row['MODEL_NAME'] == 'decision tree': # Print coefficients # create a dataframe with one row for each feature of X features_list = list(X.columns) # print variance fig = plt.figure(figsize=(12, 10)) coefficients = row['MODEL'].feature_importances_ # coefficients_scaled = scale(coefficients) plt.bar(features_list, coefficients) ax = fig.gca() # set xticks # ax.set_xticks(range(0,len(features_list))) ax.set_xticklabels(features_list, rotation=45) plt.xlabel('Feature name') plt.ylabel('Feature importance') plt.title('Decision tree Gini importance') output_figures[f"{row['MODEL_NAME']}_Gini"] = fig # save the decision tree dotfile = open(f"{outputPath}//dt.dot", 'w') tree.export_graphviz(row['MODEL'], out_file=dotfile, feature_names=features_list, class_names=row['MODEL'].classes_, rounded=True, proportion=False, precision=2, filled=True) dotfile.close() # http://webgraphviz.com/ else: print(f"{row['MODEL_NAME']}, model not considered") return output_figures
2e6a1e4e05d505ab5e43c638810fc23a6b11a228
3,652,522
import numpy as np


def centerfreq_to_bandnum(center_freq, norm_freq, nth_oct):
    """Returns band number from given center frequency."""
    return nth_oct * np.log2(center_freq / norm_freq)
857b9b2598981ba608c958c8acce35a1e71d021f
3,652,523
from typing import Union from typing import Sequence from typing import Optional def crossval_model( estimator: BaseEstimator, X: pd.DataFrame, y: Union[pd.Series, pd.DataFrame], evaluators: Sequence[Evaluator], cv: Optional[ Union[int, BaseCrossValidator] ] = None, # defaults to KFold(n_splits=5) random_state: Optional[Union[int, np.random.RandomState]] = None, stratify: Optional[Union[np.ndarray, pd.Series]] = None, n_jobs=1, ) -> Sequence[Evaluator]: """ Evaluate a model using cross validation. A list of evaluators determines what other metrics, such as feature importance and partial dependence are computed """ # Run various checks and prepare the evaluators random_state = check_random_state(random_state) cv = 5 if cv is None else cv if isinstance(cv, int): cv = KFold(n_splits=cv, shuffle=True, random_state=random_state) cross_val_split_generator = cv.split(X, stratify) evalutors_evaluations = _repeatedly_evaluate_model( estimator=estimator, X=X, y=y, train_test_indices_generator=cross_val_split_generator, evaluators=evaluators, use_group_cv=False, random_state=random_state, name_for_logging="Cross validate", n_jobs=n_jobs, ) _set_evaluators_evaluations(evalutors_evaluations) return evalutors_evaluations
49c95241ed04c248221c6366cde717e575f5f7c1
3,652,524
import time

import numpy as np
import matplotlib.pyplot as plt


def measure_dist(positions, weights, v_ref, side=False):
    """
    Will plot the mouse and allow me to click and measure with two clicks.
    side is False (so top view) but can be True, then it's cut through the
    major axis of the mouse (determined by v_reference)
    """
    # simplest trick is to just rotate all points so the reference
    # direction is perpendicular to x
    v_ref = np.append(v_ref, 0)
    angle_with_x = angle_between(np.array([1., 0, 0]), v_ref)
    RR = rotate_body_model(0, 0, -angle_with_x)
    positions = (RR @ positions.T).T - v_ref

    if side:
        xx, yy = positions[:, 0], positions[:, 2]
    else:
        xx, yy = positions[:, 0], positions[:, 1]  # top view

    plt.figure()
    plt.scatter(xx, yy, c=weights / np.max(weights), s=5)
    # plt.xlim([-.05,.1])
    # plt.ylim([0,.15])
    ax = plt.gca()
    plt.axes().set_aspect('equal', 'datalim')
    plt.title('click center of hip, then mid, then head of mouse!')
    w, h = 570, 800
    plt.get_current_fig_manager().window.setGeometry(1920 - w - 10, 60, w, h)

    click_points = np.asanyarray(plt.ginput(0))
    if click_points.shape[0] % 2 != 0:
        print('missing a point')
        click_points = click_points[:-1, :]

    n_clicks = click_points.shape[0]
    start_points = click_points[np.arange(n_clicks) % 2 == 0, :]
    end_points = click_points[np.arange(n_clicks) % 2 == 1, :]
    n_points = start_points.shape[0]

    plt.figure()
    plt.scatter(xx, yy, c=weights / np.max(weights), s=5)
    for s, e in zip(start_points, end_points):
        plt.plot([s[0], e[0]], [s[1], e[1]], 'o-')

    dist = np.linalg.norm(end_points - start_points, axis=1)
    leg_list = [str(np.round(d, decimals=3)) + " m" for d in dist]
    plt.legend(leg_list)
    plt.xlabel("x [m]")
    plt.ylabel("y [m]")
    plt.title('distance in meters')
    # plt.xlim([-.05,.1])
    # plt.ylim([0,.15])
    ax = plt.gca()
    plt.axes().set_aspect('equal', 'datalim')

    timestr = time.strftime("%Y%m%d-%H%M%S")
    plt.savefig('/home/chrelli/git/3d_sandbox/mycetrack0p4/measurements/' + timestr + '.png')
    plt.show()

    w, h = 570, 800
    plt.get_current_fig_manager().window.setGeometry(1920 - w - 10, 60, w, h)

    return dist
4d67344b0df64d3721d87803772c8aad15936fd9
3,652,525
def _get_draft_comments(request, issue, preview=False): """Helper to return objects to put() and a list of draft comments. If preview is True, the list of objects to put() is empty to avoid changes to the datastore. Args: request: Django Request object. issue: Issue instance. preview: Preview flag (default: False). Returns: 2-tuple (put_objects, comments). """ comments = [] tbd = [] # XXX Should request all drafts for this issue once, now we can. for patchset in issue.patchset_set.order('created'): ps_comments = list(models.Comment.gql( 'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE', patchset, request.user)) if ps_comments: patches = dict((p.key(), p) for p in patchset.patch_set) for p in patches.itervalues(): p.patchset = patchset for c in ps_comments: c.draft = False # Get the patch key value without loading the patch entity. # NOTE: Unlike the old version of this code, this is the # recommended and documented way to do this! pkey = models.Comment.patch.get_value_for_datastore(c) if pkey in patches: patch = patches[pkey] c.patch = patch if not preview: tbd.append(ps_comments) patchset.update_comment_count(len(ps_comments)) tbd.append(patchset) ps_comments.sort(key=lambda c: (c.patch.filename, not c.left, c.lineno, c.date)) comments += ps_comments return tbd, comments
affea5c09e42283057d70d7d1ce9f931d373d90d
3,652,526
def activate_model(cfg):
    """Activate the dynamic parts."""
    cfg["fake"] = cfg["fake"]()
    return cfg
df8b0a23dc683435c1379e57bc9fd98149876d9d
3,652,527
def convert_to_number(string):
    """
    Tries to cast input into an integer number, returning the
    number if successful and returning False otherwise.
    """
    try:
        number = int(string)
        return number
    except (ValueError, TypeError):
        return False
30110377077357d3e7d45cac4c106f5dc9349edd
3,652,528
def _ts_value(position, counts, exposure, background, kernel, norm, flux_estimator): """Compute TS value at a given pixel position. Uses approach described in Stewart (2009). Parameters ---------- position : tuple (i, j) Pixel position. counts : `~numpy.ndarray` Counts image background : `~numpy.ndarray` Background image exposure : `~numpy.ndarray` Exposure image kernel : `astropy.convolution.Kernel2D` Source model kernel norm : `~numpy.ndarray` Norm image. The flux value at the given pixel position is used as starting value for the minimization. Returns ------- TS : float TS value at the given pixel position. """ dataset = SimpleMapDataset.from_arrays( counts=counts, background=background, exposure=exposure, kernel=kernel, position=position, norm=norm, ) return flux_estimator.run(dataset)
5a53a408205e5aecf0b2035efbc3feb33097e46c
3,652,529
from typing import List


def mean(nums: List) -> float:
    """
    Find mean of a list of numbers.
    Wiki: https://en.wikipedia.org/wiki/Mean

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
3c802b4967f646b6338e52b4ce12977274054c15
3,652,530
import scipy def post_3d(post_paths, labels, colours, linestyles, contour_levels_sig, x_label=None, y_label=None, z_label=None, x_lims=None, y_lims=None, z_lims=None, smooth_xy=None, smooth_xz=None, smooth_yz=None, smooth_x=None, smooth_y=None, smooth_z=None, print_areas=False, save_path=None): """ Produce triangle plot showing multiple 3D posteriors, each as output by plot_utils.get_3d_post. Args: post_paths (list): List of paths to 3D posterior .npz files, each as output by plot_utils.get_3d_post. labels (list): List of legend labels, one for each posterior grid. colours (list): List of colours, one for each posterior grid. linestyles (list): List of linestyles, one for each posterior grid. contour_levels_sig (list): List of confidence regions to plot in ascending order, e.g. [1, 3]. x_label (str, optional): X-axis label - default None, i.e. no label. y_label (str, optional): Y-axis label - default None, i.e. no label. z_label (str, optional): Z-axis label - default None, i.e. no label. x_lims ((float, float), optional): X-axis limits - default None, limits set automatically. y_lims ((float, float), optional): Y-axis limits - default None, limits set automatically. z_lims ((float, float), optional): Z-axis limits - default None, limits set automatically. smooth_xy (list, optional): List of kernel standard deviations for Gaussian smoothing in the x-y plane, one for each posterior grid, or None for no smoothing (default None). smooth_xz (list, optional): List of kernel standard deviations for Gaussian smoothing in the x-z plane, one for each posterior grid, or None for no smoothing (default None). smooth_yz (list, optional): List of kernel standard deviations for Gaussian smoothing in the y-z plane, one for each posterior grid, or None for no smoothing (default None). smooth_x (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D x posterior, one for each posterior grid, or None for no smoothing (default None). smooth_y (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D y posterior, one for each posterior grid, or None for no smoothing (default None). smooth_z (list, optional): List of kernel standard deviations for Gaussian smoothing of the 1D z posterior, one for each posterior grid, or None for no smoothing (default None). print_areas (bool, optional): If True, print relative areas/widths of the different posteriors. Note that smoothing can affect these results, so for reliable results smoothing should be switched off to extract relative areas, and then smoothing values should be set to preserve unsmoothed relative areas. Default False. save_path (str, optional): Path to save figure to, if supplied. If not supplied, figure is displayed. 
""" # Load unnormalised 3D posteriors post_grids = [] for post_idx, post_path in enumerate(post_paths): print(f'Loading {post_idx + 1} / {len(post_paths)}') with np.load(post_path) as data: x_grid_tmp = data['x_grid'] y_grid_tmp = data['y_grid'] z_grid_tmp = data['z_grid'] post_grids.append(data['post_grid']) # Check grids consistent if post_idx == 0: x_grid, y_grid, z_grid = x_grid_tmp, y_grid_tmp, z_grid_tmp else: assert np.array_equal(x_grid, x_grid_tmp) assert np.array_equal(y_grid, y_grid_tmp) assert np.array_equal(z_grid, z_grid_tmp) # Form 1D & 2D grids print('Forming 1D & 2D grids') x = x_grid[:, 0, 0] y = y_grid[0, :, 0] z = z_grid[0, 0, :] xy_x, xy_y = np.meshgrid(x, y, indexing='ij') xz_x, xz_z = np.meshgrid(x, z, indexing='ij') yz_y, yz_z = np.meshgrid(y, z, indexing='ij') # Calculate integration elements print('Calculating integration elements') dx = x[1] - x[0] dy = y[1] - y[0] dz = z[1] - z[0] assert np.allclose(np.diff(x), dx) assert np.allclose(np.diff(y), dy) assert np.allclose(np.diff(z), dz) dxdy = dx * dy dxdz = dx * dz dydz = dy * dz dxdydz = dx * dy * dz # Normalise 3D posteriors print('Normalising') post_grids = [post_grid / (np.sum(post_grid) * dxdydz) for post_grid in post_grids] assert all([np.isclose(np.sum(post_grid) * dxdydz, 1) for post_grid in post_grids]) # Marginalise to get 2D posteriors print('Marginalising 3D -> 2D') posts_xy = [np.sum(post_grid, axis=2) * dz for post_grid in post_grids] posts_xz = [np.sum(post_grid, axis=1) * dy for post_grid in post_grids] posts_yz = [np.sum(post_grid, axis=0) * dx for post_grid in post_grids] assert all([np.isclose(np.sum(post_xy) * dxdy, 1) for post_xy in posts_xy]) assert all([np.isclose(np.sum(post_xz) * dxdz, 1) for post_xz in posts_xz]) assert all([np.isclose(np.sum(post_yz) * dydz, 1) for post_yz in posts_yz]) # Marginalise again to get 1D posteriors print('Marginalising 2D -> 1D') posts_x = [np.sum(post_xy, axis=1) * dy for post_xy in posts_xy] posts_y = [np.sum(post_xy, axis=0) * dx for post_xy in posts_xy] posts_z = [np.sum(post_xz, axis=0) * dx for post_xz in posts_xz] assert all([np.isclose(np.sum(post_x) * dx, 1) for post_x in posts_x]) assert all([np.isclose(np.sum(post_y) * dy, 1) for post_y in posts_y]) assert all([np.isclose(np.sum(post_z) * dz, 1) for post_z in posts_z]) # Additional marginalisation checks print('Checking normalisation') assert all([np.allclose(post_x, np.sum(post_xz, axis=1) * dz) for post_x, post_xz in zip(posts_x, posts_xz)]) assert all([np.allclose(post_y, np.sum(post_yz, axis=1) * dz) for post_y, post_yz in zip(posts_y, posts_yz)]) assert all([np.allclose(post_z, np.sum(post_yz, axis=0) * dy) for post_z, post_yz in zip(posts_z, posts_yz)]) assert all([np.allclose(post_x, np.sum(p_3d, axis=(1, 2)) * dydz) for post_x, p_3d in zip(posts_x, post_grids)]) assert all([np.allclose(post_y, np.sum(p_3d, axis=(0, 2)) * dxdz) for post_y, p_3d in zip(posts_y, post_grids)]) assert all([np.allclose(post_z, np.sum(p_3d, axis=(0, 1)) * dxdy) for post_z, p_3d in zip(posts_z, post_grids)]) # Apply smoothing if smooth_xy is not None: posts_xy = [ndimage.gaussian_filter(post_xy, [sig, sig / 2.]) for post_xy, sig in zip(posts_xy, smooth_xy)] if smooth_xz is not None: posts_xz = [ndimage.gaussian_filter(post_xz, sig) for post_xz, sig in zip(posts_xz, smooth_xz)] if smooth_yz is not None: posts_yz = [ndimage.gaussian_filter(post_yz, sig) for post_yz, sig in zip(posts_yz, smooth_yz)] if smooth_x is not None: posts_x = [ndimage.gaussian_filter(post_x, sig) for post_x, sig in zip(posts_x, smooth_x)] if 
smooth_y is not None: posts_y = [ndimage.gaussian_filter(post_y, sig) for post_y, sig in zip(posts_y, smooth_y)] if smooth_z is not None: posts_z = [ndimage.gaussian_filter(post_z, sig) for post_z, sig in zip(posts_z, smooth_z)] # Convert 2D & 1D posteriors to confidence levels print('Converting to confidence levels') confs_xy = [gcl_post.post_to_conf(post_xy, dxdy) for post_xy in posts_xy] confs_xz = [gcl_post.post_to_conf(post_xz, dxdz) for post_xz in posts_xz] confs_yz = [gcl_post.post_to_conf(post_yz, dydz) for post_yz in posts_yz] confs_x = [gcl_post.post_to_conf(post_x, dx) for post_x in posts_x] confs_y = [gcl_post.post_to_conf(post_y, dy) for post_y in posts_y] confs_z = [gcl_post.post_to_conf(post_z, dz) for post_z in posts_z] # Extract out relative widths and areas contour_levels = [0.] + [scipy.special.erf(contour_level / np.sqrt(2)) for contour_level in contour_levels_sig] if print_areas: print('Note that smoothing should be switched off to extract unbiased relative areas, and smoothing should be ' 'set such that relative areas are preserved') def count_points_within_outermost_contour(conf_grid): return np.count_nonzero(conf_grid < contour_levels[-1]) rel_areas_xy = list(map(count_points_within_outermost_contour, confs_xy)) print('Relative areas x-y:', np.divide(rel_areas_xy, max(rel_areas_xy))) rel_areas_xz = list(map(count_points_within_outermost_contour, confs_xz)) print('Relative areas x-z:', np.divide(rel_areas_xz, max(rel_areas_xz))) rel_areas_yz = list(map(count_points_within_outermost_contour, confs_yz)) print('Relative areas y-z:', np.divide(rel_areas_yz, max(rel_areas_yz))) rel_widths_x = list(map(count_points_within_outermost_contour, confs_x)) print('Relative widths x:', np.divide(rel_widths_x, max(rel_widths_x))) rel_widths_y = list(map(count_points_within_outermost_contour, confs_y)) print('Relative widths y:', np.divide(rel_widths_y, max(rel_widths_y))) rel_widths_z = list(map(count_points_within_outermost_contour, confs_z)) print('Relative widths z:', np.divide(rel_widths_z, max(rel_widths_z))) # Plot everything print('Plotting') plt.rcParams.update({'font.size': 13}) plt.rcParams['axes.titlesize'] = 17 fig, axes = plt.subplots(nrows=3, ncols=3, sharex='col', figsize=(12.8, 8.6)) plt.subplots_adjust(left=.08, right=.97, bottom=.08, top=.97, wspace=0, hspace=0) fill_colours = [[np.squeeze(matplotlib.colors.to_rgba_array(c, a)) for a in [0.3, 0.1, 0]] for c in colours] # Row 0: x for post_x, colour, fill, linestyle, label in zip(posts_x, colours, fill_colours, linestyles, labels): axes[0, 0].plot(x, post_x, color=colour, ls=linestyle, lw=2, label=label) axes[0, 0].fill_between(x, post_x, color=fill[1]) axes[0, 1].axis('off') axes[0, 2].axis('off') # Row 1: x vs y, y for conf_xy, post_y, colour, fill, linestyle in zip(confs_xy, posts_y, colours, fill_colours, linestyles): axes[1, 0].contour(xy_x, xy_y, conf_xy, levels=contour_levels, colors=colour, linestyles=[linestyle], linewidths=2) axes[1, 0].contourf(xy_x, xy_y, conf_xy, levels=contour_levels, colors=fill) axes[1, 1].plot(y, post_y, color=colour, ls=linestyle, lw=2) axes[1, 1].fill_between(y, post_y, color=fill[1]) axes[1, 2].axis('off') # Row 2: x vs z, y vs z, z for conf_xz, conf_yz, post_z, colour, fill, linestyle in zip(confs_xz, confs_yz, posts_z, colours, fill_colours, linestyles): axes[2, 0].contour(xz_x, xz_z, conf_xz, levels=contour_levels, colors=colour, linestyles=[linestyle], linewidths=2) axes[2, 0].contourf(xz_x, xz_z, conf_xz, levels=contour_levels, colors=fill) axes[2, 1].contour(yz_y, yz_z, 
conf_yz, levels=contour_levels, colors=colour, linestyles=[linestyle], linewidths=2) axes[2, 1].contourf(yz_y, yz_z, conf_yz, levels=contour_levels, colors=fill) axes[2, 2].plot(z, post_z, color=colour, ls=linestyle, lw=2) axes[2, 2].fill_between(z, post_z, color=fill[1]) # Hide y ticks for 1D posteriors axes[0, 0].tick_params(axis='y', which='both', left=False, labelleft=False) axes[1, 1].tick_params(axis='y', which='both', left=False, labelleft=False) axes[2, 2].tick_params(axis='y', which='both', left=False, labelleft=False) # Add x ticks at top and bottom of 2D posteriors and at bottom of 1D posteriors axes[0, 0].tick_params(axis='x', which='both', bottom=True, direction='in') axes[1, 0].tick_params(axis='x', which='both', top=True, bottom=True, direction='in') axes[2, 0].tick_params(axis='x', which='both', top=True, bottom=True, direction='inout', length=7.5) axes[0, 1].tick_params(axis='x', which='both', bottom=True, direction='in') axes[2, 1].tick_params(axis='x', which='both', top=True, bottom=True, direction='inout', length=7.5) axes[2, 2].tick_params(axis='x', which='both', bottom=True, direction='inout', length=7.5) # Add y ticks at left and right of 2D posteriors axes[1, 0].tick_params(axis='y', which='both', left=True, direction='inout', length=7.5) axes[1, 0].secondary_yaxis('right').tick_params(axis='y', which='both', right=True, direction='in', labelright=False) axes[2, 0].tick_params(axis='y', which='both', left=True, right=True, direction='inout', length=7.5) axes[2, 1].tick_params(axis='y', which='both', left=True, right=True, labelleft=False, direction='in') # Limits axes[2, 0].set_xlim(x_lims) axes[2, 1].set_xlim(y_lims) axes[2, 2].set_xlim(z_lims) axes[1, 0].set_ylim(y_lims) axes[2, 0].set_ylim(z_lims) axes[2, 1].set_ylim(z_lims) # Fix overlapping z tick labels by removing every other tick axes[2, 2].set_xticks(axes[2, 2].get_xticks()[1::2]) # Label axes axes[2, 0].set_xlabel(x_label) axes[2, 1].set_xlabel(y_label) axes[2, 2].set_xlabel(z_label) axes[1, 0].set_ylabel(y_label) axes[2, 0].set_ylabel(z_label) fig.align_ylabels() # Title axes[0, 0].annotate('Full Euclid-like mask', xy=(2.95, .95), xycoords='axes fraction', ha='right', va='top', size=plt.rcParams['axes.titlesize']) # Legend leg_title = f'{min(contour_levels_sig)}\N{en dash}{max(contour_levels_sig)}$\\sigma$ confidence' axes[0, 0].legend(loc='upper right', bbox_to_anchor=(3, .8), handlelength=4, frameon=False, title=leg_title) if save_path is not None: plt.savefig(save_path) print('Saved ' + save_path) else: plt.show()
5e25de6f69a7d281e59ac1423b6be4b27080a689
3,652,531
from numpy import ndarray


def det(m1: ndarray) -> float:
    """
    Compute the determinant of a double precision 3x3 matrix.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/det_c.html

    :param m1: Matrix whose determinant is to be found.
    :return: The determinant of the matrix.
    """
    m1 = stypes.to_double_matrix(m1)
    return libspice.det_c(m1)
aa0a6629536df22ea016bb81a8e62769c7b3ab9e
3,652,532
def path_to_xy(path: PointList) -> XYList:
    """Convert PointList to XYList"""
    return [p.xy() for p in path]
ea8cc222ab2b8ce6634e9bb1ea7d456bfa451782
3,652,533
def diff(source: list):
    """
    Compute the first-order discrete differences for a 1-dimensional list.

    TODO: Support higher orders and dimensions as required.
    """
    result = []
    for index in range(1, len(source)):
        result.append(source[index] - source[index - 1])
    return result
e3773ca911130f4d1a2c70e43cb72b7b831e242a
3,652,534
def is_gzip(name):
    """Return True if the name indicates that the file is compressed with gzip."""
    return name.endswith(".gz")
a6ea06f04808a07c4b26338f87273986eda86ef1
3,652,535
from os.path import exists, abspath, join
from os import pathsep, environ


def _search_on_path(filenames):
    """Find file on system path."""
    # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52224
    search_path = environ["PATH"]
    paths = search_path.split(pathsep)
    for path in paths:
        for filename in filenames:
            if exists(join(path, filename)):
                return abspath(join(path, filename))
c611797ce121c4dc9ecd01d0fafa5c74a38725a5
3,652,536
def possible_sums_of(numbers: list) -> list:
    """Compute all possible sums of numbers excluding self."""
    possible_sums = []
    for idx, nr_0 in enumerate(numbers[:-1]):
        for nr_1 in numbers[idx + 1:]:
            possible_sums.append(nr_0 + nr_1)
    return possible_sums
39ebe3e48c45a9c30f16b43e6c34778c5e813940
3,652,537
def normalize_norms(X, scale_factor=1, axis=0, by='sum'):
    """
    Wrapper of `normalize_col` and `normalize_row`.

    Parameters
    ----------
    X: a (sparse) matrix
    scale_factor: numeric, None
        if None, use the median of sum level as the scaling factor.
    axis: int, {0, 1}
        if axis = 0, apply to each column;
        if axis = 1, apply to each row.
    by: str, {'sum', 'max'}
        normalization method
    """
    foo = normalize_col if axis == 0 else normalize_row
    return foo(X, scale_factor=scale_factor, by=by)
7c491245fc83b2b48c21cb91f79915af7261f5ba
3,652,538
import numpy as np


def full_solution(combined, prob_dists):
    """
    combined: (w, n-1->n-w, 3, 3)
    prob_dists: (n, 3, total_reads)
    p[v, g, k] = prob of observing k of total_reads on ref if genotype is g on variant v
    """
    N = len(combined[0]) + 1
    best_idx, best_score = np.empty(N), -np.inf * np.ones(N)
    for j, counts in enumerate(combined, 1):
        scores = get_scores(counts, prob_dists[:-j])
        do_update = scores > best_score[j:]
        best_score[j:][do_update] = scores[do_update]
        best_idx[j:][do_update] = np.flatnonzero(do_update)

        rev_scores = get_scores(counts.swapaxes(-2, -1), prob_dists[j:])
        do_update = rev_scores > best_score[:-j]
        best_score[:-j][do_update] = rev_scores[do_update]
        best_idx[:-j][do_update] = np.flatnonzero(do_update) + j
    return best_idx
2732c57e44aa0c17bd652b01226053c095d9fdb3
3,652,539
import numpy as np


def ycbcr2bgr(img):
    """Convert a YCbCr image to BGR image.

    The bgr version of ycbcr2rgb. It implements the ITU-R BT.601 conversion for
    standard-definition television. See more details in
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
    In OpenCV, it implements a JPEG conversion. See more details in
    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].

    Returns:
        ndarray: The converted BGR image. The output image has the same type
            and range as input image.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img) * 255
    out_img = np.matmul(
        img,
        [
            [0.00456621, 0.00456621, 0.00456621],
            [0.00791071, -0.00153632, 0],
            [0, -0.00318811, 0.00625893],
        ],
    ) * 255.0 + [-276.836, 135.576, -222.921]
    out_img = _convert_output_type_range(out_img, img_type)
    return out_img
e5e5c408e40645ae4844635fd0fbf065746f187d
3,652,540
from datetime import datetime def tensorize_fg_coeffs( data, wgts, fg_model_comps, notebook_progressbar=False, verbose=False, ): """Initialize foreground coefficient tensors from uvdata and modeling component dictionaries. Parameters ---------- data: list list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs) representing data wgts: list list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs) representing weights. fg_model_comps: list list of fg modeling tf.Tensor objects representing foreground modeling vectors. Each tensor is (nvecs, ngrps, nbls, nfreqs) see description in tensorize_fg_model_comps_dict docstring. notebook_progressbar: bool, optional use progress bar optimized for notebook output. default is False. verbose: bool, optional lots of text output default is False. Returns ------- fg_coeffs_re: tf.Tensor object 1d tensor containing real parts of coeffs for each modeling vector. ordering is over foreground modeling vector per redundant group and then redundant group in the order of groups appearing in red_grps fg_coeffs_im: tf.Tensor object 1d tensor containing imag parts of coeffs for each modeling vector. ordering is over foreground modeling vector per redundant group and then redundant group in the order of groups appearing in red_grps """ echo( f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n", verbose=verbose, ) fg_coeffs = [] nchunks = len(data) binary_wgts = [ tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks) ] for cnum in PBARS[notebook_progressbar](range(nchunks)): # set up linear leastsq fg_coeff_chunk = [] ngrps = data[cnum].shape[0] ndata = data[cnum].shape[1] * data[cnum].shape[2] nvecs = fg_model_comps[cnum].shape[0] # pad with zeros for gnum in range(ngrps): nonzero_rows = np.where( np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1) )[0] if len(nonzero_rows) > 0: nvecs_nonzero = np.min(nonzero_rows) else: nvecs_nonzero = nvecs # solve linear leastsq fg_coeff_chunk.append( tf.reshape( tf.linalg.lstsq( tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero], tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1)), ), (nvecs_nonzero,), ) ) # pad zeros at the end back up to nvecs. fg_coeff_chunk[-1] = tf.pad(fg_coeff_chunk[-1], [(0, nvecs - nvecs_nonzero)]) # add two additional dummy indices to satify broadcasting rules. fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1)) fg_coeffs.append(fg_coeff_chunk) echo( f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n", verbose=verbose, ) return fg_coeffs
dbff52b154326df6a324ef454887c65bfe528044
3,652,541
def receive_incoming_bets():
    """ Sends fixtures to the front-end """
    return fixtures.fixtures_information
2ab61c0bc15bb9c8c4359bb8ca7e8b1287b1d182
3,652,542
def fibonacci(n):
    """
    object: fibonacci(n) returns the first n Fibonacci numbers in a list
    input: n - the number used to calculate the fibonacci list
    return: retList - the fibonacci list
    """
    if type(n) != int:
        print(n)
        print(":input not an integer")
        return False
    if n <= 0:
        print(str(n) + " is not a positive integer")
        return False
    f1 = 1
    f2 = 1
    retList = []
    for i in range(0, n):
        retList.append(f1)
        fn = f1 + f2
        f1 = f2
        f2 = fn
    return retList
ac37d952eecae57b33fb3768f1c8097d76769534
3,652,543
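A brief usage sketch for the fibonacci snippet; the argument is an arbitrary example.

# Hypothetical example: the first seven Fibonacci numbers.
print(fibonacci(7))  # -> [1, 1, 2, 3, 5, 8, 13]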
def psnr_batch(_mse_batch_val):
    """
    :param _mse_batch_val: ndarray
    :return: ndarray

    Usage:
        1) The batch is dealt with per channel. Thus, it is recommended to call the
           mse_batch function before the psnr_batch function.
        2) cumsum_psnr_rgb += (metric_.psnr_batch(_mse_batch_val=(metric_.mse_batch(
               _ndarr_input=imgcv_.batch2channel(_ndarr=ndarr_input),
               _ndarr_ref=imgcv_.batch2channel(_ndarr=ndarr_ref),
               _num_colr_channel=3)))).sum()
    """
    return (10 * np.log10((255.0 ** 2) / _mse_batch_val))
c33eaa3e04fbd7d9749ad8989a15ea198ff4d806
3,652,544
def get_u0(u0_type, num_features):
    """return a polyhedral definition for U^0, B_mat and b_vec"""
    assert u0_type in ["box", "positive_normed"]
    if u0_type == "box":
        B_mat, b_vec = U0_box(num_features)
    if u0_type == "positive_normed":
        B_mat, b_vec = U0_positive_normed(num_features)
    return B_mat, b_vec
bb6856284067ac3d5b39ca50d30c5745a7ee5e07
3,652,545
def funcparser_callable_search_list(*args, caller=None, access="control", **kwargs):
    """
    Usage: $objlist(#123)

    Legacy alias for search with a return_list=True kwarg preset.
    """
    return funcparser_callable_search(*args, caller=caller, access=access,
                                      return_list=True, **kwargs)
511bff6803ba9b088fa94d32e9cb3f85c4823b94
3,652,546
def upcoming_movie_name(soup):
    """
    Extracts the list of movies from BeautifulSoup object.

    :param soup: BeautifulSoup object containing the html content.
    :return: list of movie names
    """
    movie_names = []
    movie_name_tag = soup.find_all('h4')
    for _movie in movie_name_tag:
        _movie_result = _movie.find_all('a')
        try:
            _movie_name = _movie_result[0]['title']
            movie_names.append(_movie_name)
        except KeyError as e:
            continue
    return movie_names
6bac06375109ec103492a079746e2c0364bfac17
3,652,547
def options(*args, **kw):
    """Mark the decorated function as a handler for OPTIONS requests."""
    return _make_handler_decorator('OPTIONS', *args, **kw)
21e6f830e054a84cd16e5cdfbb63c2202ff70d7b
3,652,548
import codecs
import json
import urllib2


def lookup_vendor_name(mac_address):
    """
    Translates the returned mac-address to a vendor
    """
    url = "http://macvendors.co/api/%s" % mac_address
    request = urllib2.Request(url, headers={'User-Agent': "API Browser"})
    try:
        response = urllib2.urlopen(request)
        reader = codecs.getreader("utf-8")
        obj = json.load(reader(response))
        response.close()
        return obj['result']['company']
    except urllib2.URLError:
        return "Unable to lookup MAC address"
    except KeyError:
        return "MAC lookup API changed"
ad854390256c87c537b1d8e4e8906b3b3d0b10bd
3,652,549
def train_on(text):
    """
    Return a dictionary whose keys are all the tuples of len PREFIX of consecutive
    words inside text, and whose value is the list of every single word which
    follows that tuple inside the text.

    For ex: {('Happy', 'birthday'): ['to', 'dear'] ...}
    """
    words = text.split()
    assert len(words) > PREFIX
    training = defaultdict(list)
    for i in range(0, len(words) - PREFIX):
        duo = tuple(words[i:i + PREFIX])
        following = words[i + PREFIX]
        training[duo].append(following)
    return training
40230bbb346cb4c98d6694fb0d18652e7d6bd4e7
3,652,550
def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
    """learning_rate_decay: updates the learning rate using inverse time decay in numpy

    Args:
        alpha : is the original learning rate
        decay_rate : is the weight used to determine the rate at which alpha will decay
        global_step : is the number of passes of gradient descent that have elapsed
        decay_step : is the number of passes of gradient descent that should occur
            before alpha is decayed further

    Returns:
        the updated value for alpha
    """
    alpha = alpha / (1 + decay_rate * int(global_step / decay_step))
    return alpha
a98f893acc7f14dafcf2dea551df4eb44da07bc4
3,652,551
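A worked example for the inverse time decay above; the values are invented for illustration.

# Hypothetical example: alpha=0.1, decay_rate=1, 30 elapsed steps, decay every 10 steps.
# 0.1 / (1 + 1 * int(30 / 10)) = 0.1 / 4 = 0.025
print(learning_rate_decay(0.1, 1, 30, 10))  # -> 0.025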
def update_studio(request): """updates the studio """ studio_id = request.params.get('studio_id') studio = Studio.query.filter_by(id=studio_id).first() name = request.params.get('name', None) dwh = request.params.get('dwh', None) wh_mon_start = get_time(request, 'mon_start') wh_mon_end = get_time(request, 'mon_end') wh_tue_start = get_time(request, 'tue_start') wh_tue_end = get_time(request, 'tue_end') wh_wed_start = get_time(request, 'wed_start') wh_wed_end = get_time(request, 'wed_end') wh_thu_start = get_time(request, 'thu_start') wh_thu_end = get_time(request, 'thu_end') wh_fri_start = get_time(request, 'fri_start') wh_fri_end = get_time(request, 'fri_end') wh_sat_start = get_time(request, 'sat_start') wh_sat_end = get_time(request, 'sat_end') wh_sun_start = get_time(request, 'sun_start') wh_sun_end = get_time(request, 'sun_end') if studio and name and dwh: # update new studio studio.name = name studio.daily_working_hours = int(dwh) wh = WorkingHours() def set_wh_for_day(day, start, end): if start != end: wh[day] = [[start.seconds/60, end.seconds/60]] else: wh[day] = [] set_wh_for_day('mon', wh_mon_start, wh_mon_end) set_wh_for_day('tue', wh_tue_start, wh_tue_end) set_wh_for_day('wed', wh_wed_start, wh_wed_end) set_wh_for_day('thu', wh_thu_start, wh_thu_end) set_wh_for_day('fri', wh_fri_start, wh_fri_end) set_wh_for_day('sat', wh_sat_start, wh_sat_end) set_wh_for_day('sun', wh_sun_start, wh_sun_end) studio.working_hours = wh DBSession.add(studio) # Commit will be handled by the zope transaction extension return HTTPOk()
2fbdcbd04bb0ec7d0b2f5790e59e9211c831066f
3,652,552
def flip_coin(num_of_experiments=1000, num_of_flips=30):
    """
    Flip the coin `num_of_flips` times and repeat this experiment
    `num_of_experiments` times. And return the number of heads grouped
    together in all the experiments.
    """
    all_heads = []
    for i in range(num_of_experiments):
        heads = tails = 0
        for counter in range(num_of_flips):
            num = np.random.randint(0, 2)
            if num == 0:
                heads += 1
            else:
                tails += 1
        all_heads.append(heads)

    # group the number of heads in all the experiments
    flip_heads = []
    for flip in range(num_of_flips + 1):
        num_of_heads = 0
        for h in all_heads:
            if h == flip:
                num_of_heads += 1
        flip_heads.append(num_of_heads)
    return flip_heads
24ccd52693233f93f5c0bb7bb4f09220e86f320c
3,652,553
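A short usage sketch for flip_coin; the parameters are arbitrary, the output varies per run, and numpy is assumed to be imported as np as the snippet does.

# Hypothetical example: head-count histogram for 100 experiments of 10 flips each.
counts = flip_coin(num_of_experiments=100, num_of_flips=10)
print(len(counts))  # -> 11 (possible head counts 0..10)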
from pathlib import Path def get_questions( path: str, uid2idx: dict = None, path_data: Path = None, ) -> po.DataFrame: """ Identify correct answer text and filter out wrong distractors from question string Get tokens and lemmas Get explanation sentence ids and roles """ # Dropping questions without explanations hurts score df = po.read_csv(path, sep="\t") df = add_q_reformat(df) # Preprocess texts tokens, lemmas = preprocess_texts(df.q_reformat.tolist(), path_data) df["tokens"], df["lemmas"], df["embedding"] = tokens, lemmas, None # Get explanation uids and roles exp_uids = [] exp_roles = [] exp_idxs = [] for exp_string in df.explanation.values: _uids, _roles = extract_explanation(exp_string) uids = [] roles = [] idxs = [] assert len(_uids) == len(_roles) for i in range(len(_uids)): if _uids[i] not in uid2idx: continue uids.append(_uids[i]) roles.append(_roles[i]) idxs.append(uid2idx[_uids[i]]) exp_uids.append(uids) exp_roles.append(roles) exp_idxs.append(idxs) df["exp_uids"], df["exp_roles"], df[ "exp_idxs"] = exp_uids, exp_roles, exp_idxs print(df.shape) return df
877c75f20b7b766655ecda5dc4bc63ada7ee593c
3,652,554
def simple_command(device, cmd_id, data=None, receive=True): """ Raises: HIDException -> if reading/writing to the USB device failed: KBProtocolException -> if the packet is too large """ cmd_packet = bytearray(EP_VENDOR_SIZE) cmd_packet[0] = cmd_id # Optional data component if data != None: data = bytearray(data) if len(data) > (EP_VENDOR_SIZE-1): raise KBProtocolException("Data can't fit in one packet. Got {} " "bytes, max is {}".format(len(data), EP_VENDOR_SIZE)) for i, byte in enumerate(data): cmd_packet[i+1] = byte device.write(cmd_packet) if receive: response = device.read() packet_type = response[0] while packet_type != cmd_id and packet_type != CMD_ERROR_CODE: # ignore other packets response = device.read(timeout=2) if response == None: device.write(cmd_packet) else: packet_type = response[0] if response[0] == CMD_ERROR_CODE: raise_error_code(response[1]) elif response[0] != cmd_id: raise KBProtocolException("Unexpected packet with packet_id: {}" .format(response[0])) return response[1:] else: return None
57a5e237f2296fec1563c125cb934ce1914d8bac
3,652,555
def dbopen(dbname, perm='r'):
    """Open a Datascope database"""
    return Dbptr(dbname, perm)
08a083def4f792927232eff5d625ae4e6f3355fb
3,652,556
def to_nx(dsk): """ Code mainly identical to dask.dot.to_graphviz and kept compatible. """ collapse_outputs = False verbose = False data_attributes = {} function_attributes = {} g = nx.DiGraph() seen = set() connected = set() for k, v in dsk.items(): k_name = name(k) if istask(v): func_name = name((k, "function")) if not collapse_outputs else k_name if collapse_outputs or func_name not in seen: seen.add(func_name) attrs = function_attributes.get(k, {}).copy() attrs.setdefault("label", key_split(k)) attrs.setdefault("shape", "circle") g.add_node(func_name, **attrs) if not collapse_outputs: g.add_edge(func_name, k_name) connected.add(func_name) connected.add(k_name) for dep in get_dependencies(dsk, k): dep_name = name(dep) if dep_name not in seen: seen.add(dep_name) attrs = data_attributes.get(dep, {}).copy() attrs.setdefault("label", box_label(dep, verbose)) attrs.setdefault("shape", "box") g.add_node(dep_name, **attrs) g.add_edge(dep_name, func_name) connected.add(dep_name) connected.add(func_name) elif ishashable(v) and v in dsk: v_name = name(v) g.add_edge(v_name, k_name) connected.add(v_name) connected.add(k_name) if (not collapse_outputs or k_name in connected) and k_name not in seen: seen.add(k_name) attrs = data_attributes.get(k, {}).copy() attrs.setdefault("label", box_label(k, verbose)) attrs.setdefault("shape", "box") g.add_node(k_name, **attrs) assert nx.dag.is_directed_acyclic_graph(g) return g
140b6a74ce7e75ddbc906bc4b4c7330e7585e0d8
3,652,557
def predict(model, img_base64):
    """
    Returns the prediction for a given image.

    Params:
        model: the neural network (classifier).
    """
    return model.predict_disease(img_base64)
545a98dd682b81a1662878f91091615871562226
3,652,558
import hashlib


def get_hash(x: str):
    """Generate a hash from a string."""
    h = hashlib.md5(x.encode())
    return h.hexdigest()
538c936c29867bb934776333fb2dcc73c06e23d0
3,652,559
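A usage sketch for get_hash; the input string is an arbitrary example.

# Hypothetical example: MD5 hex digest of a short string.
print(get_hash("hello"))  # -> '5d41402abc4b2a76b9719d911017c592'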
def pair_force(r1, r2, par1, par2, sigma_c, box, r_cut, lj=True, coulomb=True):
    """Compute the sum of the Lennard Jones force and the short ranged part of the
    Coulomb force between two particles.

    Arguments:
        r1 (ndarray): A one dimensional numpy-array with d elements
            (position of the first particle)
        r2 (ndarray): A one dimensional numpy-array with d elements
            (position of the second particle)
        par1 (ndarray): A one dimensional numpy-array with 4 elements
            (charge, epsilon, sigma, mass) for the first particle
        par2 (ndarray): A one dimensional numpy-array with 4 elements
            (charge, epsilon, sigma, mass) for the second particle
        sigma_c (float): A positive float (width of the gaussian distribution
            used to shield the particle)
        box (ndarray): A one dimensional numpy-array with d elements
            (size of periodic box)
        r_cut (float): A positive float (cutoff radius)
        lj (boolean): If True the Lennard Jones force is calculated
        coulomb (boolean): If True the Coulomb force is calculated

    Returns:
        force * direction (ndarray): A one dimensional numpy-array with d elements
            (force acting on the first particle)
    """
    dist = pbc(r1 - r2, box)
    r12 = np.linalg.norm(dist)
    force = 0
    if r12 <= r_cut:
        if lj:
            epsilon = calc_eps(par1[1], par2[1])
            # combine the sigma parameters of both particles
            sigma_lj = calc_sig(par1[2], par2[2])
            rs = sigma_lj / r12
            force += 24 * epsilon / r12 * (2 * rs**12 - rs**6)
        if coulomb:
            q1 = par1[0]
            q2 = par2[0]
            f1 = erfc(r12 / (np.sqrt(2) * sigma_c)) / r12
            f2 = np.sqrt(2 / np.pi) / sigma_c * np.exp(- r12**2 / (2 * sigma_c**2))
            force += q1 * q2 / (4 * np.pi * eps * r12) * (f1 + f2)
    direction = dist / r12
    return force * direction
10c6eee7547f94c06e650a0a738aace3380de454
3,652,560
def delete_network_acl_entry(client, network_acl_id, num=100, egress=False, dry=True):
    """
    Delete a network acl entry
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.delete_network_acl_entry
    """
    try:
        response = client.delete_network_acl_entry(
            Egress=egress,
            NetworkAclId=network_acl_id,
            RuleNumber=num,
            DryRun=dry)
        print('Deleted %s %s' % (network_acl_id, ('(dry)' if dry else '')))
        return response
    except Exception as err:
        handle(err)
e27e476f2fe37e7e0150a97ebd4b5e3cb93e86b1
3,652,561
import time


def mp_run(data, process_num, func, *args):
    """ run func with multi process """
    level_start = time.time()
    partn = max(len(data) // process_num, 1)
    start = 0
    p_idx = 0
    ps = []
    while start < len(data):
        local_data = data[start:start + partn]
        start += partn
        p = mp.Process(target=func, args=(local_data, p_idx) + args)
        ps.append(p)
        p.start()
        p_idx += 1
    for p in ps:
        p.join()
    for p in ps:
        p.terminate()
    return p_idx
13576bb107eae5a49063bcba3d698eeb957dbb1e
3,652,562
def spell(corpus):
    """
    Train a Spelling Normalizer

    Parameters
    ----------
    corpus : list of strings. Prefer to feed with malaya.load_malay_dictionary().

    Returns
    -------
    SPELL_NORMALIZE: Trained malaya.normalizer._SPELL_NORMALIZE class
    """
    if not isinstance(corpus, list):
        raise ValueError('corpus must be a list')
    if not isinstance(corpus[0], str):
        raise ValueError('corpus must be list of strings')
    return _SPELL_NORMALIZE([unidecode(w) for w in corpus])
1aee5a941e1553f50540a5327ee0e3c4d1ce0bd3
3,652,563
def check_PA_vector(angle_list, unit='deg'): """ Checks if the angle list has the right format to avoid any bug in the pca-adi algorithm. The right format complies to 3 criteria: 1) angles are expressed in degree 2) the angles are positive 3) there is no jump of more than 180 deg between consecutive values (e.g. no jump like [350deg,355deg,0deg,5deg] => replaced by [350deg,355deg,360deg,365deg]) Parameters ---------- angle_list: 1D-array_like Vector containing the derotation angles unit: string, {'deg','rad'}, optional The unit type of the input angle list Returns ------- angle_list: 1-D array_like Vector containing the derotation angles (after correction to comply with the 3 criteria, if needed) """ angle_list = angle_list.copy() if unit != 'rad' and unit != 'deg': raise ValueError("The input unit should either be 'deg' or 'rad'") npa = angle_list.shape[0] for ii in range(npa): if unit == 'rad': angle_list[ii] = np.rad2deg(angle_list[ii]) if angle_list[ii] < 0: angle_list[ii] = 360+angle_list[ii] correct = False sorted_rot = np.sort(angle_list) # Check if there is a jump > 180deg within the angle list for ii in range(npa-1): if abs(sorted_rot[ii+1]-sorted_rot[ii]) > 180: correct = True break # In the previous case, correct for it by adding 360deg to angles < 180deg if correct: for ii in range(npa): if angle_list[ii] < 180: angle_list[ii] = 360+angle_list[ii] return angle_list
9f369de90008e60965625acd403a6431c64763fc
3,652,564
def client_id_to_org_type_id(client_id):
    """
    Client ID should be a string:
    "g:" + self._options['org'] + ":" + self._options['type'] + ":" + self._options['id'],
    """
    split = client_id.split(':')
    if len(split) != 4:
        raise InvalidClientId()
    org = split[1]
    device_type = split[2]
    device_id = split[3]
    return (org, device_type, device_id)
475058962f81760dc65b19ddbdc1d74e0ec2f55e
3,652,565
def get_total_implements():
    """Get the total number of implements requested in loans."""
    total_implements = 0
    for i in Loans.objects.all():
        total_implements += i.ammount_implements
    return total_implements
5b8e2b21f8c31e33c60518fd4fba20eded614f05
3,652,566
from typing import Optional
from typing import Union


def _parse_maybe_array(
    type_name: str,
    innermost_type: Optional[Union[ast_nodes.ValueType, ast_nodes.PointerType]]
) -> Union[ast_nodes.ValueType, ast_nodes.PointerType, ast_nodes.ArrayType]:
    """Internal-only helper that parses a type that may be an array type."""
    array_match = ARRAY_EXTENTS_PATTERN.search(type_name)
    if array_match:
        extents = tuple(
            int(s.strip()) for s in ARRAY_N_PATTERN.findall(array_match.group(0)))
        inner_type_str = type_name[:array_match.start()]
        return ast_nodes.ArrayType(
            inner_type=_parse_maybe_pointer(inner_type_str.strip(), innermost_type),
            extents=extents)
    else:
        return _parse_maybe_pointer(type_name, innermost_type)
8a284083e604688c2a1eff8767b6cb31b493cb07
3,652,567
def ema_decay_schedule(
    base_rate: jnp.ndarray,
    step: jnp.ndarray,
    total_steps: jnp.ndarray,
    use_schedule: bool,
) -> jnp.ndarray:
    """Anneals decay rate to 1 with cosine schedule."""
    if not use_schedule:
        return base_rate
    multiplier = _cosine_decay(step, total_steps, 1.)
    return 1. - (1. - base_rate) * multiplier
a6269162e1a93544031b241ff43e043971bec488
3,652,568
from typing import Callable


def _kill_filter(mm: MergedMiningCoordinator,
                 filter_fn: Callable[[MergedMiningStratumProtocol], bool]) -> int:
    """ Kill all workers for which `filter_fn` returns true. """
    count = 0
    for protocol in filter(filter_fn, mm.miner_protocols.values()):
        count += 1
        protocol.transport.abort()
    return count
8a73427e46a418bf1d3ba974f73992dce0f1ad8c
3,652,569
def get_node_layer_sort_preference(device_role):
    """Layer priority selection function

    Layer sort preference is designed as a numeric value.
    This function identifies it by LAYERS_SORT_ORDER object position by default.
    With numeric values, the logic may be improved without changes on NeXt app side.
    0 (null) results in an undefined layer position in NeXt UI.
    Valid indexes start with 1.
    """
    for i, role in enumerate(LAYERS_SORT_ORDER, start=1):
        if device_role == role:
            return i
    return 1
08fbdbcb272664498d3709ffc9f49dbb2042fef2
3,652,570
def is_anagram(s, t):
    """True if strings s and t are anagrams.
    """
    # We can use sorted() on a string, which will give a list of characters
    # == will then compare two lists of characters, now sorted.
    return sorted(s) == sorted(t)
2b615f8180bcaa598e24c0772893c9a528bc5153
3,652,571
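A usage sketch for is_anagram with invented inputs.

# Hypothetical example: sorted characters match only for true anagrams.
print(is_anagram("listen", "silent"))  # -> True
print(is_anagram("listen", "lists"))   # -> False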
def f1_score(labels, predict, name=None):
    """
    Streaming f1 score.
    """
    predictions = tf.floor(predict + 0.5)
    with tf.variable_scope(name, 'f1', (labels, predictions)):
        epsilon = 1e-7
        _, tp = tf.metrics.true_positives(labels, predictions)
        _, fn = tf.metrics.false_negatives(labels, predictions)
        _, fp = tf.metrics.false_positives(labels, predictions)
        precision = tf.div(tp, epsilon + tp + fp, name='precision')
        recall = tf.div(tp, epsilon + tp + fn, name='recall')
        f1 = 2.0 * precision * recall / (precision + recall + epsilon)
    return f1
243612cad4ca1a876ccfbccfe55fdeeed893d644
3,652,572
import requests def test_notify_matrix_plugin_fetch(mock_post, mock_get): """ API: NotifyMatrix() Server Fetch/API Tests """ # Disable Throttling to speed testing plugins.NotifyBase.request_rate_per_sec = 0 response_obj = { 'room_id': '!abc123:localhost', 'room_alias': '#abc123:localhost', 'joined_rooms': ['!abc123:localhost', '!def456:localhost'], # Login details 'access_token': 'abcd1234', 'user_id': '@apprise:localhost', 'home_server': 'localhost', } def fetch_failed(url, *args, **kwargs): # Default configuration request = mock.Mock() request.status_code = requests.codes.ok request.content = dumps(response_obj) if url.find('/rooms/') > -1: # over-ride on room query request.status_code = 403 request.content = dumps({ u'errcode': u'M_UNKNOWN', u'error': u'Internal server error', }) return request mock_get.side_effect = fetch_failed mock_post.side_effect = fetch_failed obj = plugins.NotifyMatrix( user='user', password='passwd', include_image=True) assert isinstance(obj, plugins.NotifyMatrix) is True # We would hve failed to send our image notification assert obj.send(user='test', password='passwd', body="test") is False # Do the same query with no images to fetch asset = AppriseAsset(image_path_mask=False, image_url_mask=False) obj = plugins.NotifyMatrix(user='user', password='passwd', asset=asset) assert isinstance(obj, plugins.NotifyMatrix) is True # We would hve failed to send our notification assert obj.send(user='test', password='passwd', body="test") is False # Disable Throttling to speed testing plugins.NotifyBase.request_rate_per_sec = 0 response_obj = { # Registration 'access_token': 'abcd1234', 'user_id': '@apprise:localhost', 'home_server': 'localhost', # For room joining 'room_id': '!abc123:localhost', } # Default configuration mock_get.side_effect = None mock_post.side_effect = None request = mock.Mock() request.status_code = requests.codes.ok request.content = dumps(response_obj) mock_post.return_value = request mock_get.return_value = request obj = plugins.NotifyMatrix(include_image=True) assert isinstance(obj, plugins.NotifyMatrix) is True assert obj.access_token is None assert obj._register() is True assert obj.access_token is not None # Cause retries request.status_code = 429 request.content = dumps({ 'retry_after_ms': 1, }) code, response = obj._fetch('/retry/apprise/unit/test') assert code is False request.content = dumps({ 'error': { 'retry_after_ms': 1, } }) code, response = obj._fetch('/retry/apprise/unit/test') assert code is False request.content = dumps({ 'error': {} }) code, response = obj._fetch('/retry/apprise/unit/test') assert code is False
27dde8766cdfd136104e647a5a97416a69982cb5
3,652,573
import copy def site_summary_data(query, notime=True, extra="(1=1)"): """ Summary of jobs in different states for errors page to indicate if the errors caused by massive site failures or not """ summary = [] summaryResources = [] # remove jobstatus from the query if 'jobstatus__in' in query: del query['jobstatus__in'] # remove the time window limit for active jobs table querynotime = copy.deepcopy(query) if notime: if 'modificationtime__castdate__range' in querynotime: del querynotime['modificationtime__castdate__range'] ejquery = {'jobstatus__in': ['failed', 'finished', 'closed', 'cancelled']} jvalues = ('cloud', 'computingsite', 'jobstatus', 'resourcetype', 'corecount') orderby = ('cloud', 'computingsite', 'jobstatus') summaryResources.extend( Jobsactive4.objects.filter(**querynotime).exclude(**ejquery).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby)) summaryResources.extend( Jobsactive4.objects.filter(**query).filter(**ejquery).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby)) summaryResources.extend( Jobsdefined4.objects.filter(**querynotime).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby)) summaryResources.extend( Jobswaiting4.objects.filter(**querynotime).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby)) summaryResources.extend( Jobsarchived4.objects.filter(**query).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby)) summaryResourcesDict = {} actualcorecount = 0 for sumS in summaryResources: if sumS['corecount'] is None: actualcorecount = 1 else: actualcorecount = sumS['corecount'] if sumS['cloud'] not in summaryResourcesDict: summaryResourcesDict[sumS['cloud']] = {} if sumS['computingsite'] not in summaryResourcesDict[sumS['cloud']]: summaryResourcesDict[sumS['cloud']][sumS['computingsite']] = {} if sumS['jobstatus'] not in summaryResourcesDict[sumS['cloud']][sumS['computingsite']]: summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']] = {} if sumS['resourcetype'] not in summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']]: summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']][sumS['resourcetype']] = { 'jobstatus__count': 0, 'corecount': actualcorecount } summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']][sumS['resourcetype']]['jobstatus__count'] += sumS['jobstatus__count'] summaryList = [] obj = {} for cloud in summaryResourcesDict.keys(): for site in summaryResourcesDict[cloud].keys(): for jobstatus in summaryResourcesDict[cloud][site].keys(): jobscount =0 obj['resource'] = {} for i, resource in enumerate(summaryResourcesDict[cloud][site][jobstatus]): if resource not in obj['resource']: obj['resource'][resource] = {} obj['resource'][resource]['jobstatus__count'] = {} if resource not in obj['resource']: obj['resource'][resource] = {} obj['resource'][resource]['corecount'] = {} obj['resource'][resource]['jobstatus__count'] = summaryResourcesDict[cloud][site][jobstatus][resource]['jobstatus__count'] obj['resource'][resource]['corecount'] = summaryResourcesDict[cloud][site][jobstatus][resource]['corecount'] jobscount += summaryResourcesDict[cloud][site][jobstatus][resource]['jobstatus__count'] if i == len(summaryResourcesDict[cloud][site][jobstatus]) - 1: obj['cloud'] = cloud obj['computingsite'] = site obj['jobstatus'] = jobstatus obj['jobstatus__count'] = jobscount summaryList.append(obj) obj = {} 
return summaryList
010ca33e4de15c74199fbf54c565119f493698cc
3,652,574
def Epsilon(u):
    """Vector symmetric gradient."""
    return Sym(Grad(u.transpose()))
ed1d163ca031ada0d1645029690fa53c3d2acfa0
3,652,575
def at(seq, msg, cmd=None, *args, **kwargs):
    """Output the comwdg"""
    return translator(seq)(*COMWDG_CMD)()
dd98234261731c3048444ab7d99ec6ed34eb62f1
3,652,576
def get_directory(f): """Get a directory in the form of a list of entries.""" entries = [] while 1: line = f.readline() if not line: print '(Unexpected EOF from server)' break if line[-2:] == CRLF: line = line[:-2] elif line[-1:] in CRLF: line = line[:-1] if line == '.': break if not line: print '(Empty line from server)' continue gtype = line[0] parts = line[1:].split(TAB) if len(parts) < 4: print '(Bad line from server: %r)' % (line,) continue if len(parts) > 4: if parts[4:] != ['+']: print '(Extra info from server:', print parts[4:], ')' else: parts.append('') parts.insert(0, gtype) entries.append(parts) return entries
fdd83e040f23f5ab84e0eb7cef457dfd66159f78
3,652,577
import random import os def init(provider=None): """ Runs through a questionnaire to set up your project's deploy settings """ if os.path.exists(DEPLOY_YAML): _yellow("\nIt looks like you've already gone through the questionnaire.") cont = prompt("Do you want to go through it again and overwrite the current one?", default="No") if cont.strip().lower() == "no": return None _green("\nWelcome to the django-deployer!") _green("\nWe need to ask a few questions in order to set up your project to be deployed to a PaaS provider.") # TODO: identify the project dir based on where we find the settings.py or urls.py django_settings = prompt( "* What is your Django settings module?", default="settings", validate=_validate_django_settings ) managepy = prompt( "* Where is your manage.py file?", default="./manage.py", validate=_validate_managepy ) requirements = prompt( "* Where is your requirements.txt file?", default="requirements.txt", validate=_validate_requirements ) # TODO: confirm that the file exists # parse the requirements file and warn the user about best practices: # Django==1.4.1 # psycopg2 if they selected PostgreSQL # MySQL-python if they selected MySQL # South for database migrations # dj-database-url pyversion = prompt("* What version of Python does your app need?", default="Python2.7") # TODO: get these values by reading the settings.py file static_url = prompt("* What is your STATIC_URL?", default="/static/") media_url = prompt("* What is your MEDIA_URL?", default="/media/") if not provider: provider = prompt("* Which provider would you like to deploy to (dotcloud, appengine, stackato, openshift)?", validate=_validate_providers) # Where to place the provider specific questions site = {} additional_site = {} if provider == "appengine": applicationid = prompt("* What's your Google App Engine application ID (see https://appengine.google.com/)?", validate=r'.+') instancename = prompt("* What's the full instance ID of your Cloud SQL instance\n" "(should be in format \"projectid:instanceid\" found at https://code.google.com/apis/console/)?", validate=r'.+:.+') databasename = prompt("* What's your database name?", validate=r'.+') sdk_location = prompt("* Where is your Google App Engine SDK location?", default="/usr/local/google_appengine", validate=r'.+' # TODO: validate that this path exists ) additional_site.update({ # quotes for the yaml issue 'application_id': applicationid, 'instancename': instancename, 'databasename': databasename, 'sdk_location': sdk_location, }) # only option with Google App Engine is MySQL, so we'll just hardcode it site = { 'database': 'MySQL' } elif provider == "openshift": application_name = prompt("* What is your openshift application name?") site = { 'application_name': application_name } else: database = prompt("* What database does your app use?", default="PostgreSQL") site = { 'database': database, } # TODO: add some validation that the admin password is valid # TODO: let the user choose the admin username instead of hardcoding it to 'admin' admin_password = prompt("* What do you want to set as the admin password?", validate=_validate_admin_password ) SECRET_KEY = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)]) SECRET_KEY = "'" + SECRET_KEY + "'" site.update({ 'pyversion': pyversion, 'django_settings': django_settings, 'managepy': managepy, 'requirements': requirements, 'static_url': static_url, 'media_url': media_url, 'provider': provider, 'admin_password': admin_password, 'secret_key': 
SECRET_KEY, }) site.update(additional_site) _create_deploy_yaml(site) return site
63b3ee1771369666528438edfb7f803dbafdc9ac
3,652,578
def get_worker_status(worker):
    """Retrieve worker status by worker ID from redis."""
    set_redis_worker_status_pool()
    global WORKER_STATUS_POOL
    # retrieve worker status
    r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
    res = r.get(WORKER_STATUS_KEY_TMPL % worker)
    return res.decode() if hasattr(res, "decode") else res
886817f7995bc8259891b10699ec4d26587e0653
3,652,579
def lif_r_psc_aibs_converter(config, syn_tau=[5.5, 8.5, 2.8, 5.8]):
    """Creates a nest glif_lif_r_psc object"""
    coeffs = config['coeffs']
    threshold_params = config['threshold_dynamics_method']['params']
    reset_params = config['voltage_reset_method']['params']
    params = {'V_th': coeffs['th_inf'] * config['th_inf'] * 1.0e03 + config['El_reference'] * 1.0e03,
              'g': coeffs['G'] / config['R_input'] * 1.0e09,
              'E_L': config['El'] * 1.0e03 + config['El_reference'] * 1.0e03,
              'C_m': coeffs['C'] * config['C'] * 1.0e12,
              't_ref': config['spike_cut_length'] * config['dt'] * 1.0e03,
              'a_spike': threshold_params['a_spike'] * 1.0e03,
              'b_spike': threshold_params['b_spike'] * 1.0e-03,
              'a_reset': reset_params['a'],
              'b_reset': reset_params['b'] * 1.0e03,
              'tau_syn': syn_tau,  # in ms
              'V_dynamics_method': 'linear_exact'}
    return params
091e45f44f9c777dac6c2b35fd51459a7947e301
3,652,580
def get_bigwig_values(bigwig_path, chrom_name, chrom_end, chrom_start=0):
    """
    Get the values for a genomic region of interest from a bigwig file.

    :param bigwig_path: Path to the bigwig file
    :param chrom_name: Chromosome name
    :param chrom_end: chromosome end
    :param chrom_start: chromosome start

    :return: Bigwig values from the region given
    """
    with pyBigWig.open(bigwig_path) as input_bw:
        return np.nan_to_num(input_bw.values(chrom_name, chrom_start, chrom_end, numpy=True))
37fe5a40a5fde1ccaee7cac32d8b9beb68a65c51
3,652,581
import logging


def dtool_config(files):
    """Provide default dtool config."""
    logger = logging.getLogger(__name__)
    # use environment variables instead of custom config file, see
    # https://github.com/jic-dtool/dtoolcore/pull/17
    # _original_environ = os.environ.copy()
    # inject configuration into environment:
    dtool_config = _read_json(files['dtool_config_path'])
    logger.debug("dtool config overrides:")
    _log_nested_dict(logger.debug, dtool_config)
    return dtool_config
4dc6df375e9f2d0bd9b099c601c3826601520f9c
3,652,582
def get_successors(state, maxwords):
    """Traverses state graph to find valid anagrams."""
    terminal = len(state['chars']) == 0

    # Check whether the state is invalid and should be pruned
    if not is_valid(state['anagram'], terminal, maxwords):
        return []

    # If valid terminal state, stop search and return
    if terminal:
        return [state['anagram']]

    # Continue to recursively explore subsequent states
    next_states = []
    for c in state['chars']:
        chars = state['chars'].copy()
        chars.subtract({c: 1})
        if chars[c] == 0:
            del chars[c]
        next_states.append({
            'anagram': state['anagram'] + c,
            'chars': chars,
        })

    # Add an additional next state for word breaks
    if state['anagram'] != '' and state['anagram'][-1] != ' ':
        next_states.append({
            'anagram': state['anagram'] + ' ',
            'chars': state['chars'],
        })

    anagrams = []
    for next_state in next_states:
        anagrams += get_successors(next_state, maxwords=maxwords)
    return anagrams
9c842edc378a781195ef41ed58c7952f216b642e
3,652,583
def read_and_parse_cdl_file(file_name): """ Reads relevant information from a "cdl" file """ if file_name is None: return None wl_map = {} bl_map = {} colclk_wl_map = {} # Parse line-by-line with open(file_name, "r") as fp: for line in fp: line = line.strip() if not line: continue if line.startswith("#"): continue fields = split_cdl_line(line) if not fields: continue # Row definition if fields[0] == "define_row": wl_idx = fields.index("-WL_range") row = 0 for pair in fields[wl_idx+1]: if isinstance(pair, list) and len(pair) == 2: wl_map[row] = (int(pair[0]), int(pair[1]),) row += 1 # Clock column definition elif fields[0] == "define_colclk_instances": wl_idx = fields.index("-WL_Port") row_idx = fields.index("-row") wl = int(fields[wl_idx+1]) row = int(fields[row_idx+1]) colclk_wl_map[row] = (wl, wl,) # Column definition elif fields[0] == "define_column": bl_idx = fields.index("-BL_range") col = 0 for pair in fields[bl_idx+1]: if isinstance(pair, list) and len(pair) == 2: bl_map[col] = (int(pair[0]), int(pair[1]),) col += 1 data = { "colclk_wl_map": colclk_wl_map, "wl_map": wl_map, "bl_map": bl_map, } return data
e1bfbb75f473932861bb2e804dd0609c62544cf3
3,652,584
def detect_outlier_at_index(
    srs: pd.Series,
    idx: int,
    n_samples: int,
    z_score_threshold: float,
) -> bool:
    """
    Check if a value at index `idx` in a series is an outlier.

    The passed series is supposed to be ordered by increasing timestamps.

    This function
    - detects z-score window index boundaries with respect to index order and number of samples
    - computes the z-score of the current element with respect to the z-score window values
    - compares the z-score to the threshold to declare the current element an outlier

    :param srs: input series
    :param idx: numerical index of a value to check
    :param n_samples: number of samples in z-score window
    :param z_score_threshold: threshold to mark a value as an outlier based on
        its z-score in the window
    :return: whether the element at index idx is an outlier
    """
    # Set z-score window boundaries.
    window_first_index = max(0, idx - n_samples)
    # Get a series window to compute z-score for.
    window_srs = srs.iloc[window_first_index: idx + 1]
    # Compute z-score of a value at index.
    z_score = (srs.iloc[idx] - window_srs.mean()) / window_srs.std()
    # Return if a value at index is an outlier.
    # Done via `<=` since a series can contain None values that should be detected
    # as well but will result in NaN if compared to the threshold directly.
    is_outlier = not (abs(z_score) <= z_score_threshold)
    return is_outlier
65a4d7e661f6cf4641d9cd82d1bb31c5e2d21616
3,652,585
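A small usage sketch for the z-score outlier check above; the series values and threshold are invented, and pandas is assumed to be available as pd as in the snippet.

# Hypothetical example: the final spike stands out within a 5-sample window.
import pandas as pd
srs = pd.Series([1.0, 1.1, 0.9, 1.0, 1.05, 10.0])
print(detect_outlier_at_index(srs, idx=5, n_samples=5, z_score_threshold=1.5))  # -> True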
def _organize_parameter(parameter): """ Convert operation parameter message to its dict format. Args: parameter (OperationParameter): Operation parameter message. Returns: dict, operation parameter. """ parameter_result = dict() parameter_keys = [ 'mapStr', 'mapBool', 'mapInt', 'mapDouble', ] for parameter_key in parameter_keys: base_attr = getattr(parameter, parameter_key) parameter_value = dict(base_attr) # convert str 'None' to None for key, value in parameter_value.items(): if value == 'None': parameter_value[key] = None parameter_result.update(parameter_value) # drop `mapStrList` and `strValue` keys in result parameter str_list_para = dict(getattr(parameter, 'mapStrList')) result_str_list_para = dict() for key, value in str_list_para.items(): str_list_para_list = list() for str_ele in getattr(value, 'strValue'): str_list_para_list.append(str_ele) str_list_para_list = list(map(lambda x: None if x == '' else x, str_list_para_list)) result_str_list_para[key] = str_list_para_list parameter_result.update(result_str_list_para) return parameter_result
8cbd7c863bb244e71266a573ba756647d0ba13ea
3,652,586
def colorpicker(request):
    """
    Controller for the app home page.
    """
    my_param = MyParamColor()
    context = get_context(request, my_param)
    return render(request, 'tethys_django_form_tutorial/colorpicker.html', context)
071f587683a24c101a7963a3934c989570c0fa66
3,652,587
def translate_date(default=defaults.get('language')):
    """Parse/translate a date."""
    d = request.args.get('date')
    if not d:
        raise RuntimeError(_('Date is mandatory.'))
    dest_lang = request.args.get('dest') if request.args.get('dest') else default
    variation = request.args.get('variation') if request.args.get('variation') else 'short'
    d_list = d.split('/')
    if request.args.get('src') == 'es':
        d = date(year=int(d_list[2]), month=int(d_list[1]), day=int(d_list[0]))
    else:
        d = date(*d_list)
    return render_template_string(source=get_date(d=d, f=variation, l=dest_lang))
ada6f4416e227414dfc6f32fc3387c8b38830e70
3,652,588
from typing import Any
from typing import Union
from typing import Optional


def check_call(
    *command: Any,
    working_directory: Union[PathLike, str] = ".",
    verbose: bool = False,
    quoted: bool = False,
    **kwargs: Any,
) -> Optional[str]:
    """Proxy for subprocess.check_call"""
    return check_run(
        *command,
        working_directory=working_directory,
        verbose=verbose,
        quoted=quoted,
        **kwargs
    )
384cd78599355e694445a7c682613672bba374a1
3,652,589
from typing import Tuple


def fit_client(client: Client, weights: Weights) -> Tuple[Weights, int]:
    """Refine weights on a single client."""
    return client.fit(weights)
db8e6003f452a5147274ac6e83df7d216ca46c91
3,652,590
def _find_rpms_in_packages(koji_api, name_list, major_minor):
    """
    Given a list of package names, look up the RPMs that are built in them.
    Of course, this is an inexact science to do generically; contents can
    vary from build to build, and multiple packages could build the same RPM name.
    We will first look for the latest build in the tags for the given major_minor version.
    If not there, we will look in brew for the package name and choose the latest build.

    :koji_api: existing brew connection
    :name_list: list of package names to search for
    :major_minor: minor version of OCP to search for builds in
    Returns: a map of package_name: set(rpm_names)
    """
    rpms_for_package = {}
    tags = _tags_for_version(major_minor)
    for package in name_list:
        for tag in tags:
            for build in koji_api.getLatestBuilds(tag=tag, package=package):
                rpm_list = set(rpm["name"] for rpm in koji_api.listBuildRPMs(build["build_id"]))
                rpms_for_package.setdefault(package, set()).update(rpm_list)

        if package not in rpms_for_package:
            # it wasn't in our tags; look for it by name
            pkg_info = koji_api.getPackage(package)
            if not pkg_info:
                continue
            latest_builds = koji_api.listBuilds(packageID=pkg_info["id"], state=1, queryOpts=dict(limit=1))
            if not latest_builds:
                continue
            rpm_list = set(rpm["name"] for rpm in koji_api.listBuildRPMs(latest_builds[0]["build_id"]))
            rpms_for_package[package] = set(rpm_list)

    return rpms_for_package
edfb55f0b6997d8f930c8d93c2ee1be1c111bcfc
3,652,591
def calculate_algorithm_tags(analyses):
    """
    Calculate the algorithm tags (eg. "ip", True) that should be applied to a sample
    document based on a list of its associated analyses.

    :param analyses: the analyses to calculate tags for
    :type analyses: list

    :return: algorithm tags to apply to the sample document
    :rtype: dict
    """
    pathoscope = False
    nuvs = False

    for analysis in analyses:
        if pathoscope is not True and analysis["algorithm"] in PATHOSCOPE_TASK_NAMES:
            pathoscope = analysis["ready"] or "ip" or pathoscope

        if nuvs is not True and analysis["algorithm"] == "nuvs":
            nuvs = analysis["ready"] or "ip" or nuvs

        if pathoscope is True and nuvs is True:
            break

    return {
        "pathoscope": pathoscope,
        "nuvs": nuvs
    }
b2b13e3a0ccd21f446c5406baa966b2c0c4c6be9
3,652,592
import json


def open_json(filepath):
    """
    Returns open .json file in python as a list.

    :param: .json file path

    :returns: list
    :rvalue: str
    """
    with open(filepath) as f:
        notes = json.load(f)
    return notes
a7cae15880ee1caaaf7bfa8c1aec98f5f83debe7
3,652,593
import json


def remove_user_list():
    """
    Endpoint to remove a specific list or a complete user
    ---
    tags:
      - User Methods
    parameters:
      - name: user
        type: string
        in: query
        required: true
        description: user you want to query
      - name: list
        type: string
        in: query
        required: false
        description: specific list that belong to a user
    responses:
      400:
        description: Incorrect dbs used
      200:
        description: Your list was deleted
    """
    to_remove_user = request.args.get('user')
    if to_remove_user is not None:
        validation = data_validator.validate_json_for_user(to_remove_user)
    to_remove_list = request.args.get('list', default=None)
    if to_remove_list is not None:
        data_validator.validate_json_for_list(to_remove_list)
    return_object = logic_for_users_controller_delete.worker_for_delete(to_remove_user, to_remove_list)
    if return_object['status_code'] == 200:
        res = json.dumps(return_object['body'], indent=4)
        return res, 200
    else:
        res = json.dumps(return_object['body'].__dict__, indent=4)
        return res, return_object['status_code']
b53660edd56fcf5bbe061331d5f2b8756f621dd8
3,652,594
import requests def upload(f, content_type, token, api_key): """Upload a file with the given content type to Climate This example supports files up to 5 MiB (5,242,880 bytes). Returns The upload id if the upload is successful, False otherwise. """ uri = '{}/v4/uploads'.format(api_uri) headers = { 'authorization': bearer_token(token), 'x-api-key': api_key } md5 = file.md5(f) length = file.length(f) data = { 'md5': md5, 'length': length, 'contentType': content_type } # initiate upload res = requests.post(uri, headers=headers, json=data) Logger().info(to_curl(res.request)) if res.status_code == 201: upload_id = res.json() Logger().info("Upload Id: %s" % upload_id) put_uri = '{}/{}'.format(uri, upload_id) # for this example, size is assumed to be small enough for a # single upload (less than or equal to 5 MiB) headers['content-range'] = 'bytes {}-{}/{}'.format(0, (length - 1), length) headers['content-type'] = binary_content_type f.seek(0) # send image for position in range(0, length, CHUNK_SIZE): buf = f.read(CHUNK_SIZE) headers['content-range'] = 'bytes {}-{}/{}'.format( position, position + len(buf) - 1, length) try: res = requests.put(put_uri, headers=headers, data=buf) Logger().info(headers) except Exception as e: Logger().error("Exception: %s" % e) if res.status_code == 204: return upload_id return False
d6ead1f029811ec5894848b71841fd008068cee0
3,652,595
def get_node_name_centres(nodeset: Nodeset, coordinates_field: Field, name_field: Field): """ Find mean locations of node coordinate with the same names. :param nodeset: Zinc Nodeset or NodesetGroup to search. :param coordinates_field: The coordinate field to evaluate. :param name_field: The name field to match. :return: Dict of names -> coordinates. """ components_count = coordinates_field.getNumberOfComponents() fieldmodule = nodeset.getFieldmodule() fieldcache = fieldmodule.createFieldcache() name_records = {} # name -> (coordinates, count) nodeiter = nodeset.createNodeiterator() node = nodeiter.next() while node.isValid(): fieldcache.setNode(node) name = name_field.evaluateString(fieldcache) coordinates_result, coordinates = coordinates_field.evaluateReal(fieldcache, components_count) if name and (coordinates_result == RESULT_OK): name_record = name_records.get(name) if name_record: name_centre = name_record[0] for c in range(components_count): name_centre[c] += coordinates[c] name_record[1] += 1 else: name_records[name] = [ coordinates, 1 ] node = nodeiter.next() # divide centre coordinates by count name_centres = {} for name in name_records: name_record = name_records[name] name_count = name_record[1] name_centre = name_record[0] if name_count > 1: scale = 1.0/name_count for c in range(components_count): name_centre[c] *= scale name_centres[name] = name_centre return name_centres
2dc1e670999d9491e52efce02e5d7ecd22b75226
3,652,596
from enum import Enum


def pred(a):
    """
    pred :: a -> a

    the predecessor of a value.

    For numeric types, pred subtracts 1.
    """
    return Enum[a].pred(a)
070bf20e7b7ecd694806e78bd705e872b2fd8464
3,652,597
import sys


def main():
    """
    Entry point of the app.
    """
    if len(sys.argv) != 2:
        print(f"{sys.argv[0]} [SERVER_LIST_FILE]")
        return 1

    return process(server_list_file=sys.argv[1])
1fefab9f590db251dd68dc703d862f2818337d14
3,652,598
def pascal_to_snake(pascal_string):
    """Return a snake_string for a given PascalString."""
    camel_string = _pascal_to_camel(pascal_string)
    snake_string = _camel_to_snake(camel_string)

    return "".join(snake_string)
69c54fd8600878af2a8d168659a781b8389419ce
3,652,599
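A usage sketch for pascal_to_snake; the input name is invented, and the helper functions _pascal_to_camel and _camel_to_snake are assumed to be defined alongside the snippet.

# Hypothetical example (expected output, given typical camel/snake helpers):
print(pascal_to_snake("MyHttpServer"))  # expected -> 'my_http_server'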