Dataset schema:
    content    string    length 35 to 762k
    sha1       string    length 40
    id         int64     0 to 3.66M
from typing import Any, Optional


def read(db,
         query: Optional[dict] = None,
         pql: Any = None,
         order_by: Optional[list] = None,
         limit: Optional[int] = None,
         offset: Optional[int] = None,
         disable_count_total: bool = False,
         **kwargs):
    """Read data from DB.

    Args:
        db (MontyCollection): DB connection
        query (dict or Query): Query to select items
        pql (PQL): Python-Query-Language to select items
        order_by (list): columns to sort by, in the format [(column1, 1 or -1), ...]
        limit (int): number of items to return per page
        offset (int): offset of cursor
        disable_count_total (bool): set True to avoid counting the total number of records
        **kwargs: kwargs for function `pandas.read_sql_query`
            or `influxdb.DataFrameClient.query`

    Returns:
        (list, int): list of data and total number of records
    """
    if limit is None:
        limit = 0
    if offset is None:
        offset = 0

    if pql is not None and query is not None:
        raise ValueError('Only one of query or pql can be specified')
    if pql:
        query = PQL.find(pql)

    if query:
        query = _fix_query_exists(query)
        if order_by is None:
            data = db.find(query).skip(offset).limit(limit)
        else:
            data = db.find(query).sort(order_by).skip(offset).limit(limit)
        count_total = db.count(query) if not disable_count_total else None
    else:
        if order_by is None:
            data = db.find().skip(offset).limit(limit)
        else:
            data = db.find().sort(order_by).skip(offset).limit(limit)
        count_total = db.count({}) if not disable_count_total else None

    data = list(data)
    count_total = count_total if count_total is not None else len(data)

    return data, count_total
b2153ce1b83de7f3f7dd5311a619a0623aedc01b
3,649,854
def check_horizontal(board: list) -> bool:
    """
    Check that every line of the board contains only unique elements.
    Return True if they do, False otherwise.

    >>> check_horizontal(["**** ****",\
                          "***1 ****",\
                          "** 3****",\
                          "* 4 1****",\
                          " 9 5 ",\
                          " 6 83 *",\
                          "3 1 **",\
                          " 8 2***",\
                          " 12 ****"])
    True
    >>> check_horizontal(["**** ****",\
                          "***1 ****",\
                          "** 3****",\
                          "* 4 1****",\
                          " 9 5 ",\
                          " 6 83 *",\
                          "3 1 **",\
                          " 8 2***",\
                          "112 ****"])
    False
    """
    unique = True
    for line in board:
        if not check_unique(list(line)):
            unique = False
            break
    return unique
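A small usage sketch follows; `check_unique` is not part of this record, so a minimal assumed implementation is included (non-digit cells such as '*' and ' ' are treated as empty, and only the digits must be unique):

# Assumed helper: only digit cells count towards uniqueness.
def check_unique(cells: list) -> bool:
    digits = [c for c in cells if c.isdigit()]
    return len(digits) == len(set(digits))

board = ["12*", "*21"]
print(check_horizontal(board))  # True: each row's digits are unique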
0769f0821637c78c1a18e387eb64d6234a0ced5c
3,649,855
import math

import pygame  # assumed import: the function uses pygame, but only `math` was imported in the record


def update_events(dt: float, pos_x: float, pos_y: float, dir_x: float,
                  dir_y: float, plane_x: float, plane_y: float):
    """Updates player position in response to user input."""
    for e in pygame.event.get():
        if e.type == pygame.KEYDOWN:
            if e.key == pygame.K_ESCAPE:
                pygame.quit()
                raise SystemExit
        elif e.type == pygame.QUIT:
            pygame.quit()
            raise SystemExit

    move_speed: float = dt * 5.0
    rot_speed: float = dt * 3.0
    pressed = pygame.key.get_pressed()

    new_xpos_plus: int = int(pos_x + dir_x * move_speed)
    new_ypos_plus: int = int(pos_y + dir_y * move_speed)
    if pressed[pygame.K_UP]:
        if not WORLD_MAP[new_xpos_plus][int(pos_y)]:
            pos_x += dir_x * move_speed
        if not WORLD_MAP[int(pos_x)][new_ypos_plus]:
            pos_y += dir_y * move_speed

    new_xpos_minus: int = int(pos_x - dir_x * move_speed)
    new_ypos_minus: int = int(pos_y - dir_y * move_speed)
    if pressed[pygame.K_DOWN]:
        if not WORLD_MAP[new_xpos_minus][int(pos_y)]:
            pos_x -= dir_x * move_speed
        if not WORLD_MAP[int(pos_x)][new_ypos_minus]:
            pos_y -= dir_y * move_speed

    if pressed[pygame.K_RIGHT]:
        # Rotate direction vector and camera plane clockwise by rot_speed.
        old_dir_x: float = dir_x
        dir_x = dir_x * math.cos(-rot_speed) - dir_y * math.sin(-rot_speed)
        dir_y = old_dir_x * math.sin(-rot_speed) + dir_y * math.cos(-rot_speed)
        old_plane_x: float = plane_x
        plane_x = plane_x * math.cos(-rot_speed) - plane_y * math.sin(-rot_speed)
        plane_y = old_plane_x * math.sin(-rot_speed) + plane_y * math.cos(-rot_speed)

    if pressed[pygame.K_LEFT]:
        # Rotate direction vector and camera plane counter-clockwise.
        old_dir_x = dir_x
        dir_x = dir_x * math.cos(rot_speed) - dir_y * math.sin(rot_speed)
        dir_y = old_dir_x * math.sin(rot_speed) + dir_y * math.cos(rot_speed)
        old_plane_x = plane_x
        plane_x = plane_x * math.cos(rot_speed) - plane_y * math.sin(rot_speed)
        plane_y = old_plane_x * math.sin(rot_speed) + plane_y * math.cos(rot_speed)

    return pos_x, pos_y, dir_x, dir_y, plane_x, plane_y
e43cc7a2e6ab3f35637bf4ab37baefed96279656
3,649,857
def deg_to_xyz(lat_deg, lon_deg, altitude):
    """
    http://www.oc.nps.edu/oc2902w/coord/geodesy.js

    lat, lon, altitude to xyz vector

    input:
        lat_deg   geodetic latitude in deg
        lon_deg   longitude in deg
        altitude  altitude in km
    output:
        returns vector, 3 long, ECEF in km
    """
    clat = cos(radians(lat_deg))
    slat = sin(radians(lat_deg))
    clon = cos(radians(lon_deg))
    slon = sin(radians(lon_deg))
    _, rn, _ = radcur(lat_deg)
    ecc = EARTH_Ecc
    esq = ecc * ecc

    x = (rn + altitude) * clat * clon
    y = (rn + altitude) * clat * slon
    z = ((1 - esq) * rn + altitude) * slat

    return [x, y, z]
0493132eb0658026727d7a292862fcf2d5d6d48b
3,649,858
import numpy as np


def remove_unused_colours(ip, line_colours):
    """
    >>> remove_unused_colours(np.array([[0,0,3], [1,5,1], [2,0,6], [2,2,2], [4,4,0]]), {2, 4})
    array([[0, 0, 0],
           [0, 0, 0],
           [2, 0, 0],
           [2, 2, 2],
           [4, 4, 0]])
    """
    # Get a list of all unique colours.
    all_colours = list(np.unique(ip))
    # Remove the background colour 0.
    all_colours.remove(0)
    # Remove the line colours.
    for line_colour in line_colours:
        all_colours.remove(line_colour)
    # Turn every other colour (i.e. neither the background colour of zero
    # nor a line colour) into the background colour 0.
    for each_colour in all_colours:
        ip[np.where(ip == each_colour)] = 0
    return ip
7e80cbb2e3e9ac86da4cf7d6e99a6d9bf2edeead
3,649,859
def extract_info(spec):
    """Extract information from the instance SPEC."""
    info = {}
    info['name'] = spec.get('InstanceTypeId')
    info['cpu'] = spec.get('CpuCoreCount')
    info['memory'] = spec.get('MemorySize')
    info['nic_count'] = spec.get('EniQuantity')
    info['disk_quantity'] = spec.get('DiskQuantity')
    if spec.get('LocalStorageAmount'):
        info['disk_count'] = spec.get('LocalStorageAmount')
        info['disk_size'] = spec.get('LocalStorageCapacity')
        info['disk_type'] = spec.get('LocalStorageCategory')

    # Some special families use NVMe driver for local disks
    _families = ['ecs.i3', 'ecs.i3g']
    if spec.get('InstanceTypeFamily') in _families:
        info['local_disk_driver'] = 'nvme'
    else:
        info['local_disk_driver'] = 'virtio_blk'

    # Some special families use NVMe driver for cloud disks
    _families = ['ecs.g7se']
    if spec.get('InstanceTypeFamily') in _families:
        info['cloud_disk_driver'] = 'nvme'
    else:
        info['cloud_disk_driver'] = 'virtio_blk'

    # Some security-enhanced instance families have 50% encrypted memory
    _families = ['ecs.c7t', 'ecs.g7t', 'ecs.r7t']
    if spec.get('InstanceTypeFamily') in _families:
        info['memory'] = int(info['memory'] * 0.5)

    return info
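For reference, a small usage sketch with a hand-made spec dict; the field names come from the function above, the values are purely illustrative:

spec = {
    'InstanceTypeId': 'ecs.g7t.large',
    'CpuCoreCount': 2,
    'MemorySize': 8,
    'EniQuantity': 2,
    'DiskQuantity': 17,
    'InstanceTypeFamily': 'ecs.g7t',
}
info = extract_info(spec)
assert info['memory'] == 4                       # g7t reserves 50% of memory
assert info['cloud_disk_driver'] == 'virtio_blk'  # g7t is not in the g7se family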
7f93dcad1a8d99743a30d441dad64c2b9af08037
3,649,860
def sum_values(**d):  # Docstring example: write it the way you would write a git commit message.
    """Return the sum of the dict's values.

    The keys are ignored entirely, whatever they are.
    """
    return sum_func(*d.values())
29b90a04760376d2b8f6844994a7341fa742f05d
3,649,861
def parse_title(title):
    """Parse strings from lineageos json.

    :param title: format should be `code - brand phone`
    """
    split_datum = title.split(' - ')
    split_name = split_datum[1].split(' ')

    device = split_datum[0]
    brand = split_name[0]
    name = ' '.join(split_name[1:])

    return [brand, name, device, device]
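A usage sketch with an illustrative LineageOS-style title string (the device name here is made up for the example):

brand, name, device, _ = parse_title('bacon - OnePlus One')
# brand == 'OnePlus', name == 'One', device == 'bacon'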
c3783ab36f4f7e021bdd5f0f781bb289ab2d458f
3,649,862
def addCountersTransactions(b):
    """Step 2: The above list with count as the last element, e.g.
        [[1, 1, 0, 1], [0, 0, 0, 4], [1, 1, 1, 3]]
    should be converted in the following way
        [[1, 1, 0, 1, 0], [1, 1, 1, 3, 4]]
    with cnt1 and cnt2 for the anti-mirroring technique.

    Algorithm
    =========
    Check the first element of each list item.
        If it is 1, cnt2 = 0.
        If it is 0,
            NOT the values of the list except the last item (count).
            Check whether the NOT-ed list matches an existing 1-valued list.
                If it matches, add the last count to cnt2 of the matched list,
                else add a new entry with the last count as cnt2 and cnt1 as 0.
    """
    temp_list2 = []
    t1list = []
    for i in range(len(b)):
        if b[i][0] == 1:
            # Item already starts with 1: cnt2 = 0.
            b[i] = b[i] + [0]
            remove_counts(b[i], t1list)
            temp_list2.append(b[i])
        if b[i][0] == 0:
            # NOT every value except the last one (the count); the count
            # itself is moved into the cnt2 position.
            for item in range(len(b[i])):
                if b[i][item] == 0:
                    b[i][item] = 1
                else:
                    if item != len(b[i]) - 1:
                        b[i][item] = 0
                    else:
                        b[i] = b[i] + [b[i][item]]
                        b[i][item] = 0
            temp = b[i]
            tlist = []
            remove_counts(temp, tlist)
            temp_list2.append(b[i])
            # TODO: matching the NOT-ed list against the existing 1-valued
            # lists (to merge the counts into cnt2) was left unfinished in
            # the original code.
    return temp_list2


'''Step 3:
Do {I1} {I2} and {In}
Then check for support and prune the list
Do the above step for all the subsets and prune with support

To compute {I1}, {I2}, ... {In}
1. For loop i to len(items)
2. Check for the ith item in the lists,
    If it is 1, sum up cnt1 and put it in Ii
    If it is 0, sum up cnt2 and put it in Ii
3. Print all Ii's
'''
44fb81280fc7540c796e6f8308219147993c6b7a
3,649,863
import typing

import numpy as np
import torch


def aggregate_layers(
    hidden_states: dict, mode: typing.Union[str, typing.Callable]
) -> dict:
    """Aggregate a hidden-states dictionary (key = layer, value = 2D array of
    n_tokens x emb_dim) across tokens.

    Args:
        hidden_states (dict): key = layer (int), value = 2D PyTorch tensor
            of shape (n_tokens, emb_dim)
        mode: aggregation mode ("last", "first", "mean", "median", "sum",
            "all"/None) or a callable applied per layer

    Raises:
        NotImplementedError

    Returns:
        dict: key = layer, value = array of emb_dim
    """
    states_layers = dict()
    emb_aggregation = mode
    # iterate over layers
    for i in hidden_states.keys():
        if emb_aggregation == "last":
            state = hidden_states[i][-1, :]  # get last token
        elif emb_aggregation == "first":
            state = hidden_states[i][0, :]  # get first token
        elif emb_aggregation == "mean":
            state = torch.mean(hidden_states[i], dim=0)  # mean over tokens
        elif emb_aggregation == "median":
            # torch.median with dim returns (values, indices); keep the values
            state = torch.median(hidden_states[i], dim=0).values
        elif emb_aggregation == "sum":
            state = torch.sum(hidden_states[i], dim=0)  # sum over tokens
        elif emb_aggregation == "all" or emb_aggregation is None:
            state = hidden_states[i]  # keep all token embeddings of the layer
        elif callable(emb_aggregation):
            state = emb_aggregation(hidden_states[i])
        else:
            raise NotImplementedError(
                f"Sentence embedding method [{emb_aggregation}] not implemented"
            )
        states_layers[i] = state.detach().cpu().numpy()

    return states_layers
21c91a4c031c561b6776a604aa653c3880d69b15
3,649,864
def get_bg_stat_info(int_faces, adj_list, face_inds, face_inds_new):
    """Output a list of faces and a list of verts for each stat."""
    stat_faces = []
    stat_verts = []
    for k in range(len(int_faces)):
        # Skip faces that already exist.
        if int_faces[k] != 0:
            continue
        # See if there are any adjacent faces.
        for j in range(len(adj_list[k])):
            if int_faces[adj_list[k][j]] != 0 and int_faces[adj_list[k][j - 1]] != 0:
                # Find the relevant vertices.
                stat_verts_new = find_vertex_ind(
                    k, adj_list[k][j], adj_list[k][j - 1], face_inds, face_inds_new
                )
                if stat_verts_new is not None:
                    stat_faces.append([k, adj_list[k][j], adj_list[k][j - 1]])
                    stat_verts.append(stat_verts_new)
    return stat_faces, stat_verts
262130ffcb4fe474ece01ed6a63705efdaac360c
3,649,865
def config_data() -> dict:
    """Dummy config data."""
    return {
        "rabbit_connection": {
            "user": "guest",
            "passwd": "guest",
            "host": "localhost",
            "port": 5672,
            "vhost": "/",
        },
        "queues": {"my_queue": {"settings": {"durable": True}, "limit": 0}},
        "queue_limits": {0: ["my_queue"], 1: ["my_other_queue"]},
        "notifiers": {
            "smtp": {
                "host": "localhost",
                "user": None,
                "passwd": None,
                "from_addr": "[email protected]",
                "to_addr": ["[email protected]"],
                "subject": "AMQPeek - RMQ Monitor",
            },
            "slack": {"api_key": "apikey", "username": "ampeek", "channel": "#general"},
        },
    }
cbbed3baf79b5928be47d3d00c747ac6be625ae5
3,649,867
def copy_linear(net, net_old_dict):
    """Copy linear layers stored within net_old_dict to net."""
    net.linear.weight.data = net_old_dict["linears.0.weight"].data
    net.linear.bias.data = net_old_dict["linears.0.bias"].data
    return net
8ba7f40e72b65ebef9948025b3404cbc5a660960
3,649,868
async def read_book(request: Request) -> dict:
    """Record that a book was read (inserts a readers_books row)."""
    data = await request.json()
    query = readers_books.insert().values(**data)
    last_record_id = await database.execute(query)
    return {"id": last_record_id}
e2ec15df60e2e8a5974c16688a9e5caa8c4452d8
3,649,869
def setup_dev():
    """Runs the set-up needed for local development."""
    return setup_general()
889153114ffecd74c50530e867a03128279fc26f
3,649,870
def countAllAnnotationLines(
    mpqa_dir="mpqa_dataprocessing\\database.mpqa.cleaned",
    doclist_filename='doclist.2.0'
):
    """It counts all annotation lines available in all documents of a corpus.

    :return: an integer
    """
    m2d = mpqa2_to_dict(mpqa_dir=mpqa_dir)
    mpqadict = m2d.corpus_to_dict(doclist_filename=doclist_filename)
    count = 0
    for doc in mpqadict['doclist']:  # Iterate over all docs
        count += len(mpqadict['docs'][doc]['annotations'].keys())
    return count
2a1c981db125db163e072eb495144be2b004a096
3,649,871
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator


def convergence(report: Report, **kwargs):
    """
    Function that displays the convergence using an antco.report.Report object.

    Parameters
    ----------
    report: antco.report.Report
        antco.report.Report instance returned by the antco.run() function.

    **kwargs
        figsize: tuple, default=(8, 5)
            Tuple indicating the size of the figure.
        title: str, default='Convergence'
            Plot title.
        alpha_grid: float, default=0.2
            Transparency of the grid lines of the plot.
        alpha_graph: float, default=0.7
            Transparency of the lines of the plot.
        save_plot: str, default=None
            File in which to save the generated graph; if no value is
            provided the graph will not be saved.

    Returns
    -------
    :matplotlib.pyplot.Fig
        Figure with convergence graph.
    """
    def _draw(ax_, params_: dict, alpha_: float, color_: str, label_: str,
              linestyle_: str, linewidth_: int):
        x = np.arange(len(params_))
        y = [np.mean(vals) for vals in params_.values()]
        ax_.plot(x, y, color=color_, label=label_, alpha=alpha_,
                 linestyle=linestyle_, linewidth=linewidth_)
        return ax_

    # Check that the parameters necessary to represent convergence can be obtained.
    try:
        report.get('mean_cost')
    except Exception:
        raise Exception(
            'The Report instance does not have the "mean_cost" value, make sure you have '
            'saved the "mean_cost" value throughout the iterations of the algorithm using '
            'the method report.save("mean_cost").')
    try:
        report.get('max_cost')
    except Exception:
        raise Exception(
            'The Report instance does not have the "max_cost" value, make sure you have '
            'saved the "max_cost" value throughout the iterations of the algorithm using '
            'the method report.save("max_cost").')

    parameters = {
        'mean_cost': {'color': '#85C1E9', 'label': 'Average cost',
                      'linestyle': 'solid', 'linewidth': 3},
        'max_cost': {'color': '#AF7AC5', 'label': 'Max cost',
                     'linestyle': 'dashed', 'linewidth': 2}}

    # Get optional arguments
    figsize = kwargs.get('figsize', (8, 5))
    title = kwargs.get('title', 'Convergence')
    alpha_graph = kwargs.get('alpha_graph', 0.7)
    alpha_grid = kwargs.get('alpha_grid', 0.2)
    save_plot = kwargs.get('save_plot', None)

    fig, ax = plt.subplots(figsize=figsize)

    for param, values in parameters.items():
        ax = _draw(ax, report.get(param), alpha_graph, values['color'],
                   values['label'], values['linestyle'], values['linewidth'])

    ax.set_xlabel('Iteration')
    ax.set_ylabel('Cost')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(alpha=alpha_grid)
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), fancybox=True,
              shadow=True, ncol=len(parameters))
    ax.set_title(title)

    if save_plot is not None:
        plt.savefig(save_plot, dpi=150)

    return fig
523e64b68d88d705f22a5c31faecee51e5e59b2d
3,649,872
def ca_set_container_policies(h_session, h_container, policies):
    """
    Set multiple container policies.

    :param int h_session: Session handle
    :param h_container: target container handle
    :param policies: dict of policy ID ints and value ints
    :return: result code
    """
    h_sess = CK_SESSION_HANDLE(h_session)
    container_id = CK_ULONG(h_container)
    pol_id_list = list(policies.keys())
    pol_val_list = list(policies.values())
    pol_ids = AutoCArray(data=pol_id_list, ctype=CK_ULONG)
    pol_vals = AutoCArray(data=pol_val_list, ctype=CK_ULONG)

    ret = CA_SetContainerPolicies(
        h_sess, container_id, pol_ids.size.contents, pol_ids.array, pol_vals.array
    )

    return ret
b4c56108d137d8caa6fa65f6ffcfd8c649af1840
3,649,874
import numpy as np


def extend(arr, num=1, log=True, append=False):
    """Extend the given array by extrapolation.

    Arguments
    ---------
       arr    <flt>[N] : array to extend
       num    <int>    : number of points to add (on each side)
       log    <bool>   : extrapolate in log-space
       append <bool>   : add the extended points onto the given array

    Returns
    -------
       retval <flt>[M] : extension (or input ``arr`` with extension added, if ``append``).
    """
    if log:
        useArr = np.log10(arr)
    else:
        useArr = np.array(arr)

    steps = np.arange(1, num + 1)
    left = useArr[0] + (useArr[0] - useArr[1]) * steps[::-1].squeeze()
    rigt = useArr[-1] + (useArr[-1] - useArr[-2]) * steps.squeeze()

    if log:
        left = np.power(10.0, left)
        rigt = np.power(10.0, rigt)

    if append:
        return np.hstack([left, arr, rigt])
    return [left, rigt]
e5f8b7fea74b1a92dba19aed527be1c823c058f9
3,649,876
def do_associate_favorite(parser, token):
    """
    @object - object to return the favorite count for
    """
    try:
        tag, node, user = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            "%r tag requires two arguments" % token.contents.split()[0])
    return AssociateFavorite(node, user)
90ed604936a0b7639adf356911a803ae755a9653
3,649,878
from typing import List, Tuple, Type

from pydantic import BaseModel  # noqa: E0611


def parse_cookie(cookie: Type[BaseModel]) -> Tuple[List[Parameter], dict]:
    """Parse cookie model"""
    schema = get_schema(cookie)
    parameters = []
    components_schemas = dict()
    properties = schema.get('properties')
    definitions = schema.get('definitions')

    if properties:
        for name, value in properties.items():
            data = {
                "name": name,
                "in": ParameterInType.cookie,
                "description": value.get("description"),
                "required": name in schema.get("required", []),
                "schema": Schema(**value)
            }
            parameters.append(Parameter(**data))
    if definitions:
        for name, value in definitions.items():
            components_schemas[name] = Schema(**value)

    return parameters, components_schemas
797c876676b1e002b4e54a7943f77301ed82efb1
3,649,879
def bdev_nvme_add_error_injection(client, name, opc, cmd_type, do_not_submit,
                                  timeout_in_us, err_count, sct, sc):
    """Add error injection

    Args:
        name: Name of the operating NVMe controller
        opc: Opcode of the NVMe command
        cmd_type: Type of NVMe command. Valid values are: admin, io
        do_not_submit: Do not submit commands to the controller
        timeout_in_us: Wait specified microseconds when do_not_submit is true
        err_count: Number of matching NVMe commands to inject errors
        sct: NVMe status code type
        sc: NVMe status code

    Returns:
        True on success, RPC error otherwise
    """
    params = {'name': name, 'opc': opc, 'cmd_type': cmd_type}
    if do_not_submit:
        params['do_not_submit'] = do_not_submit
    if timeout_in_us:
        params['timeout_in_us'] = timeout_in_us
    if err_count:
        params['err_count'] = err_count
    if sct:
        params['sct'] = sct
    if sc:
        params['sc'] = sc

    return client.call('bdev_nvme_add_error_injection', params)
3833256e71f47a49eef2643bf8c244308795a0b1
3,649,880
def tetheredYN(L0, KxStar, Rtot, Kav, fully=True):
    """Compare tethered (bispecific) vs monovalent binding."""
    if fully:
        return polyc(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)[2][0] / \
            polyfc(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)[0]
    else:
        return polyc(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)[0][0] / \
            polyfc(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)[0]
a8a4be3c7b217164d690eed29eb8ab1acca45e05
3,649,881
def valid_payload(request):
    """Fixture that yields valid data payload values."""
    return request.param
0c02e52a02b9089e4832ccf2e9c37fc2d355e893
3,649,882
def prune_deg_one_nodes(sampled_graph):
    """Prune out degree-one nodes from a graph."""
    deg_one_nodes = []
    for v in sampled_graph.nodes():
        if sampled_graph.degree(v) == 1:
            deg_one_nodes.append(v)

    for v in deg_one_nodes:
        sampled_graph.remove_node(v)

    return sampled_graph
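A quick usage sketch, assuming the graph is a networkx.Graph (the function only relies on nodes()/degree()/remove_node(), which networkx provides). Note it makes a single pass, so leaves created by the pruning itself are kept:

import networkx as nx

g = nx.path_graph(4)           # 0-1-2-3; nodes 0 and 3 have degree 1
pruned = prune_deg_one_nodes(g)
print(sorted(pruned.nodes()))  # [1, 2] after one pruning pass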
c4df72a66c6fb57d5d42a1b877a846338f32f42a
3,649,885
def reduce_clauses(clauses):
    """Reduce a clause set by eliminating redundant clauses."""
    used = []
    unexplored = clauses
    while unexplored:
        cl, unexplored = unexplored[0], unexplored[1:]
        if not subsume(used, cl) and not subsume(unexplored, cl):
            used.append(cl)
    return used
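The `subsume` helper is not included in this record; a common reading, sketched here purely as an assumption, is that a clause (a set of literals) is redundant when some other clause is a subset of it:

def subsume(others, cl):
    # Assumed semantics: cl is subsumed if any clause in `others`
    # is a subset of it.
    return any(set(o) <= set(cl) for o in others)

print(reduce_clauses([{1, 2}, {1}, {1, 3}]))  # [{1}] -- {1} subsumes the rest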
d28fc08f214a04aac433827560251143204fa290
3,649,886
from io import BytesIO

import numpy as np


def get_np_io(arr, **kwargs) -> BytesIO:
    """Get the numpy object as bytes.

    :param arr: Array-like
    :param kwargs: Additional kwargs to pass to :func:`numpy.save`.
    :return: A bytes object that can be used as a file.
    """
    bio = BytesIO()
    np.save(bio, arr, **kwargs)
    bio.seek(0)
    return bio
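Round-trip usage: the returned buffer can be fed straight back to np.load.

buf = get_np_io(np.arange(5))
restored = np.load(buf)
assert (restored == np.arange(5)).all()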
278a452dc97d8ca74398771bd34545c7505c191f
3,649,888
from typing import Mapping


def get_deep_attr(obj, keys):
    """Helper for DeepKey"""
    cur = obj
    for k in keys:
        if isinstance(cur, Mapping) and k in cur:
            cur = cur[k]
            continue
        else:
            try:
                cur = getattr(cur, k)
                continue
            except AttributeError:
                pass
        raise DataError(error='Nonexistent key')
    return cur
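A usage sketch; DataError is assumed to come from the surrounding project (the keyword signature resembles trafaret.DataError, but that is a guess):

cfg = {"server": {"host": "localhost"}}
assert get_deep_attr(cfg, ["server", "host"]) == "localhost"

class Box:
    value = 42  # attribute lookup is used when the object is not a Mapping

assert get_deep_attr({"box": Box()}, ["box", "value"]) == 42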
f7e3af73c2e45a5448e882136811b6898cc45e29
3,649,889
def fork_node_item_inline_editor(item, view, pos=None) -> bool:
    """Text edit support for Named items."""

    @transactional
    def update_text(text):
        item.subject.joinSpec = text
        return True

    def escape():
        item.subject.joinSpec = join_spec

    subject = item.subject
    if not subject:
        return False

    join_spec = subject.joinSpec or ""
    box = view.get_item_bounding_box(view.hovered_item)
    entry = popup_entry(join_spec, update_text)
    show_popover(entry, view, box, escape)
    return True
7c4b0bdbe321bab427e22440e7225539262806f2
3,649,890
def get_selfies_alphabet(smiles_list):
    """Returns a sorted list of all SELFIES tokens required to build a
    SELFIES string for each molecule."""
    selfies_list = list(map(sf.encoder, smiles_list))
    all_selfies_symbols = sf.get_alphabet_from_selfies(selfies_list)
    all_selfies_symbols.add('[nop]')
    selfies_alphabet = list(all_selfies_symbols)
    selfies_alphabet.sort()
    return selfies_alphabet
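A usage sketch, assuming the surrounding module has `import selfies as sf` (the record uses the selfies package's encoder and get_alphabet_from_selfies):

import selfies as sf

alphabet = get_selfies_alphabet(['CCO', 'c1ccccc1'])
# The exact tokens depend on the selfies version; '[nop]' is always
# included as the padding token.
print(alphabet)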
f18206e0c4c03ab75db3efd693655a1a1cacb9e2
3,649,891
import os.path as osp
import random

import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
# Assumed: the record's bare "import datasets" refers to torchvision.datasets
# (the code uses datasets.ImageFolder); transforms/DataLoader imports added.
from torchvision import datasets, transforms


def get_face_angular_dataloader(dataset_path, input_size, batch_size,
                                num_workers, train_portion=1):
    """
    Prepare dataset for training and evaluating pipeline

    Args:
        dataset_path (str)
        input_size (int)
        batch_size (int)
        num_workers (int)
        train_portion (float)

    Return:
        train_loader (torch.utils.data.DataLoader)
        val_loader (torch.utils.data.DataLoader)
        test_loader (torch.utils.data.DataLoader)
    """
    train_transform = transforms.Compose([
        transforms.Resize(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(FACE_MEAN, FACE_STD)
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
    ])

    train_dataset = datasets.ImageFolder(
        root=osp.join(dataset_path, "face", "train", "CASIA-WebFace"),
        transform=train_transform)
    test_dataset = PairFaceDataset(
        root=osp.join(dataset_path, "face", "test", "LFW"),
        transform=test_transform)

    if train_portion != 1:
        train_len = len(train_dataset)
        indices = list(range(train_len))
        random.shuffle(indices)
        split = int(np.floor(train_portion * train_len))
        train_idx, val_idx = indices[:split], indices[split:]

        train_sampler = SubsetRandomSampler(train_idx)
        val_sampler = SubsetRandomSampler(val_idx)

        train_loader = DataLoader(
            train_dataset, num_workers=num_workers, batch_size=batch_size,
            sampler=train_sampler, pin_memory=True)
        val_loader = DataLoader(
            train_dataset, num_workers=num_workers, batch_size=batch_size,
            sampler=val_sampler, pin_memory=True)
    else:
        train_loader = DataLoader(
            train_dataset, batch_size=batch_size, num_workers=num_workers,
            shuffle=True)
        val_loader = None

    test_loader = DataLoader(
        dataset=test_dataset, shuffle=False, batch_size=batch_size,
        num_workers=num_workers)

    return train_loader, val_loader, test_loader
5aa6d62c98ca942e79bbfaca192b11353a0a2fe1
3,649,892
def compile_sql_numericize(element, compiler, **kw):
    """
    Turn common number formatting into a number:
    use metric abbreviations, remove stuff like $, etc.
    """
    arg, = list(element.clauses)

    def sql_only_numeric(text):
        # Returns substring of numeric values only (-, ., numbers, scientific notation)
        # return func.nullif(func.substring(text, r'([+\-]?(\d\.?\d*[Ee][+\-]?\d+|(\d+\.\d*|\d*\.\d+)|\d+))'), '')
        return func.coalesce(
            # check for valid scientific notation
            func.substring(text, r'([+\-]?(\d+\.?\d*[Ee][+\-]?\d+))'),
            func.nullif(
                # remove all the non-numeric characters
                func.regexp_replace(text, r'[^0-9\.\+\-]+', '', 'g'),
                ''
            )
        )

    return compiler.process(sql_only_numeric(arg), **kw)
ef8631e98cd74b276ad00731c75a5c1c907eb303
3,649,893
def run_sgd(model, epochs):
    """
    Runs SGD for a predefined number of epochs and saves the resulting model.
    """
    print("Training full network")
    weights_rand_init = model.optimize(epochs=epochs)
    # weights_rand_init = model.optimize(epochs=epochs, batch_size=55000, learning_rate=0.1)
    print("Model optimized!!!")
    return [model.get_model_weights(), weights_rand_init]
14c6fd1ffa8aab3a783b5738093d69771d036411
3,649,894
def get_all_outcome_links_for_context_courses(request_ctx, course_id,
                                              outcome_style=None,
                                              outcome_group_style=None,
                                              per_page=None, **request_kwargs):
    """
    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param course_id: (required) ID
    :type course_id: string
    :param outcome_style: (optional) The detail level of the outcomes.
        Defaults to "abbrev". Specify "full" for more information.
    :type outcome_style: string or None
    :param outcome_group_style: (optional) The detail level of the outcome
        groups. Defaults to "abbrev". Specify "full" for more information.
    :type outcome_group_style: string or None
    :param per_page: (optional) Set how many results canvas should return,
        defaults to config.LIMIT_PER_PAGE
    :type per_page: integer or None
    :return: Get all outcome links for context
    :rtype: requests.Response (with array data)
    """
    if per_page is None:
        per_page = request_ctx.per_page
    path = '/v1/courses/{course_id}/outcome_group_links'
    payload = {
        'outcome_style': outcome_style,
        'outcome_group_style': outcome_group_style,
        'per_page': per_page,
    }
    url = request_ctx.base_api_url + path.format(course_id=course_id)
    response = client.get(request_ctx, url, payload=payload, **request_kwargs)

    return response
78026eff6aef5a486d920a888d4dfdabc94bfc00
3,649,895
def GetContentResourceSpec():
    """Gets Content resource spec."""
    return concepts.ResourceSpec(
        'dataplex.projects.locations.lakes.content',
        resource_name='content',
        projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
        locationsId=LocationAttributeConfig(),
        lakesId=LakeAttributeConfig(),
        contentId=ContentAttributeConfig())
434cb149fdeff6154928a4514d1f6241d44c85a7
3,649,896
from typing import Optional


def softplus(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """This operator computes the softplus value of Blob.

    The equation is:

    .. math::

        out = \\log(e^x + 1)

    Args:
        x (oneflow._oneflow_internal.BlobDesc): A Blob
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp

        @flow.global_function()
        def softplus_Job(x: tp.Numpy.Placeholder((3,))
        ) -> tp.Numpy:
            return flow.math.softplus(x)

        x = np.array([-1, 0, 1]).astype(np.float32)
        out = softplus_Job(x)

        # out [0.31326166 0.6931472  1.3132616 ]

    """
    return build_unary_elemwise_math_op("softplus", x, name)
2bef1db640e0e5b3e9971b1d9b4fbe23e4eba808
3,649,897
from typing import List, Tuple


def diff_gcs_directories(
    base_directory_url: str, target_directory_url: str
) -> Tuple[List[str], List[str], List[str]]:
    """
    Compare objects under different GCS prefixes.

    :param base_directory_url: URL for base directory
    :param target_directory_url: URL for target directory

    :returns: Tuple with 3 elements:
        List of objects in base directory that are not present in target directory
        List of objects in target directory that are not present in base directory
        List of objects with different content in base and target directory
    """
    base = urlparse(base_directory_url)
    target = urlparse(target_directory_url)

    if base.scheme != "gs":
        raise ValueError("base_directory_url must be a gs:// URL")

    if target.scheme != "gs":
        raise ValueError("target_directory_url must be a gs:// URL")

    client = Client(project=None)

    base_blobs = client.list_blobs(base.hostname, prefix=base.path.strip("/") + "/")
    base_blobs = {
        _remove_prefix(blob.name, base.path.strip("/")): blob for blob in base_blobs
    }
    missing_objects = set(base_blobs.keys())

    extra_objects = []
    changed_objects = []

    target_blobs = client.list_blobs(
        target.hostname, prefix=target.path.strip("/") + "/"
    )
    for blob in target_blobs:
        key = _remove_prefix(blob.name, target.path.strip("/"))
        missing_objects.discard(key)

        try:
            if blob.md5_hash != base_blobs[key].md5_hash:
                changed_objects.append(key)
        except KeyError:
            extra_objects.append(key)

    return GCSDiffResult(list(missing_objects), extra_objects, changed_objects)
1e7727fb352d320c79de16d6efdd6f46120e89d7
3,649,898
from typing import List


def load_compatible_apps(file_name: str) -> List[Product]:
    """Loads from file and from github and merges results."""
    local_list = load_installable_apps_from_file(file_name)
    try:
        github_list = load_compatible_apps_from_github()
    except (URLError, IOError):
        github_list = []
    return list(set(local_list) | set(github_list))
efbde4a2c2f4589bc73497017d89631e0333081c
3,649,899
# Assumed: the record's bare "import gettext" would not support the
# gettext('...', name=...) calls below; flask_babel's gettext matches
# that keyword-interpolation signature.
from flask_babel import gettext


def delete(page_id):
    """Delete a page."""
    page = _get_page(page_id)

    page_name = page.name
    site_id = page.site_id

    success, event = page_service.delete_page(page.id, initiator_id=g.user.id)

    if not success:
        flash_error(
            gettext('Page "%(name)s" could not be deleted.', name=page_name)
        )
        return url_for('.view_current_version', page_id=page.id)

    flash_success(gettext('Page "%(name)s" has been deleted.', name=page_name))

    page_signals.page_deleted.send(None, event=event)

    return url_for('.index_for_site', site_id=site_id)
9c858d19b27f42e71d6aa19ae636e282925f0492
3,649,900
def drift_var():
    """
    Concept drift:
    1. n_drifts
    2. concept_sigmoid_spacing (None for sudden)
    3. incremental [True] or gradual [False]
    4. recurring [True] or non-recurring [False]
    """
    return [
        (10, None, False, False),
        (10, 5, False, False),
        (10, 5, True, False),
    ]
34f2c55f928a16cca8c52307853ab32f56ecd954
3,649,901
def get_generators(matrix):
    """
    Given a matrix in H-rep, gets the V-rep.

    Turns out, the code is the same as get_inequalities, since lrs
    determines the directions based on the input.
    Left like this for readability.
    """
    return get_inequalities(matrix)
ab5c2059544842d5010cae1211acc7da9e021994
3,649,902
def num_instances(diff, flag=False):
    """Returns the number of times the mother and daughter have palindromic
    ages in their lives, given the difference in age.
    If flag is True, prints the details."""
    daughter = 0
    count = 0
    while True:
        mother = daughter + diff
        if are_reversed(daughter, mother) or are_reversed(daughter, mother + 1):
            count = count + 1
            if flag:
                print(daughter, mother)
        if mother > 120:
            break
        daughter = daughter + 1
    return count
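The `are_reversed` helper is not in the record; a minimal assumed version treats two ages as "reversed" when one, written as a (zero-padded) two-digit string, is the other backwards:

def are_reversed(a, b):
    # Assumed helper: compare zero-padded decimal digits back to front.
    sa, sb = str(a).zfill(2), str(b).zfill(2)
    return sa == sb[::-1]

print(num_instances(18, flag=True))  # the classic version of this puzzle uses diff=18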
84d39159c594b25aabfc9efceef0d13ebc15a817
3,649,903
import inspect


def get_dipy_workflows(module):
    """Search for DIPY workflow class.

    Parameters
    ----------
    module : object
        module object

    Returns
    -------
    l_wkflw : list of tuple
        This is a list of tuples containing 2 elements:
        Workflow name, Workflow class obj

    Examples
    --------
    >>> from dipy.workflows import align  # doctest: +SKIP
    >>> get_dipy_workflows(align)  # doctest: +SKIP
    """
    return [
        (m, obj)
        for m, obj in inspect.getmembers(module)
        if inspect.isclass(obj)
        and issubclass(obj, module.Workflow)
        and m not in SKIP_WORKFLOWS_LIST
    ]
a119d6defd6c741777c3fa2f1add6bc700357dbd
3,649,904
import numpy as np
import pymc3 as pm


def azel_fit(coo_ref, coo_meas, nsamp=2000, ntune=2000, target_accept=0.95,
             random_seed=8675309):
    """
    Fit full az/el pointing model using PyMC3. The terms are analogous to
    those used by TPOINT(tm). This fit includes the eight normal terms used
    in `~pytelpoint.transform.azel` with additional terms, az_sigma and
    el_sigma, that describe the intrinsic scatter.

    Parameters
    ----------
    coo_ref : `~astropy.coordinates.SkyCoord` instance
        Reference coordinates
    coo_meas : `~astropy.coordinates.SkyCoord` instance
        Measured coordinates
    nsamp : int (default: 2000)
        Number of inference samples
    ntune : int (default: 2000)
        Number of burn-in samples
    target_accept : float (default: 0.95)
        Sets acceptance probability target for determining step size
    random_seed : int (default: 8675309)
        Seed number for random number generator

    Returns
    -------
    idata : `~arviz.InferenceData`
        Inference data from the pointing model
    """
    pointing_model = pm.Model()
    deg2rad = np.pi / 180
    with pointing_model:
        # az/el are the astrometric reference values.
        # az_raw/el_raw are the observed encoder values.
        az = pm.Data('az', coo_ref.az)
        el = pm.Data('el', coo_ref.alt)
        az_raw = pm.Data('az_raw', coo_meas.az)
        el_raw = pm.Data('el_raw', coo_meas.alt)

        ia = pm.Normal('ia', 1200., 100)
        ie = pm.Normal('ie', 0., 50.)
        an = pm.Normal('an', 0., 20.)
        aw = pm.Normal('aw', 0., 20.)
        ca = pm.Normal('ca', 0., 30.)
        npae = pm.Normal('npae', 0., 30.)
        tf = pm.Normal('tf', 0., 50.)
        tx = pm.Normal('tx', 0., 20.)
        az_sigma = pm.HalfNormal('az_sigma', sigma=1.)
        el_sigma = pm.HalfNormal('el_sigma', sigma=1.)

        daz = -ia
        daz -= an * pm.math.sin(deg2rad * az) * pm.math.tan(deg2rad * el)
        daz -= aw * pm.math.cos(deg2rad * az) * pm.math.tan(deg2rad * el)
        daz -= ca / pm.math.cos(deg2rad * el)
        daz -= npae * pm.math.tan(deg2rad * el)

        dalt = ie
        dalt -= an * pm.math.cos(deg2rad * az)
        dalt += aw * pm.math.sin(deg2rad * az)
        dalt -= tf * pm.math.cos(deg2rad * el)
        dalt -= tx / pm.math.tan(deg2rad * el)

        _ = pm.Normal('azerr', mu=0., sigma=az_sigma / 3600,
                      observed=pm.math.cos(deg2rad * el) * (az - (az_raw + daz / 3600.)))
        _ = pm.Normal('elerr', mu=0., sigma=el_sigma / 3600,
                      observed=el - (el_raw + dalt / 3600.))

        idata = pm.sample(
            nsamp,
            tune=ntune,
            target_accept=target_accept,
            return_inferencedata=True,
            random_seed=random_seed
        )
    return idata
22c3989049933b55643d11bfb2aebeb4c629ed60
3,649,905
import numpy as np


def geojson_to_labels(geojson_dict, crs_transformer, extent=None):
    """Convert GeoJSON to ObjectDetectionLabels object.

    If extent is provided, filter out the boxes that lie "more than a little
    bit" outside the extent.

    Args:
        geojson_dict: dict in GeoJSON format
        crs_transformer: used to convert map coords in geojson to pixel
            coords in labels object
        extent: Box in pixel coords

    Returns:
        ObjectDetectionLabels
    """
    features = geojson_dict['features']
    boxes = []
    class_ids = []
    scores = []

    def polygon_to_label(polygon, crs_transformer):
        polygon = [crs_transformer.map_to_pixel(p) for p in polygon]
        xmin, ymin = np.min(polygon, axis=0)
        xmax, ymax = np.max(polygon, axis=0)
        boxes.append(Box(ymin, xmin, ymax, xmax))

        properties = feature['properties']
        class_ids.append(properties['class_id'])
        scores.append(properties.get('score', 1.0))

    for feature in features:
        geom_type = feature['geometry']['type']
        coordinates = feature['geometry']['coordinates']
        if geom_type == 'MultiPolygon':
            for polygon in coordinates:
                polygon_to_label(polygon[0], crs_transformer)
        elif geom_type == 'Polygon':
            polygon_to_label(coordinates[0], crs_transformer)
        else:
            raise Exception(
                "Geometries of type {} are not supported in object detection "
                "labels.".format(geom_type))

    if len(boxes):
        boxes = np.array([box.npbox_format() for box in boxes], dtype=float)
        class_ids = np.array(class_ids)
        scores = np.array(scores)
        labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)
    else:
        labels = ObjectDetectionLabels.make_empty()

    if extent is not None:
        labels = ObjectDetectionLabels.get_overlapping(
            labels, extent, ioa_thresh=0.8, clip=True)
    return labels
d8e0ed7034796235c6311d47eb234bfd0f38e68a
3,649,907
def processed_transcript(df):
    """
    Cleans the transcript table by splitting the value field into columns and
    replacing NaN values, then drops the extra columns.

    PARAMETERS:
        df: transcript dataframe

    RETURNS:
        Cleaned transcript dataframe
    """
    # Expand the value dict into columns (reward, amount, offer id).
    df['offer_id'] = df['value'].apply(lambda x: x.get('offer_id'))
    df['offer id'] = df['value'].apply(lambda x: x.get('offer id'))
    df['reward'] = df['value'].apply(lambda x: x.get('reward'))
    df['amount'] = df['value'].apply(lambda x: x.get('amount'))

    # Move 'offer id' values into 'offer_id'.
    df['offer_id'] = df.apply(
        lambda x: x['offer id'] if x['offer_id'] is None else x['offer_id'],
        axis=1)

    # Drop the 'offer id' and 'value' columns.
    df.drop(['offer id', 'value'], axis=1, inplace=True)

    # Replace NaN.
    df.fillna(0, inplace=True)
    return df
452668d6d9616ca382f7968e0ac4dd52658be9f6
3,649,908
import six


def _TestSuiteName(dash_json_dict):
    """Extracts a test suite name from Dashboard JSON.

    The dashboard JSON may contain a field "test_suite_name". If this is not
    present or it is None, the dashboard will fall back to using
    "benchmark_name" in the "chart_data" dict.
    """
    name = None
    if dash_json_dict.get('test_suite_name'):
        name = dash_json_dict['test_suite_name']
    else:
        try:
            name = dash_json_dict['chart_data']['benchmark_name']
        except KeyError as e:
            six.raise_from(
                BadRequestError('Could not find test suite name. ' + str(e)), e)
    _ValidateNameString(name, 'test_suite_name')
    return name
1b2e6cbd820bde3b24be5cca107e49ea2dabc732
3,649,909
def clean_data(list_in):
    """
    Inputs:
        list_in - filtered list of ticket orders
    Outputs:
        Return list of tuples, each tuple contains
        (last name, first name, note, [tickets])
    """
    notes_list = []
    data_out = []
    for row in list_in:
        trimmed_row = row[row.index('Purchaser Name: ') + 16:]
        name = trimmed_row[:trimmed_row.index('<br/>')].strip().title()
        first_name = name[:name.rindex(' ')]  # get first name
        last_name = name[name.rindex(' '):]   # get last name
        trimmed_row = trimmed_row[len(name + '<br/>') + 1:]
        if 'Special Instructions:' in row:  # get notes
            note = trimmed_row[22:trimmed_row.index('<br/>')]
            trimmed_row = trimmed_row[trimmed_row.index('<br/>') + 5:]
            notes_list.append((last_name, first_name, note))
        else:
            note = ''
        orders = trimmed_row.split('<br/>')
        tickets = []
        for order in orders:  # get ticket orders
            if ('Membership Dues' in order) or ('Donation' in order):
                continue
            else:
                tickets.append(order)
        data_out.append([last_name, first_name, note, tickets])
    data_out.sort(key=lambda item: item[1])  # sort by first name (to break last name ties)
    data_out.sort(key=lambda item: item[0])  # sort by last name
    return data_out
f2cdf17895d1661e40b64f3fcc9ff92558f53bdd
3,649,911
def adfuller(
    vdf,
    column: str,
    ts: str,
    by: list = [],
    p: int = 1,
    with_trend: bool = False,
    regresults: bool = False,
):
    """
    ---------------------------------------------------------------------------
    Augmented Dickey-Fuller test (Time Series stationarity).

    Parameters
    ----------
    vdf: vDataFrame
        Input vDataFrame.
    column: str
        Input vcolumn to test.
    ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical or date-like (date, datetime, timestamp...) vcolumn.
    by: list, optional
        vcolumns used in the partition.
    p: int, optional
        Number of lags to consider in the test.
    with_trend: bool, optional
        Add a trend in the regression.
    regresults: bool, optional
        If True, the full regression results are returned.

    Returns
    -------
    tablesample
        An object containing the result. For more information, see
        utilities.tablesample.
    """

    def critical_value(alpha, N, with_trend):
        # Dickey-Fuller critical-value tables, keyed by (max N, alpha).
        # Two apparent typos in the original tables (-5.45 and +3.98) have
        # been corrected to the standard values -3.45 and -3.98.
        no_trend = {
            25: {0.01: -3.75, 0.025: -3.33, 0.05: -3.00, 0.10: -2.62},
            50: {0.01: -3.58, 0.025: -3.22, 0.05: -2.93, 0.10: -2.60},
            100: {0.01: -3.51, 0.025: -3.17, 0.05: -2.89, 0.10: -2.58},
            250: {0.01: -3.46, 0.025: -3.14, 0.05: -2.88, 0.10: -2.57},
            500: {0.01: -3.44, 0.025: -3.13, 0.05: -2.87, 0.10: -2.57},
            float("inf"): {0.01: -3.43, 0.025: -3.12, 0.05: -2.86, 0.10: -2.57},
        }
        trend = {
            25: {0.01: -4.38, 0.025: -3.95, 0.05: -3.60, 0.10: -3.24},
            50: {0.01: -4.15, 0.025: -3.80, 0.05: -3.50, 0.10: -3.18},
            100: {0.01: -4.04, 0.025: -3.73, 0.05: -3.45, 0.10: -3.15},
            250: {0.01: -3.99, 0.025: -3.69, 0.05: -3.43, 0.10: -3.13},
            500: {0.01: -3.98, 0.025: -3.68, 0.05: -3.42, 0.10: -3.13},
            float("inf"): {0.01: -3.96, 0.025: -3.66, 0.05: -3.41, 0.10: -3.12},
        }
        table = trend if with_trend else no_trend
        for n_max in sorted(table):
            if N <= n_max:
                # Any alpha other than 1%, 2.5% or 10% falls back to the 5% column.
                return table[n_max][alpha if alpha in (0.01, 0.025, 0.10) else 0.05]

    check_types(
        [
            ("ts", ts, [str],),
            ("column", column, [str],),
            ("p", p, [int, float],),
            ("by", by, [list],),
            ("with_trend", with_trend, [bool],),
            ("regresults", regresults, [bool],),
        ],
        vdf=["vdf", vdf],
    )
    columns_check([ts, column] + by, vdf)
    ts = vdf_columns_names([ts], vdf)[0]
    column = vdf_columns_names([column], vdf)[0]
    by = vdf_columns_names(by, vdf)
    schema = vdf._VERTICAPY_VARIABLES_["schema_writing"]
    if not schema:
        schema = "public"
    name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
        schema, gen_name([column]).upper()
    )
    relation_name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
        schema, gen_name([column]).upper()
    )
    try:
        vdf._VERTICAPY_VARIABLES_["cursor"].execute(
            "DROP MODEL IF EXISTS {}".format(name)
        )
        vdf._VERTICAPY_VARIABLES_["cursor"].execute(
            "DROP VIEW IF EXISTS {}".format(relation_name)
        )
    except:
        pass
    lag = [
        "LAG({}, 1) OVER ({}ORDER BY {}) AS lag1".format(
            column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
        )
    ]
    lag += [
        "LAG({}, {}) OVER ({}ORDER BY {}) - LAG({}, {}) OVER ({}ORDER BY {}) AS delta{}".format(
            column,
            i,
            "PARTITION BY {}".format(", ".join(by)) if (by) else "",
            ts,
            column,
            i + 1,
            "PARTITION BY {}".format(", ".join(by)) if (by) else "",
            ts,
            i,
        )
        for i in range(1, p + 1)
    ]
    lag += [
        "{} - LAG({}, 1) OVER ({}ORDER BY {}) AS delta".format(
            column, column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
        )
    ]
    query = "CREATE VIEW {} AS SELECT {}, {} AS ts FROM {}".format(
        relation_name,
        ", ".join(lag),
        "TIMESTAMPDIFF(SECOND, {}, MIN({}) OVER ())".format(ts, ts)
        if vdf[ts].isdate()
        else ts,
        vdf.__genSQL__(),
    )
    vdf._VERTICAPY_VARIABLES_["cursor"].execute(query)
    model = LinearRegression(name, vdf._VERTICAPY_VARIABLES_["cursor"])
    model.fit(
        relation_name,
        ["ts"] + ["lag1"] + ["delta{}".format(i) for i in range(1, p + 1)],
        "delta",
    )
    coef = model.coef_
    vdf._VERTICAPY_VARIABLES_["cursor"].execute("DROP MODEL IF EXISTS {}".format(name))
    vdf._VERTICAPY_VARIABLES_["cursor"].execute(
        "DROP VIEW IF EXISTS {}".format(relation_name)
    )
    if regresults:
        return coef
    coef = coef.transpose()
    DF = coef.values["lag1"][0] / (max(coef.values["lag1"][1], 1e-99))
    p_value = coef.values["lag1"][3]
    count = vdf.shape()[0]
    result = tablesample(
        {
            "index": [
                "ADF Test Statistic",
                "p_value",
                "# Lags used",
                "# Observations Used",
                "Critical Value (1%)",
                "Critical Value (2.5%)",
                "Critical Value (5%)",
                "Critical Value (10%)",
                "Stationarity (alpha = 1%)",
            ],
            "value": [
                DF,
                p_value,
                p,
                count,
                critical_value(0.01, count, with_trend),
                critical_value(0.025, count, with_trend),
                critical_value(0.05, count, with_trend),
                critical_value(0.10, count, with_trend),
                DF < critical_value(0.01, count, with_trend) and p_value < 0.01,
            ],
        }
    )
    return result
8f78b2128c981af15a84ac94f54435da4aee0c6c
3,649,912
from typing import Dict, List, Optional, Union


def get_routes_bend180(
    ports: Union[List[Port], Dict[str, Port]],
    bend: ComponentOrFactory = bend_euler,
    cross_section: CrossSectionFactory = strip,
    bend_port1: Optional[str] = None,
    bend_port2: Optional[str] = None,
    **kwargs,
) -> Routes:
    """Returns routes made by 180 degree bends.

    Args:
        ports: List or dict of ports
        bend: function for bend
        cross_section: cross-section factory for the bend
        **kwargs: bend settings
    """
    ports = list(ports.values()) if isinstance(ports, dict) else ports
    bend = bend(angle=180, cross_section=cross_section, **kwargs)
    bend_ports = bend.get_ports_list()
    bend_port1 = bend_port1 or bend_ports[0].name
    bend_port2 = bend_port2 or bend_ports[1].name
    references = [bend.ref() for port in ports]
    references = [ref.connect(bend_port1, port) for port, ref in zip(ports, references)]
    ports = [ref.ports[bend_port2] for i, ref in enumerate(references)]
    lengths = [bend.info.length] * len(ports)
    return Routes(references=references, ports=ports, lengths=lengths)
f5ec1539a04c0c9eee9184d190e265af4e187ef0
3,649,913
import json
from urllib.request import urlopen

import pandas as pd


def list_datasets(github_repo="Ouranosinc/xclim-testdata", branch="main"):
    """Return a DataFrame listing all xclim test datasets available on the
    GitHub repo for the given branch.

    The result includes the filepath, as passed to `open_dataset`, the file
    size (in KB) and the html url to the file.

    This uses an unauthenticated call to GitHub's REST API, so it is limited
    to 60 requests per hour (per IP). A single call of this function triggers
    one request per subdirectory, so use with parsimony.
    """
    res = urlopen(  # nosec
        f"https://api.github.com/repos/{github_repo}/contents?ref={branch}"
    )
    base = json.loads(res.read().decode())
    records = []
    for folder in base:
        if folder["path"].startswith(".") or folder["size"] > 0:
            # drop hidden folders and other files.
            continue

        res = urlopen(folder["url"])  # nosec
        listing = json.loads(res.read().decode())
        for file in listing:
            if file["path"].endswith(".nc"):
                records.append(
                    {
                        "name": file["path"],
                        "size": file["size"] / 2**10,
                        "url": file["html_url"],
                    }
                )

    df = pd.DataFrame.from_records(records).set_index("name")
    print(f"Found {len(df)} datasets.")
    return df
199c56efcb105d9ff043f2a7c1ef51857a8b9b77
3,649,914
import json def embed_terms(args, classes, dest, use_cache=True, path_to_json='ebd_cache.json'): """ Embeds class strings into word representations. :param args :param classes: (list of str) topic classes :param dest: (str) path to destination file :param path_to_json: (str) path to json file containing word embeddings :return: dict {newsgroup class (int id) : embedded vector (nparray of float)} """ if use_cache: with open(dest) as json_file: return classes, json.load(json_file) # Not using cache: extract vectors from global set with open(path_to_json) as json_file: mappings = json.load(json_file) input() input(mappings) # Cache topic reps cache = dict(zip(classes, [mappings[topic] for topic in classes])) with open(dest, 'w') as fp: json.dump(cache, fp)
8521b4828907c0083492b0d03848aeeb452d17e6
3,649,916
# NOTE: the original record imported pathlib.Path, but the Path([]) / .cat()
# usage below implies a project-local Path class, not pathlib.
def wf_paths(reachable):
    """
    Construct all well-formed paths satisfying a given condition.

    The condition is as follows: all the paths have height equal to the
    ceiling of log_2(`reachable` + 1). `reachable` is interpreted as a
    bitfield, with 1 meaning that the corresponding leaf on the floor of the
    path should be reachable from the root, 0 meaning the opposite.

    This function has been used to count well-formed paths and guess the link
    with Gelfand-Zetlin polytopes.
    """
    if reachable <= 0:
        raise ValueError
    elif reachable == 1:
        return [Path([])]
    else:
        floors = [reachable & 1]
        reachable >>= 1
        left = 2; right = 4
        while reachable > 1:
            if reachable & 1:
                floors = [f | left for f in floors] + [f | right for f in floors]
            left <<= 2; right <<= 2
            reachable >>= 1
        floors = [f | left for f in floors]
        paths = []
        for f in floors:
            paths.extend([p.cat(f) for p in wf_paths(_h4(f))])
        return paths
86b0a2e6408a8257e201f21058459aea4aceac00
3,649,917
def get_imagemodel_in_rar(rar_path, mode):
    """Collect and return the name, width and height of every image file
    inside the archive at rar_path."""
    image_models = []
    with rarfile.RarFile(rar_path) as rf:
        for name in rf.namelist():
            if is_hidden_or_trash(name):
                continue
            if is_extensions_allow_image(name):
                model = BaseImageModel()
                model._name = name
                app.logger.info("fileName: " + name)
                if mode == "1":
                    try:
                        # rf.read() returns bytes; wrap them in a BytesIO so
                        # the size helper can treat them as a file.
                        data = BytesIO(rf.read(name))
                        size = get_image_size_from_bytes(data)
                        model._width = size[0]
                        model._height = size[1]
                    except Exception:
                        app.logger.error("Could not get width/height >> " + name)
                image_models.append(model)
    return image_models
ea94406e17b66bbbf0288b8f0cb03cdd723a2d63
3,649,918
# NOTE: the original record had "from re import L", which binds the re.LOCALE
# flag; L is assumed here to be a module-level domain-size constant instead.
def run_single(i, threshold_area_fraction, death_to_birth_rate_ratio,
               domain_size_multiplier, return_history=False):
    """Run a single voronoi tessellation model simulation."""
    rates = (DEATH_RATE, DEATH_RATE / death_to_birth_rate_ratio)
    rand = np.random.RandomState()
    history = lib.run_simulation(
        simulation, L, TIMESTEP, TIMEND, rand, progress_on=False,
        init_time=INIT_TIME, til_fix=False, save_areas=True,
        return_events=False, save_cell_histories=False, N_limit=MAX_POP_SIZE,
        domain_size_multiplier=domain_size_multiplier, rates=rates,
        threshold_area_fraction=threshold_area_fraction)
    return number_proliferating_neighbours_distribution(
        history, threshold_area_fraction, death_to_birth_rate_ratio)
b4ef11ae873f69b472a2c41c2f5d33e88ed1169a
3,649,919
def matrixmult(A, B):
    """Matrix multiplication function

    This function returns the product of a matrix multiplication given two
    matrices. Let the dimension of the matrix A be: m by n, let the dimension
    of the matrix B be: p by q; multiplication will only be possible if n = p,
    thus creating a matrix of m by q size.

    Parameters
    ----------
    A : list
        First matrix, in a 2D array format.
    B : list
        Second matrix, in a 2D array format.

    Returns
    -------
    C : list
        The product of the matrix multiplication.

    Examples
    --------
    >>> from .pycgmStatic import matrixmult
    >>> A = [[11,12,13],[14,15,16]]
    >>> B = [[1,2],[3,4],[5,6]]
    >>> matrixmult(A, B)
    [[112, 148], [139, 184]]
    """
    # Result has len(A) rows and len(B[0]) columns; the original swapped
    # these dimensions, which only worked for square-ish results.
    C = [[0 for col in range(len(B[0]))] for row in range(len(A))]
    for i in range(len(A)):
        for j in range(len(B[0])):
            for k in range(len(B)):
                C[i][j] += A[i][k] * B[k][j]
    return C
98065981c8047d927bacb07877dbf173ba379159
3,649,920
def TorsLattice(data=None, *args, **kwargs):
    """
    Construct a lattice of torsion classes from various forms of input data

    This raises an error if the constructed lattice is not semidistributive,
    since the lattice of torsion classes is semidistributive.

    INPUT:

    - ``data``, ``*args``, ``**kwargs`` -- data and options that will be
      passed down to :func:`LatticePoset` to construct a poset that is also
      a lattice.

    OUTPUT:

    An instance of :class:`FiniteTorsLattice`
    """
    if isinstance(data, FiniteTorsLattice) and not args and not kwargs:
        return data
    L = LatticePoset(data, *args, **kwargs)
    if not L.is_semidistributive():
        raise ValueError("This lattice is not semidistributive.")
    return FiniteTorsLattice(L)
e07cfef58b2927b8e3c660ee20cc0c6bc365fa4b
3,649,921
def get_accumulated_report(trigger_id, mission='fermi'):
    """
    Return the last value for each keyword on the summary page for a given
    trigger_id.

    :param trigger_id:
    :param mission: 'fermi' or 'swift'
    :return:
    """
    if 'fermi' in mission:
        site = fermi_grb_site
    elif 'swift' in mission:
        site = swift_grb_site
    else:
        print("Unknown mission {0}".format(mission))
        sys.exit()
    html = urllib.urlopen(site.format(trigger_id))
    rs = page_to_reports(html)
    fs = None
    for r in rs:
        fs = report_to_fields(r, fs)
    return fs
de77dc845a48d6572b2ff9229eed57e7fd55b38c
3,649,922
import torch
from torch.nn import functional as F


def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
    """
    take a conditioning sequence of indices in x (of shape (b,t)) and predict
    the next token in the sequence, feeding the predictions back into the
    model each time. Clearly the sampling has quadratic complexity unlike an
    RNN that is only linear, and has a finite context window of block_size,
    unlike an RNN that has an infinite context window.
    """
    block_size = model.get_block_size()
    model.eval()
    for k in range(steps):
        x_cond = x if x.size(1) <= block_size else x[:, -block_size:]  # crop context if needed
        logits, _ = model(x_cond)
        # pluck the logits at the final step and scale by temperature
        logits = logits[:, -1, :] / temperature
        # optionally crop probabilities to only the top k options
        if top_k is not None:
            logits = top_k_logits(logits, top_k)
        # apply softmax to convert to probabilities
        probs = F.softmax(logits, dim=-1)
        # sample from the distribution or take the most likely
        if sample:
            ix = torch.multinomial(probs, num_samples=1)
        else:
            _, ix = torch.topk(probs, k=1, dim=-1)
        # append to the sequence and continue
        x = torch.cat((x, ix), dim=1)

    return x
c63ab2c001b7c88568d12d836da65abb368a8f31
3,649,923
def get_numbers(number, size, *, fg=DEFAULT_FGCHARACTER, bg=DEFAULT_BGCHARACTER):
    """Creates a shape of numbers.

    Positional arguments:
        number - number to print.
        size - size of the shape.

    Keyword arguments:
        fg - foreground character.
        bg - background character.
    """
    _validate_positive_params(number + 1, size)

    width = int(size + 1)
    height = int(size * 2 + 1)
    x = range(width)
    y = range(height)

    # https://en.wikipedia.org/wiki/Seven-segment_display
    l = [
        f"y == {size*2} and x<={size}",               # A
        f"x == {size} and y>{size} and y<={size*2}",  # B
        f"x == {size} and y<={size}",                 # C
        f"y == 0 and x<={size}",                      # D
        f"x == 0 and y<={size}",                      # E
        f"x == 0 and y>{size} and y<={size*2}",       # F
        f"y == {size} and x<={size}",                 # G
    ]
    numbers = [
        {l[0], l[1], l[2], l[3], l[4], l[5]      },  # 0
        {      l[1], l[2]                        },  # 1
        {l[0], l[1],       l[3], l[4],       l[6]},  # 2
        {l[0], l[1], l[2], l[3],             l[6]},  # 3
        {      l[1], l[2],             l[5], l[6]},  # 4
        {l[0],       l[2], l[3],       l[5], l[6]},  # 5
        {l[0],       l[2], l[3], l[4], l[5], l[6]},  # 6
        {l[0], l[1], l[2]                        },  # 7
        {l[0], l[1], l[2], l[3], l[4], l[5], l[6]},  # 8
        {l[0], l[1], l[2], l[3],       l[5], l[6]},  # 9
    ]

    res = ""
    for digit in str(number):
        feqs = numbers[int(digit)]
        s_digit = _make_shape(x, y, feqs, [], fg=fg, bg=bg)
        if res:
            # Stitch the new digit onto the right of the shape so far, row by row.
            new_res = ""
            for i, j in zip(res.split("\n"), s_digit.split("\n")):
                if i and j:
                    new_res += i + " " + j + '\n'
            res = new_res
        else:
            res = s_digit
    return res
1cc992796f7118cbc0b19938ece0f87ed146a0d2
3,649,924
def to_cmyk(r: int, g: int, b: int) -> _cmyk:
    """
    Takes RGB values 0->255 and returns their values in the CMYK namespace.

    https://www.rapidtables.com/convert/color/rgb-to-cmyk.html
    """
    r, g, b = to_float(r, g, b)
    k = 1 - max(r, g, b)
    if k == 1:
        # Pure black: avoid dividing by zero below.
        return (0.0, 0.0, 0.0, 1.0)
    c = (1 - r - k) / (1 - k)
    m = (1 - g - k) / (1 - k)
    y = (1 - b - k) / (1 - k)
    return (c, m, y, k)
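The `to_float` helper is not in the record; a minimal assumed version scales 0-255 channels to 0-1 floats:

def to_float(*channels):
    # Assumed helper: normalize 8-bit channel values to the unit interval.
    return tuple(c / 255 for c in channels)

print(to_cmyk(255, 0, 0))  # red -> (0.0, 1.0, 1.0, 0)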
804f12c944ba0c0a740ca94c3e622b061db57dc5
3,649,926
def GetHostsInClusters(datacenter, clusterNames=[], connectionState=None):
    """
    Return list of host objects from given cluster names.

    @param datacenter: datacenter object
    @type datacenter: Vim.Datacenter
    @param clusterNames: cluster name list
    @type clusterNames: string[]
    @param connectionState: host connection state ("connected",
           "disconnected", "notResponding"), None means all states.
    @type connectionState: string
    """
    if len(clusterNames) == 0:
        clusterObjs = GetAllClusters(datacenter)
    else:
        clusterObjs = GetClusters(datacenter, clusterNames)

    if connectionState is None:
        hostObjs = [h for cl in clusterObjs for h in cl.host]
    else:
        hostObjs = [h for cl in clusterObjs for h in cl.host
                    if h.runtime.connectionState == connectionState]
    return hostObjs
c9722212e239eaec930da34dac2b5c82d45178fe
3,649,927
def get_gfa_targets(tiles, gfafile, faintlim=99, gaiadr="dr2"):
    """Returns a list of tables of GFA targets on each tile

    Args:
        tiles: table with columns TILEID, RA, DEC; or Tiles object
        gfafile: filename of the FITS file with GFA targets (columns RA, DEC)
        faintlim: faint Gaia G magnitude limit for good ETC/GUIDE/FOCUS targets
        gaiadr: string, must be either "dr2" or "edr3" (default to "dr2")
            MAY NOT BE FULLY IMPLEMENTED

    Returns:
        list of tables (one row per input tile) with the subset of targets
        that are covered by GFAs on each tile. Each table has an additional
        `GFA_LOC` column indicating 0-9 which GFA was covered.

    Note that a given target could be covered by GFAs on more than one tile.

    Output is a list of astropy Tables; inputs can be numpy structured arrays
    or astropy Tables
    """
    log = Logger.get()
    tm = Timer()
    tm.start()

    # Convert tiles to vanilla numpy array if needed
    if isinstance(tiles, Tiles):
        tx = np.zeros(len(tiles.ra),
                      dtype=[("RA", "f8"), ("DEC", "f8"), ("TILEID", "i4")])
        tx["RA"] = tiles.ra
        tx["DEC"] = tiles.dec
        tx["TILEID"] = tiles.id
        tiles = tx

    # Load potential GFA targets and GFA locations
    targets = fitsio.read(gfafile)
    gfa = desimodel.focalplane.gfa.GFALocations(scale=2)

    # Pre-filter what GFA targets cover what tiles with some buffer.
    # find_points_in_tiles returns a list of lists;
    # convert to dictionary of lists keyed by tileid
    log.info("Finding overlap of {} GFA targets on {} tiles".format(
        len(targets), len(tiles)))
    gfa_tile_indices = dict()
    ii = desimodel.footprint.find_points_in_tiles(
        tiles, targets["RA"], targets["DEC"], radius=1.8)
    for i, tileid in enumerate(tiles["TILEID"]):
        gfa_tile_indices[tileid] = ii[i]

    gfa_targets = list()
    log.info("Generating GFA targets tables")
    for telra, teldec, tileid in zip(tiles["RA"], tiles["DEC"], tiles["TILEID"]):
        tmp = gfa.targets_on_gfa(telra, teldec, targets[gfa_tile_indices[tileid]])
        t = Table(tmp)
        # Rename some columns for downstream clarity and consistency
        for oldname, newname in [
                ("TYPE", "MORPHTYPE"),
                ("RA", "TARGET_RA"),
                ("DEC", "TARGET_DEC"),
                ("RA_IVAR", "TARGET_RA_IVAR"),
                ("DEC_IVAR", "TARGET_DEC_IVAR")]:
            if oldname in t.colnames:
                t.rename_column(oldname, newname)

        # Select which targets are good for ETC / GUIDE / FOCUS
        # 0 == good
        flag = np.zeros(len(t), dtype="i2")

        # - Not PSF-like
        isPSF = ((t["MORPHTYPE"] == "PSF ") | (t["MORPHTYPE"] == "GPSF")
                 | (t["MORPHTYPE"] == "PSF"))
        flag[~isPSF] |= 2**0

        # - Not isolated
        if len(tmp) > 1:
            notIsolated = ~isolated(tmp['RA'], tmp['DEC'])
            flag[notIsolated] |= 2**1

        # - Questionable astrometry / proper motion
        tych = (0 < t['REF_ID'])
        tych &= (t['REF_ID'] < 1e10)
        flag[tych] |= 2**2

        # - Too faint
        faint = t['GAIA_PHOT_G_MEAN_MAG'] > faintlim
        flag[faint] |= 2**3

        # AR not passing the Gaia AEN criterion (PM correction done for AEN targets only)
        g = t["GAIA_PHOT_G_MEAN_MAG"]
        aen = t["GAIA_ASTROMETRIC_EXCESS_NOISE"]
        isaen = np.logical_or(
            (g <= 19.0) * (aen < 10.0 ** 0.5),
            (g >= 19.0) * (aen < 10.0 ** (0.5 + 0.2 * (g - 19.0))),
        )
        flag[~isaen] |= 2**4

        if len(flag) - np.count_nonzero(flag) == 0:
            log.error("ERROR: no good GFA targets for "
                      "ETC/GUIDE/FOCUS on tile {}".format(tileid))

        t["ETC_FLAG"] = flag
        t["GUIDE_FLAG"] = flag
        t["FOCUS_FLAG"] = flag

        # patch in Gaia-based synthetic r flux for use by ETC
        t["FLUX_R"] = gaia_synth_r_flux(t, gaiadr=gaiadr)

        gfa_targets.append(t)

    tm.stop()
    tm.report("  Identifying GFA targets")

    return gfa_targets
aa8c5a42babca87d26ad93538035734db54574f8
3,649,928
from typing import Tuple
from typing import Union


def fit_size(
    img: IMG,
    size: Tuple[int, int],
    mode: FitSizeMode = FitSizeMode.INCLUDE,
    direction: FitSizeDir = FitSizeDir.CENTER,
    bg_color: Union[str, float, Tuple[float, ...]] = (255, 255, 255, 0),
) -> IMG:
    """
    Resize an image to the given size, cropping anything that extends
    beyond it and filling any shortfall with the given color

    :params
      * ``img``: image to adjust
      * ``size``: target image size
      * ``mode``: FitSizeMode.INSIDE keeps the image within the target size,
        filling the shortfall with the background color; FitSizeMode.INCLUDE
        makes the image cover the target size, cropping the excess
      * ``direction``: where the image is anchored while resizing; defaults
        to centered, FitSizeDir.CENTER
      * ``bg_color``: background color used with FitSizeMode.INSIDE
    """
    return cut_size(limit_size(img, size, mode), size, direction, bg_color)
6cc33cb8c3fff4edec3bf15978f8cedc056a5e0c
3,649,930
import os
import sys

# Mirrors the Django original this snippet was adapted from: colorama,
# if installed, enables ANSI escape handling on Windows.
try:
    import colorama
    HAS_COLORAMA = True
except ImportError:
    HAS_COLORAMA = False


def supports_colour():
    """
    Return True if the running system's terminal supports colour,
    and False otherwise.

    Adapted from https://github.com/django/django/blob/master/django/core/management/color.py
    """
    def vt_codes_enabled_in_windows_registry():
        """
        Check the Windows Registry to see if VT code handling has been enabled
        by default, see https://superuser.com/a/1300251/447564.
        """
        try:
            # winreg is only available on Windows.
            import winreg
        except ImportError:
            return False
        else:
            reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console')
            try:
                reg_key_value, _ = winreg.QueryValueEx(
                    reg_key, 'VirtualTerminalLevel')
            except FileNotFoundError:
                return False
            else:
                return reg_key_value == 1

    # isatty is not always implemented, #6223.
    is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()

    return is_a_tty and (
        sys.platform != 'win32' or
        HAS_COLORAMA or
        'ANSICON' in os.environ or
        # Windows Terminal supports VT codes.
        'WT_SESSION' in os.environ or
        # Microsoft Visual Studio Code's built-in terminal supports colors.
        os.environ.get('TERM_PROGRAM') == 'vscode' or
        vt_codes_enabled_in_windows_registry()
    )
d567b7818c314d345a30f10dffd99c7a3b411c3e
3,649,932
def next_code(value: int, mul: int = 252533, div: int = 33554393) -> int: """ Returns the value of the next code given the value of the current code The first code is `20151125`. After that, each code is generated by taking the previous one, multiplying it by `252533`, and then keeping the remainder from dividing that value by `33554393` """ return (value * mul) % div
a9e5183e405574cc56a138a244f14de08ea68d00
3,649,933
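A quick usage sketch for next_code (not from the original source): repeated application reproduces the documented code sequence.

# Hypothetical usage; the expected values follow from the formula above.
code = 20151125
for _ in range(3):
    code = next_code(code)
    print(code)
# 31916031
# 18749137
# 16080970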
def read_csv_to_lol(full_path, sep=";"):
    """
    Read a csv file into a list of lists.
    Make sure the file ends with an empty line.
    """
    with open(full_path, 'r') as ff:
        # read from CSV
        data = ff.readlines()
    # The newline at the end of each line is removed
    data = [i.replace("\n", "") for i in data]
    # Creating lists of list
    data = [i.split(sep) for i in data]
    return data
e53c46c6a8eabaece788111530fbf859dd23133f
3,649,935
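A minimal usage sketch for read_csv_to_lol (not from the original source; the file name and contents are made up for illustration):

import tempfile

# Write a two-row file; the trailing newline matches the docstring's advice.
with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write("a;b\n1;2\n")
    path = f.name

print(read_csv_to_lol(path))  # [['a', 'b'], ['1', '2']]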
import pandas as pd


def read_experiment(path):
    """
    Discovers CSV files an experiment produced and constructs columns for the
    experiment's conditions from the sub-directory structure.

    Args:
        path: path to the experiment's results.

    Returns:
        pd.DataFrame
    """
    objects = list(path.rglob('*.csv'))
    data = []
    path_split = _recursive_split(path)
    for obj in objects:
        obj_path_split = _recursive_split(obj)
        if len(obj_path_split) - len(path_split) > 7:
            raise Exception("Path depth too long! Provide path to actual experiment or one of its sub-directories.")
        data.append(obj_path_split)

    df = pd.DataFrame(data=data)
    columns = ["experiment", "imputer", "task", "missing_type", "missing_fraction", "strategy", "file_or_dir", "detail_file"]
    auto_columns = []
    for i in range(df.shape[1] - len(columns)):
        auto_columns.append(f"col{i}")
    df.columns = auto_columns + columns
    df.drop(auto_columns, axis=1, inplace=True)

    df["path"] = objects
    df["detail_file"] = df["detail_file"].fillna("")

    return df.reset_index(drop=True)
e9797fb71a0e9ba89e211fd0d079d5040e3a4639
3,649,936
from bs4 import BeautifulSoup
from selenium import webdriver


def single_keyword_search(keyword):
    """
    Fetch the Google search results page for ``keyword`` as HTML and return
    only the regular-post entries (elements with class='r').

    Args:
        keyword (str): keyword to search on Google

    Returns:
        title_list (bs4.element.ResultSet): regular posts (class='r') found
            in the Google search results
    """
    URL = 'https://www.google.com/search?q=' + keyword_preprocessing(keyword)
    driver = webdriver.Chrome("C:/Users/ksg/py_tutorial/chromedriver.exe")
    driver.implicitly_wait(1)
    driver.get(URL)
    driver.implicitly_wait(2)
    html = driver.page_source
    soup = BeautifulSoup(html, 'html.parser')

    title_list = soup.find_all(name='div', attrs={'class': 'r'})

    return title_list
45909878da1c135c4dc6b1209c98ffc5e7e21b29
3,649,937
def partial_list(ys, xs, specified_shapes=None):
    """
    Args:
        ys: A list of tensors. Each tensor will be differentiated with the partial_nd
        xs: A Tensor to be used for differentiation, or a list of tensors to be
            used for differentiation with the same length as ys
        specified_shapes: A list of specified dynamical shapes of ys.
            The first element of each shape is dummy as None or -1.
    """
    assert (len(ys) > 0) and (len(xs) > 0), "ys and xs must be non-empty"
    if specified_shapes is None:
        if len(xs) == 1:
            return [partial_nd(y, xs) for y in ys]
        else:
            return [partial_nd(y, x) for (y, x) in zip(ys, xs)]
    else:
        if len(xs) == 1:
            return [partial_nd(y, xs, specified_shape)
                    for (y, specified_shape) in zip(ys, specified_shapes)]
        else:
            return [partial_nd(y, x, specified_shape)
                    for (y, x, specified_shape) in zip(ys, xs, specified_shapes)]
24b0d2583f21cd4497e1c38d79643e44eaab693e
3,649,938
def _GuessBrowserName(bisect_bot): """Returns a browser name string for Telemetry to use.""" default = 'release' browser_map = namespaced_stored_object.Get(_BOT_BROWSER_MAP_KEY) if not browser_map: return default for bot_name_prefix, browser_name in browser_map: if bisect_bot.startswith(bot_name_prefix): return browser_name return default
b6b0fedd238aff07bfa46c61e9d792087b647a13
3,649,939
import collections
import functools

import lasagne
import numpy as np
import theano
import theano.tensor as T

# losses, hybrid_training, pylogging, BOOTSTRAP_MULTIPLIER and
# REDUCE_LR_INTERVAL are project-local names defined elsewhere in the
# original module.


def compile_train_function(network, batch_size, learning_rate):
    """Compiles the training function.

    Args:
        network: The network instance.
        batch_size: The training batch size.
        learning_rate: The learning rate.

    Returns:
        The update function that takes a batch of images and targets and
        updates the network weights.
    """
    learning_rate = np.float32(learning_rate)

    input_var = network.input_layers[0].input_var
    target_var = T.ftensor4()

    # Loss function
    loss_fn = functools.partial(
        losses.bootstrapped_xentropy,
        targets=target_var,
        batch_size=batch_size,
        multiplier=BOOTSTRAP_MULTIPLIER
    )

    # Update function
    lr = theano.shared(learning_rate)
    update_fn = functools.partial(lasagne.updates.adam, learning_rate=lr)

    pylogging.info("Compile SGD updates")
    gd_step = hybrid_training.compile_gd_step(
        network, loss_fn, [input_var, target_var], update_fn)

    reduce_lr = theano.function(
        inputs=[],
        updates=collections.OrderedDict([
            (lr, T.maximum(np.float32(5e-5), lr / np.float32(1.25)))
        ])
    )

    def _compute_update(imgs, targets, update_counter):
        if (update_counter + 1) % REDUCE_LR_INTERVAL == 0:
            reduce_lr()
        loss = gd_step(imgs, targets)
        return loss

    return _compute_update
57778a2428d4348f6594d04ec35bc821a4fd8122
3,649,940
import numpy as np


def filter_values(freq, values, nthOct: int = 3):
    """
    Filters the given values into nthOct bands.

    Parameters
    ----------
    freq : ndarray
        Array containing the frequency axis.
    values : ndarray
        Array containing the magnitude values to be filtered.
    nthOct : int, optional
        Fractional octave bands that the absorption will be filtered to.

    Returns
    -------
    bands : ndarray
        An array containing the center frequencies of the available bands.
    result : ndarray
        An array containing the filtered values in the available bands.
    """
    bands = fractional_octave_frequencies(nthOct=nthOct)  # [band_min, band_center, band_max]
    bands = bands[np.argwhere((bands[:, 1] >= min(freq))
                              & (bands[:, 1] <= max(freq)))[:, 0]]
    idx = np.array([np.argwhere((freq >= bands[a, 0]) & (freq <= bands[a, 2]))
                    for a in np.arange(0, len(bands))], dtype=object)
    result = np.array([np.sum(values[idx[a]]) / len(idx[a])
                       for a in np.arange(0, len(bands))], dtype=object)
    result = np.nan_to_num(result)
    return bands[:, 1], result.astype(values.dtype)
2a1b270049f1c2869fa03d7bc2a4f64658646b7a
3,649,942
def create_project(request): """View to create new project""" user = request.user if user.is_annotator: error = ErrorMessage(header="Access denied", message="Only admin and managers can create projects") return render(request, 'error.html', {'error':error}) if request.method == "POST": form = ProjectCreateForm(request.POST) if form.is_valid(): project = form.save(commit=False) project.manager = user project.save() return redirect('projects_list') else: form = ProjectCreateForm() return render(request, 'projects/create.html', {'form': form})
34d6def496c9ddac99710425a9550be7fa8eba58
3,649,943
from typing import Optional import pathlib def limit( observed_CLs: np.ndarray, expected_CLs: np.ndarray, poi_values: np.ndarray, figure_path: Optional[pathlib.Path] = None, close_figure: bool = False, ) -> mpl.figure.Figure: """Draws observed and expected CLs values as function of the parameter of interest. Args: observed_CLs (np.ndarray): observed CLs values expected_CLs (np.ndarray): expected CLs values, including 1 and 2 sigma bands poi_values (np.ndarray): parameter of interest values used in scan figure_path (Optional[pathlib.Path], optional): path where figure should be saved, or None to not save it, defaults to None close_figure (bool, optional): whether to close each figure immediately after saving it, defaults to False (enable when producing many figures to avoid memory issues, prevents rendering in notebooks) Returns: matplotlib.figure.Figure: the CLs figure """ fig, ax = plt.subplots() xmin = min(poi_values) xmax = max(poi_values) # line through CLs = 0.05 ax.hlines( 0.05, xmin=xmin, xmax=xmax, linestyle="dashdot", color="red", label=r"CL$_S$ = 5%", ) # 1 and 2 sigma bands ax.fill_between( poi_values, expected_CLs[:, 0], expected_CLs[:, 4], color="yellow", label=r"expected CL$_S$ $\pm 2\sigma$", ) ax.fill_between( poi_values, expected_CLs[:, 1], expected_CLs[:, 3], color="limegreen", label=r"expected CL$_S$ $\pm 1\sigma$", ) # expected CLs ax.plot( poi_values, expected_CLs[:, 2], "--", color="black", label=r"expected CL$_S$", ) # observed CLs values ax.plot(poi_values, observed_CLs, "o-", color="black", label=r"observed CL$_S$") # increase font sizes for item in ( [ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels() ): item.set_fontsize("large") # minor ticks for axis in [ax.xaxis, ax.yaxis]: axis.set_minor_locator(mpl.ticker.AutoMinorLocator()) ax.legend(frameon=False, fontsize="large") ax.set_xlabel(r"$\mu$") ax.set_ylabel(r"$\mathrm{CL}_{s}$") ax.set_xlim([xmin, xmax]) ax.set_ylim([0, 1]) ax.tick_params(axis="both", which="major", pad=8) ax.tick_params(direction="in", top=True, right=True, which="both") fig.tight_layout() utils._save_and_close(fig, figure_path, close_figure) return fig
85bf753844083dcfbea8273cabe6cf7c0513c6d9
3,649,944
def make_predictions(clf_object, predictors_str, data_source):
    """Generate predictions from the given input data.

    Input:
        clf_object      object     fitted classification model
        predictors_str  str array  names of the predictor columns
        data_source     ndarray    source data, either validation or test
    Output:
        preds           ndarray    predicted classes for the given input data
    """
    preds = clf_object.predict(data_source[predictors_str])
    return preds
ed5f29e65ddf3d7f7081b89e6f747925de944567
3,649,946
def get_token_annualized(address, days): """Return annualized returns for a specific token. Args: days [int]: Days ago for which to display annualized returns. address [str]: Ethereum token address. Return: dict: Annualized returns for a specified token. key [str]: Days annualized. value [str]: Annualized returns. """ url = f"{config.URLS['annualized_returns']}/{address}" response = api_call(url, params={'daysBack': days, 'key': POOLS_KEY}) return response
734b64fdce65d069eebd5fd62270b24fd2d27100
3,649,948
import random import string def generate_random_string(N): """ Generate a random string Parameters ------------- N length of the string Returns ------------- random_string Random string """ return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
3e2e672140e18546260a0882fa6cf06073bdf8e7
3,649,949
import re def extract_charm_name_from_url(charm_url): """Extract the charm name from the charm url. E.g. Extract 'heat' from local:bionic/heat-12 :param charm_url: Name of model to query. :type charm_url: str :returns: Charm name :rtype: str """ charm_name = re.sub(r'-[0-9]+$', '', charm_url.split('/')[-1]) return charm_name.split(':')[-1]
9905d6b5c7a2f5047bc939d1b6e23d128ee8984d
3,649,950
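A usage sketch for extract_charm_name_from_url (the charm URLs here are illustrative, not from the original source):

assert extract_charm_name_from_url('local:bionic/heat-12') == 'heat'
assert extract_charm_name_from_url('cs:~containers/kubernetes-master-117') == 'kubernetes-master'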
def class_name(service_name: str) -> str: """Map service name to .pyi class name.""" return f"Service_{service_name}"
b4bed8a677f9eedfcd66d6d37078075b0967ea20
3,649,951
def interesting_columns(df): """Returns non-constant column names of a dataframe.""" return sorted(set(df.columns) - set(constant_columns(df)))
84e548e806bcfd9031d620d3c02f942f60fa53cc
3,649,952
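interesting_columns depends on a constant_columns helper that is not shown above; a minimal sketch of such a helper, assuming it returns the names of columns whose values never vary:

import pandas as pd

def constant_columns(df):
    # Hypothetical implementation: a column is "constant" if it holds
    # at most one distinct value (NaN included).
    return [c for c in df.columns if df[c].nunique(dropna=False) <= 1]

df = pd.DataFrame({'a': [1, 2], 'b': [0, 0]})
print(interesting_columns(df))  # ['a']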
def liberty_str(s): """ >>> liberty_str("hello") '"hello"' >>> liberty_str('he"llo') Traceback (most recent call last): ... ValueError: '"' is not allow in the string: 'he"llo' >>> liberty_str(1.0) '"1.0000000000"' >>> liberty_str(1) '"1.0000000000"' >>> liberty_str([]) Traceback (most recent call last): ... ValueError: [] is not a string >>> liberty_str(True) Traceback (most recent call last): ... ValueError: True is not a string """ try: if isinstance(s, (int, float)): s = liberty_float(s) except ValueError: pass if not isinstance(s, str): raise ValueError("%r is not a string" % s) if '"' in s: raise ValueError("'\"' is not allow in the string: %r" % s) return '"'+s+'"'
2bc56be42a062668f94c9cc88baa94f5f73feaa3
3,649,953
import warnings

import numpy as np


def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
    """
    Return True if input array is a valid distance matrix.

    Distance matrices must be 2-dimensional numpy arrays.
    They must have a zero-diagonal, and they must be symmetric.

    Parameters
    ----------
    D : ndarray
        The candidate object to test for validity.
    tol : float, optional
        The distance matrix should be symmetric. `tol` is the maximum
        difference between entries ``ij`` and ``ji`` for the distance
        metric to be considered symmetric.
    throw : bool, optional
        An exception is thrown if the distance matrix passed is not valid.
    name : str, optional
        The name of the variable to be checked. This is useful if
        throw is set to True so the offending variable can be identified
        in the exception message when an exception is thrown.
    warning : bool, optional
        Instead of throwing an exception, a warning message is
        raised.

    Returns
    -------
    valid : bool
        True if the variable `D` passed is a valid distance matrix.

    Notes
    -----
    Small numerical differences in `D` and `D.T` and non-zeroness of
    the diagonal are ignored if they are within the tolerance specified
    by `tol`.
    """
    D = np.asarray(D, order='c')
    valid = True
    try:
        s = D.shape
        if len(D.shape) != 2:
            if name:
                raise ValueError(('Distance matrix \'%s\' must have shape=2 '
                                  '(i.e. be two-dimensional).') % name)
            else:
                raise ValueError('Distance matrix must have shape=2 (i.e. '
                                 'be two-dimensional).')
        if tol == 0.0:
            if not (D == D.T).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric.') % name)
                else:
                    raise ValueError('Distance matrix must be symmetric.')
            if not (D[range(0, s[0]), range(0, s[0])] == 0).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must '
                                      'be zero.') % name)
                else:
                    raise ValueError('Distance matrix diagonal must be zero.')
        else:
            if not (D - D.T <= tol).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric within tolerance %5.5f.')
                                     % (name, tol))
                else:
                    raise ValueError('Distance matrix must be symmetric within'
                                     ' tolerance %5.5f.' % tol)
            if not (D[range(0, s[0]), range(0, s[0])] <= tol).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must be'
                                      ' close to zero within tolerance %5.5f.')
                                     % (name, tol))
                else:
                    raise ValueError(('Distance matrix diagonal must be close'
                                      ' to zero within tolerance %5.5f.') % tol)
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e))
        valid = False
    return valid
e21163a5d68fa5cf1c2e0bbed34276c7e5a6b851
3,649,954
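A usage sketch for is_valid_dm with hand-made matrices (illustrative only):

import numpy as np

D = np.array([[0.0, 1.0],
              [1.0, 0.0]])
print(is_valid_dm(D))                # True
print(is_valid_dm(np.ones((2, 2))))  # False: diagonal is not zero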
from types import SimpleNamespace

import numpy as np
import scipy.linalg


def solve_gradwavefront(data, excludeself=False, predict_at=None,
                        fix_covar=False, **kw):
    """Find turbulent contributions to measured fiber positions.

    Assumes that the turbulent contributions can be modeled as the gradient
    of a wavefront error.  i.e., they are curl free.

    Args:
        data : ndarray containing measured positions and residuals from
            expected locations
        excludeself : bool
            do not use this fiber when computing the turbulence affecting
            this fiber.
        predict_at : tuple of (x, y) arrays at which to predict the
            turbulent contributions, instead of at the measured fibers
        fix_covar : bool
            use a fixed covariance parameterization instead of fitting one
        **kw : additional keywords passed to solve_covar

    Returns:
        xturb : turbulent contributions in x direction
        yturb : turbulent contributions in y direction
        res : output from scipy.optimize.minimize describing best fit
            covariance matrix
    """
    if predict_at is not None and excludeself:
        raise ValueError('predict_at does not make sense in combination with '
                         'excludeself')
    if not fix_covar:
        covar, res = solve_covar(data, lossfun=loss_gradwavefront,
                                 covarfun=make_covar_gradwavefront, **kw)
    else:
        res = SimpleNamespace()
        res.x = [5e-3, 5e-3, 100]
        if kw.get('rq', False):
            res.x = res.x + [2]
        covar = make_covar_gradwavefront(data, res.x, **kw)
    dvec = np.concatenate([data['dx'], data['dy']])
    if not excludeself:
        if predict_at is not None:
            # K(X*, X)(K(X, X) + C_n)^-1 y
            # Rasmussen & Williams algorithm 2.1
            chol, low = scipy.linalg.cho_factor(covar, check_finite=False,
                                                overwrite_a=True)
            covarpred = make_covar_gradwavefront_nonoise(
                data['x'], data['y'], predict_at[0], predict_at[1],
                res.x, **kw)
            alpha = scipy.linalg.cho_solve((chol, low), dvec)
            turb = np.dot(covarpred, alpha)
            xturb, yturb = turb[:len(predict_at[0])], turb[len(predict_at[0]):]
        else:
            # remove measurement noise contribution to covar
            cninv = np.eye(len(dvec))*res.x[0]**(-2)
            covar -= np.eye(len(dvec))*res.x[0]**2
            cpcninv = np.dot(covar, cninv)
            aa = cpcninv+np.eye(len(dvec))
            turb = np.linalg.solve(aa, np.dot(cpcninv, dvec))
            xturb, yturb = turb[:len(data)], turb[len(data):]
    else:
        # Rasmussen & Williams 5.12
        kinv = np.linalg.inv(covar)
        turb = dvec - kinv.dot(dvec)/np.diag(kinv)
        xturb, yturb = turb[:len(data)], turb[len(data):]
    return xturb, yturb, res
94733f5cf073aa44752fa395fe24f66fc9524049
3,649,955
from typing import Any def str_to_py(value: str): """Convert an string value to a native python type.""" rv: Any if is_boolean_state(value): rv = get_boolean(value) elif is_integer(value): rv = get_integer(value) elif is_float(value): rv = get_float(value) else: rv = value return rv
b0a5d0fbe573be6d9d961ac5c3c1895fd82539eb
3,649,956
from typing import Dict from typing import Any from typing import Tuple import json import requests def action(request: Dict[str, Any]) -> Tuple[str, int]: """Triggered from Slack action via an HTTPS endpoint. Args: request (dict): Request payload. """ if request.method != 'POST': return 'Only POST requests are accepted', 405 print('Triggered Slack action.') form = json.loads(request.form.get('payload', '')) _verify_web_hook(form) response_url = form.get('response_url') if not response_url: return 'No response URL!', 405 action_to_perform = form.get('actions')[0].get('value') in_office = action_to_perform == 'response_yes' _set_information(in_office) today = _now().strftime('%Y-%m-%d') status_to_response = { True: f'{DOG_NAME} will be in the office today ({today}). :dog:', False: f'{DOG_NAME} will not be in the office today ({today}). :no_entry_sign:', } response_text = f'Thanks for the response! I noted that {status_to_response[in_office]}' response = _format_slack_message(response_text) print(f'Replying with "{response_text}".') response_obj = requests.post( response_url, data=json.dumps(response), headers={'Content-Type': 'application/json'} ) print(f'Slack POST request status code: "{response_obj.status_code}".') return '', 200
fdf24beee3e5dc929f575883114987419967b2e9
3,649,957
def inmemory(): """Returns an xodb database backed by an in-memory xapian database. Does not support spelling correction. """ return open(xapian.inmemory_open(), spelling=False, inmem=True)
ed67dd9bd7d70c5aab33c963dfed4e2103f5cfd1
3,649,958
def all(iterable: object) -> bool:
    """Return True if every element of the iterable is truthy."""
    for element in iterable:
        if not element:
            return False
    return True
130a93230538122f35e29a6ec4ad5fca0efd835b
3,649,959
def ind2slice(Is): """Convert boolean and integer index arrays to slices. Integer and boolean arrays are converted to slices that span the selected elements, but may include additional elements. If possible, the slices are stepped. Arguments --------- Is : tuple tuple of indices (slice, integer array, boolean array, or single integer) Returns ------- Js : tuple tuple of slices """ if isinstance(Is, tuple): return tuple(_ind2slice(I) for I in Is) else: return _ind2slice(Is)
6be6a82750f9f73b2008c528ff192b07b8e0a784
3,649,960
import re


def select_devices(devices):
    """ Select devices interactively. """
    device_count = len(devices)
    print("Device list:")
    print("0) All devices")
    for i, d in enumerate(devices, start=1):
        print("%d) %s\t%s" % (i, d['serial'], d['model']))
    print("q) Exit this operation")
    selected = input("\nselect: ")
    nums = None
    if selected == '0':
        nums = range(0, device_count)
    elif selected == 'q':
        print("Exit this operation")
        exit(-1)
    else:
        nums = []
        for i in re.split(r'[\s+,]', selected):
            if i.isdigit():
                seq = int(i) - 1
                if 0 <= seq < device_count:
                    nums.append(seq)
                    continue
            print("error input: %s, retry again\n" % i)
            return select_devices(devices)
    return nums
91c405c8a198deb01e8abecc592ac2286dc712fd
3,649,962
def Fill( h ):
    """Fill every empty value in histogram with the previous value."""
    new_h = []

    x, v = h[0]
    if isinstance(v, (list, tuple)):
        l = len(v)
        previous_v = [0] * l
    else:
        previous_v = 0

    for x, v in h:
        if isinstance(v, (list, tuple)):
            # copy, so that tuples can be updated and the caller's
            # lists are not modified in place
            v = list(v)
            for i in range(0, l):
                if v[i] == 0:
                    v[i] = previous_v[i]
                else:
                    previous_v[i] = v[i]
        else:
            if v == 0:
                v = previous_v
            else:
                previous_v = v
        new_h.append( (x, v) )
    return new_h
429a4d723c38692e8ee0ebaea2fda1ec008cded6
3,649,963
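A usage sketch for Fill, showing the fill-forward behaviour on scalar values (example data is made up):

h = [(0, 1), (1, 0), (2, 3), (3, 0)]
print(Fill(h))  # [(0, 1), (1, 1), (2, 3), (3, 3)]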
import numpy as np
import pysketcher as ps  # assumed: the original imported its drawing
                         # primitives (Point, Line, Arc, Force, ...) as ``ps``

# W, H (canvas size) and L (pendulum length) are module-level constants
# defined elsewhere in the original source.


def pendulum(theta, S, mg, drag) -> ps.Composition:
    """Draw a free body animation of a pendulum.

    params:
        theta: the angle from the vertical at which the pendulum is.
        S: the force exerted toward the pivot.
        mg: the force owing to gravity.
        drag: the force acting against the motion of the pendulum.

    return:
        A composition of the pendulum
    """
    a = theta

    P = ps.Point(W / 2, 0.9 * H)  # rotation point

    path = ps.Arc(P, L, -ps.Angle(np.pi / 2), a)
    mass_pt = path.end
    rod = ps.Line(P, mass_pt)

    theta = ps.AngularDimension(
        r"$\theta$", P + ps.Point(0, -L / 4), P + (mass_pt - P).unit_vector * (L / 4), P
    )
    theta.extension_lines = False

    mass = ps.Circle(mass_pt, L / 30.0).set_fill_color(ps.Style.Color.BLUE)
    rod_vec = rod.end - rod.start

    length = ps.LinearDimension("$L$", mass_pt, P)
    # Displace length indication
    length = length.translate(ps.Point(-np.cos(a), -np.sin(a)) * (L / 15.0))
    length.style.line_width = 0.1

    gravity_start = ps.Point(0.8 * L, 0)
    gravity = ps.Gravity(P + gravity_start, L / 3)

    dashed_thin_black_line = ps.Style()
    dashed_thin_black_line.line_style = ps.Style.LineStyle.DASHED
    dashed_thin_black_line.line_color = ps.Style.Color.BLACK
    dashed_thin_black_line.line_width = 1.0

    path.style = dashed_thin_black_line

    vertical = ps.Line(rod.start, rod.start + ps.Point(0, -L))
    vertical.style = dashed_thin_black_line
    rod.style = dashed_thin_black_line

    comp = ps.Composition(
        {
            "body": mass,
            "rod": rod,
            "vertical": vertical,
            "theta": theta,
            "path": path,
            "g": gravity,
            # "L": length,
        }
    )

    magnitude = 1.2 * L / 6  # length of a unit force in figure
    force = mg  # constant (scaled eq: about 1)
    force *= magnitude
    mg_force = (
        ps.Force(
            "$mg$",
            mass_pt,
            mass_pt + ps.Point(0, 1) * force,
            text_position=ps.TextPosition.END,
        )
        if force != 0
        else None
    )

    force = S
    force *= magnitude
    rod_force = (
        ps.Force(
            "S",
            mass_pt,
            mass_pt - rod_vec.unit_vector * force,
            text_position=ps.TextPosition.END,
        )
        if force != 0
        else None
    )

    force = drag
    force *= magnitude
    air_force = (
        ps.Force(
            "",
            mass_pt,
            mass_pt - rod_vec.normal * force,
        )
        if force != 0
        else None
    )

    x0y0 = ps.Text("$(x_0,y_0)$", P + ps.Point(-0.4, -0.1))

    ir = ps.Force(
        r"$\mathbf{i}_r$",
        P,
        P + rod_vec.unit_vector * (L / 10),
        text_position=ps.TextPosition.END,
        # spacing=ps.Point(0.015, 0)
    )
    ith = ps.Force(
        r"$\mathbf{i}_{\theta}$",
        P,
        P + rod_vec.normal * (L / 10),
        text_position=ps.TextPosition.END,
        # spacing=ps.Point(0.02, 0.005)
    )

    body_diagram = ps.Composition(
        {
            "mg": mg_force,
            "S": rod_force,
            "air": air_force,
            "ir": ir,
            "ith": ith,
            "origin": x0y0,
        }
    )

    comp = comp.merge(body_diagram)
    return comp
9dd7918b00bae82600d4bb064461f7b6e58e9fb2
3,649,964
def get_items(): """Fetches items from `INITIAL_OFFSET` in batches of `PAGINATION_OFFSET` until there are no more """ offset = INITIAL_OFFSET items = [] while True: batch = get_page_of_items(JSON_ENDPOINT.format(offset)) if not batch: break items.extend(batch) offset += PAGINATION_OFFSET return items
b0950ee8eb291cceb1f0871e918b5aff26b6c2ab
3,649,965
from itertools import combinations_with_replacement


def tcombinations_with_replacement(iterable, r):
    """
    >>> tcombinations_with_replacement('ABCD', 0)
    ((),)
    >>> tcombinations_with_replacement('ABCD', 1)
    (('A',), ('B',), ('C',), ('D',))
    >>> tcombinations_with_replacement('ABCD', 2)
    (('A', 'A'), ('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'B'), ('B', 'C'), ('B', 'D'), ('C', 'C'), ('C', 'D'), ('D', 'D'))
    >>> tcombinations_with_replacement('ABCD', 3)
    (('A', 'A', 'A'), ('A', 'A', 'B'), ('A', 'A', 'C'), ('A', 'A', 'D'), ('A', 'B', 'B'), ('A', 'B', 'C'), ('A', 'B', 'D'), ('A', 'C', 'C'), ('A', 'C', 'D'), ('A', 'D', 'D'), ('B', 'B', 'B'), ('B', 'B', 'C'), ('B', 'B', 'D'), ('B', 'C', 'C'), ('B', 'C', 'D'), ('B', 'D', 'D'), ('C', 'C', 'C'), ('C', 'C', 'D'), ('C', 'D', 'D'), ('D', 'D', 'D'))
    >>> tcombinations_with_replacement('ABCD', 4)
    (('A', 'A', 'A', 'A'), ('A', 'A', 'A', 'B'), ('A', 'A', 'A', 'C'), ('A', 'A', 'A', 'D'), ('A', 'A', 'B', 'B'), ('A', 'A', 'B', 'C'), ('A', 'A', 'B', 'D'), ('A', 'A', 'C', 'C'), ('A', 'A', 'C', 'D'), ('A', 'A', 'D', 'D'), ('A', 'B', 'B', 'B'), ('A', 'B', 'B', 'C'), ('A', 'B', 'B', 'D'), ('A', 'B', 'C', 'C'), ('A', 'B', 'C', 'D'), ('A', 'B', 'D', 'D'), ('A', 'C', 'C', 'C'), ('A', 'C', 'C', 'D'), ('A', 'C', 'D', 'D'), ('A', 'D', 'D', 'D'), ('B', 'B', 'B', 'B'), ('B', 'B', 'B', 'C'), ('B', 'B', 'B', 'D'), ('B', 'B', 'C', 'C'), ('B', 'B', 'C', 'D'), ('B', 'B', 'D', 'D'), ('B', 'C', 'C', 'C'), ('B', 'C', 'C', 'D'), ('B', 'C', 'D', 'D'), ('B', 'D', 'D', 'D'), ('C', 'C', 'C', 'C'), ('C', 'C', 'C', 'D'), ('C', 'C', 'D', 'D'), ('C', 'D', 'D', 'D'), ('D', 'D', 'D', 'D'))
    >>> tcombinations_with_replacement('ABC', 4)
    (('A', 'A', 'A', 'A'), ('A', 'A', 'A', 'B'), ('A', 'A', 'A', 'C'), ('A', 'A', 'B', 'B'), ('A', 'A', 'B', 'C'), ('A', 'A', 'C', 'C'), ('A', 'B', 'B', 'B'), ('A', 'B', 'B', 'C'), ('A', 'B', 'C', 'C'), ('A', 'C', 'C', 'C'), ('B', 'B', 'B', 'B'), ('B', 'B', 'B', 'C'), ('B', 'B', 'C', 'C'), ('B', 'C', 'C', 'C'), ('C', 'C', 'C', 'C'))
    """
    return tuple(combinations_with_replacement(iterable, r))
432c826751bcfc1aa7bfef36cd25a198e6fe7b72
3,649,966
import random
import string


def random_organism(invalid_data):
    """
    Generate a random organism.

    return: tuple of an "organism" name drawn from the CanCOGeN vocabulary
        and the module-level ``global_valid_data``.
    """
    return random.choice(covid19_vocab_dict.get('organism')), global_valid_data
9319d04ddf874c43d84489c2b20c52cee334a8c1
3,649,968
import numpy as np


def comput_mean_ndcg(df, k):
    """
    Input: rating_info (user_id, movie_id, rating)
    Output: per-user NDCG scores.

    For each user, take the dataframe of their true and predicted ratings
    and compute NDCG@k over the values.
    """
    piece = dict(list(df.groupby('user_id')))
    ndcg_list = []
    for user in df["user_id"].unique():
        user_rating = piece[user]["rating"].values
        user_pred_rating = piece[user]['pred'].values
        ndcg_score = ndcg(user_pred_rating, user_rating, k)
        ndcg_list.append(ndcg_score)
    ndcg_list = np.array(ndcg_list)
    # Note: despite the name, the per-user scores are returned;
    # call .mean() on the result for the average NDCG.
    return ndcg_list
70bcb630b7df9940a76499f34a769bd2dca90283
3,649,970
from typing import List def to_im_list(IMs: List[str]): """Converts a list of string to IM Objects""" return [IM.from_str(im) for im in IMs]
9694500ae2f5fa7c203100a5a1756fe71af862cc
3,649,971
import numpy as np


def ref_ellipsoid(refell, UNITS='MKS'):
    """
    Computes parameters for a reference ellipsoid

    Arguments
    ---------
    refell: reference ellipsoid name

    Keyword arguments
    -----------------
    UNITS: output units
        MKS: meters, kilograms, seconds
        CGS: centimeters, grams, seconds
    """
    if refell.upper() in ('CLK66','NAD27'):
        #-- Clarke 1866
        a_axis = 6378206.4#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/294.9786982#-- flattening of the ellipsoid
    elif refell.upper() in ('GRS80','NAD83'):
        #-- Geodetic Reference System 1980
        #-- North American Datum 1983
        a_axis = 6378135.0#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.26#-- flattening of the ellipsoid
        GM = 3.986005e14#-- [m^3/s^2] Geocentric Gravitational Constant
    elif (refell.upper() == 'GRS67'):
        #-- Geodetic Reference System 1967
        #-- International Astronomical Union (IAU ellipsoid)
        a_axis = 6378160.0#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.247167427#-- flattening of the ellipsoid
        GM = 3.98603e14#-- [m^3/s^2] Geocentric Gravitational Constant
        omega = 7292115.1467e-11#-- angular velocity of the Earth [rad/s]
    elif (refell.upper() == 'WGS72'):
        #-- World Geodetic System 1972
        a_axis = 6378135.0#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.26#-- flattening of the ellipsoid
    elif (refell.upper() == 'WGS84'):
        #-- World Geodetic System 1984
        a_axis = 6378137.0#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.257223563#-- flattening of the ellipsoid
    elif (refell.upper() == 'ATS77'):
        #-- Quasi-earth centred ellipsoid for ATS77
        a_axis = 6378135.0#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.257#-- flattening of the ellipsoid
    elif (refell.upper() == 'KRASS'):
        #-- Krassovsky (USSR)
        a_axis = 6378245.0#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.3#-- flattening of the ellipsoid
    elif (refell.upper() == 'INTER'):
        #-- International
        a_axis = 6378388.0#-- [m] semimajor axis of the ellipsoid
        flat = 1/297.0#-- flattening of the ellipsoid
    elif (refell.upper() == 'MAIRY'):
        #-- Modified Airy (Ireland 1965/1975)
        a_axis = 6377340.189#-- [m] semimajor axis of the ellipsoid
        flat = 1/299.3249646#-- flattening of the ellipsoid
    elif (refell.upper() == 'TOPEX'):
        #-- TOPEX/POSEIDON ellipsoid
        a_axis = 6378136.3#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.257#-- flattening of the ellipsoid
        GM = 3.986004415e14#-- [m^3/s^2]
    elif (refell.upper() == 'EGM96'):
        #-- EGM 1996 gravity model
        a_axis = 6378136.3#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.256415099#-- flattening of the ellipsoid
        GM = 3.986004415e14#-- [m^3/s^2]
    elif (refell.upper() == 'HGH80'):
        #-- Hughes 1980 Ellipsoid used in some NSIDC data
        a_axis = 6378273.0#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.279411123064#-- flattening of the ellipsoid
    else:
        raise ValueError('Incorrect reference ellipsoid Name')

    if refell.upper() not in ('GRS80','GRS67','NAD83','TOPEX','EGM96'):
        #-- for ellipsoids not listing the Geocentric Gravitational Constant
        GM = 3.986004418e14#-- [m^3/s^2]
    if refell.upper() not in ('GRS67',):
        #-- for ellipsoids not listing the angular velocity of the Earth
        omega = 7292115e-11#-- [rad/s]

    #-- convert units to CGS
    if (UNITS == 'CGS'):
        a_axis *= 100.0
        GM *= 10e6

    #-- DERIVED PARAMETERS:
    #-- mean radius of the Earth having the same volume
    #-- (4pi/3)R^3 = (4pi/3)(a^2)b = (4pi/3)(a^3)(1 -f)
    rad_e = a_axis*(1.0 -flat)**(1.0/3.0)

    #-- semiminor axis of the ellipsoid
    b_axis = (1.0 -flat)*a_axis#-- [m]

    #-- Ratio between ellipsoidal axes
    ratio = (1.0 -flat)

    #-- Polar radius of curvature
    pol_rad = a_axis/(1.0 -flat)

    #-- Linear eccentricity
    lin_ecc = np.sqrt((2.0*flat - flat**2)*a_axis**2)
    #-- first numerical eccentricity
    ecc1 = lin_ecc/a_axis
    #-- second numerical eccentricity
    ecc2 = lin_ecc/b_axis

    #-- m parameter [omega^2*a^2*b/(GM)]
    #-- p. 70, Eqn.(2-137)
    mp = omega**2*((1 -flat)*a_axis**3)/GM

    #-- q, q_0
    #-- p. 67, Eqn.(2-113)
    q = 0.5*((1.0 + 3.0/(ecc2**2))*np.arctan(ecc2)-3.0/ecc2)
    q_0 = 3*(1.0 +1.0/(ecc2**2))*(1.0 -1.0/ecc2*np.arctan(ecc2))-1.0

    #-- J_2 p. 75 Eqn.(2-167), p. 76 Eqn.(2-172)
    j_2 = (ecc1**2)*(1.0 - 2.0*mp*ecc2/(15.0*q))/3.0

    #-- Normalized C20 terms.
    #-- p. 60, Eqn.(2-80)
    C20 = -j_2/np.sqrt(5.0)

    #-- Normal gravity at the equator.
    #-- p. 71, Eqn.(2-141)
    ga = GM/(a_axis*b_axis)*(1.0 -mp -mp*ecc2*q_0/(6.0*q))

    #-- Normal gravity at the pole.
    #-- p. 71, Eqn.(2-142)
    gb = GM/(a_axis**2.0)*(1.0 +mp*ecc2*q_0/(3.0*q))

    #-- ratio between gravity at pole versus gravity at equator
    dk = b_axis*gb/(a_axis*ga) - 1.0

    #-- Normal potential at the ellipsoid
    #-- p. 68, Eqn.(2-123)
    U0 = GM/lin_ecc*np.arctan(ecc2)+(1.0/3.0)*omega**2*a_axis**2

    #-- Surface area of the reference ellipsoid [m^2]
    area = np.pi*a_axis**2.*(2.+((1.-ecc1**2)/ecc1)*np.log((1.+ecc1)/(1.-ecc1)))
    #-- Volume of the reference ellipsoid [m^3]
    vol = (4.0*np.pi/3.0)*(a_axis**3.0)*(1.0-ecc1**2.0)**0.5

    return {'a':a_axis, 'b':b_axis, 'f':flat, 'rad_p':pol_rad, 'rad_e':rad_e,
        'ratio':ratio, 'GM':GM, 'omega':omega, 'C20':C20, 'J2':j_2, 'U0':U0,
        'dk':dk, 'norm_a':ga, 'norm_b':gb, 'mp':mp, 'q':q, 'q0':q_0,
        'ecc':lin_ecc, 'ecc1':ecc1, 'ecc2':ecc2, 'area':area, 'volume':vol}
8f5032af1375d758445ed139bc4a2d6989f15dc5
3,649,972
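A usage sketch for ref_ellipsoid (not from the original source):

wgs84 = ref_ellipsoid('WGS84')
print(wgs84['a'])            # 6378137.0
print(round(wgs84['b'], 3))  # 6356752.314  (semiminor axis, a*(1-f))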
def is_number(s): """ Check if it is a number. Args: s: The variable that needs to be checked. Returns: bool: True if float, False otherwise. """ try: float(s) return True except ValueError: return False
071aeac26a5a907caf1764dc20d7de1c6408714b
3,649,973
import itertools def combineSets(listOfSets): """ Combines sets of strings by taking the cross product of the sets and \ concatenating the elements in the resulting tuples :param listOfSets: 2-D list of strings :returns: a list of strings """ totalCrossProduct = [''] for i in range(len(listOfSets)): currentProduct = [] for crossProduct in itertools.product(totalCrossProduct, listOfSets[i]): currentProduct.append((crossProduct[0].strip() + ' ' + crossProduct[1].strip()).strip()) totalCrossProduct = currentProduct return totalCrossProduct
26a383d224716fd8f4cf8589607e2df1ccb82a7e
3,649,975
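A usage sketch for combineSets, concatenating one element from each set (example data is made up):

print(combineSets([['red', 'blue'], ['car']]))
# ['red car', 'blue car']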