Dataset columns:
    content : string (lengths 35 to 762k)
    sha1    : string (length 40)
    id      : int64 (0 to 3.66M)
def __get_app_package_path(package_type, app_or_model_class):
    """
    :param package_type:
    :return:
    """
    models_path = []
    found = False
    if isinstance(app_or_model_class, str):
        app_path_str = app_or_model_class
    elif hasattr(app_or_model_class, '__module__'):
        app_path_str = app_or_model_class.__module__
    else:
        raise RuntimeError('Unable to get module path.')

    for item in app_path_str.split('.'):
        if item in ['models', 'admin']:
            models_path.append(package_type)
            found = True
            break
        else:
            models_path.append(item)

    if not found:
        models_path.append(package_type)

    return '.'.join(models_path)
f08685ef47af65c3e74a76de1f64eb509ecc17b9
3,658,900
import base64


def dict_from_payload(base64_input: str, fport: int = None):
    """ Decodes a base64-encoded binary payload into JSON.
    Parameters
    ----------
    base64_input : str
        Base64-encoded binary payload
    fport: int
        FPort as provided in the metadata. Please note the fport is optional
        and can have value "None", if not provided by the LNS or invoking
        function. If fport is None and the binary decoder cannot proceed
        because of that, it should raise an exception.

    Returns
    -------
    JSON object with key/value pairs of decoded attributes
    """
    bytes = base64.b64decode(base64_input)
    value = (bytes[0] << 8 | bytes[1]) & 0x3FFF
    battery = value / 1000

    water_leak_status = 0
    if bytes[0] & 0x40:
        water_leak_status = 1

    door_open_status = 0
    if bytes[0] & 0x80:
        door_open_status = 1

    mod = bytes[2]

    if mod == 1:
        open_times = bytes[3] << 16 | bytes[4] << 8 | bytes[5]
        open_duration = bytes[6] << 16 | bytes[7] << 8 | bytes[8]
        result = {
            "mod": mod,
            "battery": battery,
            "door_open_status": door_open_status,
            "open_times": open_times,
            "open_duration": open_duration
        }
        return result

    if mod == 2:
        leak_times = bytes[3] << 16 | bytes[4] << 8 | bytes[5]
        leak_duration = bytes[6] << 16 | bytes[7] << 8 | bytes[8]
        result = {
            "mod": mod,
            "battery": battery,
            "leak_times": leak_times,
            "leak_duration": leak_duration
        }
        return result

    result = {
        "battery": battery,
        "mod": mod
    }
    return result
05fe484eef6c4376f0b6bafbde81c7cc4476b83e
3,658,901
from io import BytesIO

from PIL import Image, ImageOps


def handle(req):
    """POST"""
    im = Image.open(BytesIO(req.files[list(req.files.keys())[0]].body))
    w, h = im.size
    im2 = ImageOps.mirror(im.crop((0, 0, w / 2, h)))
    im.paste(im2, (int(w / 2), 0))
    io = BytesIO()
    im.save(io, format='PNG')
    return req.Response(
        body=io.getvalue(), mime_type='image/png', encoding='UTF-8')
d62afe253e331b4d7f037bdc56fa927bceb8bc03
3,658,902
import glob

import numpy as np
import pandas as pd


def read_sachs_all(folder_path):
    """Reads all the sachs data specified in the folder_path.

    Args:
        folder_path: str specifying the folder containing the sachs data

    Returns:
        An np.array containing all the sachs data
    """
    sachs_data = list()

    # Divides the Sachs dataset into environments.
    for _, file in enumerate(glob.glob(f'{folder_path}*.xls')):
        sachs_df = pd.read_excel(file)
        sachs_array = sachs_df.to_numpy()
        sachs_data.append(sachs_array)

    sachs_data_envs = np.vstack(sachs_data)

    return sachs_data_envs
151f1eec79251019d1a1c2b828531f6c1f01d605
3,658,903
def user_permitted_tree(user):
    """Generate a dictionary representing a folder tree composed of
    the elements the user is allowed to access.
    """

    # Init
    user_tree = {}

    # Dynamically collect permissions to avoid hardcoding.
    # Note: Any permission to an element is the same as read permission so
    # they are all included.
    file_perm_list = [
        f'data_driven_acquisition.{perm}' for perm in
        get_perms_for_model('data_driven_acquisition.File').values_list(
            'codename', flat=True)
    ]
    folder_perm_list = [
        f'data_driven_acquisition.{perm}' for perm in
        get_perms_for_model('data_driven_acquisition.Folder').values_list(
            'codename', flat=True)
    ]

    # Collect all permitted elements.
    permitted_folders = get_objects_for_user(
        user,
        folder_perm_list,
        any_perm=True).all()
    permitted_files = get_objects_for_user(
        user,
        file_perm_list,
        any_perm=True).all()

    # Add all permitted folders to the user tree with their content and parents.
    for folder_obj in permitted_folders:
        # Get the folder content as a tree.
        tree = get_folder_tree(folder_obj)

        # Try to place the tree in the user tree.
        if not place_folder_in_tree(user_tree, folder_obj, tree):
            # The parent is not in the user tree.
            # Create the parent folder at root level and then complete the
            # climb to the package level, merging as needed.
            user_tree[folder_obj] = tree
            user_tree = climb_to_package(user_tree, folder_obj)

    # Add all permitted files to the user tree with their parents.
    for file_obj in permitted_files:
        # Add to the user tree if the parent folder is already there.
        if not place_file_in_tree(user_tree, file_obj):
            # Could not find the parent folder in the tree.
            # Create a base tree with the parent folder and the file at root
            # level, then climb up to the package, merging when required.
            tree = {
                "files": [file_obj, ]
            }
            user_tree[file_obj.parent] = tree
            user_tree = climb_to_package(user_tree, file_obj.parent)

    return user_tree
fd9b7d60da7e085e948d4def0ababc3d0cb8233f
3,658,904
def extract_failure(d):
    """
    Returns the failure object the given deferred was errback'ed with.

    If the deferred has a result that is not a failure, a `ValueError` is
    raised. If the deferred has no result yet, a :class:`NotCalledError` is
    raised.
    """
    if not has_result(d):
        raise NotCalledError()
    else:
        result = []

        def callback(value):
            result.append(value)

        d.addBoth(callback)
        result = result[0]
        if isinstance(result, failure.Failure):
            return result
        else:
            raise ValueError("Deferred was called back with a value: %r"
                             % result)
7bc160a8ebd1c5cdeab1a91a556576c750d342f8
3,658,905
def convert_where_clause(clause: dict) -> str:
    """
    Convert a dictionary of clauses to a string for use in a query

    Parameters
    ----------
    clause : dict
        Dictionary of clauses

    Returns
    -------
    str
        A string representation of the clauses
    """
    out = "{"
    for key in clause.keys():
        out += "{}: ".format(key)
        # If the type of the right hand side is string, add the string quotes around it
        if type(clause[key]) == str:
            out += '"{}"'.format(clause[key])
        else:
            out += "{}".format(clause[key])
        out += ","
    out += "}"
    return out
8b135c799df8d16c116e6a5282679ba43a054684
3,658,906
from unittest.mock import call

import trollius


def all_metadata_async():
    """Retrieves all available metadata for an instance async"""
    loop = trollius.get_event_loop()
    res = loop.run_until_complete(call())
    return res
9759331fbd72271820896bd2849139dc13fc9d39
3,658,907
import numpy as np


def median_std_from_ma(data: np.ma, axis=0):
    """On the assumption that there are bit-flips in the *data*, attempt
    to find a value that might represent the standard deviation of the
    'real' data.  The *data* object must be a numpy masked array.

    The value of *axis* determines which way the data are handled.
    The default is 0 to scan vertically to accumulate statistics for
    columns.  In this case, only those columns with the most unmasked
    data are evaluated.  For them, the standard deviation is found
    for each column, and the returned value is the median of those
    standard deviations.  If *axis* is 1, then this is applied to
    rows, not columns.
    """
    valid_points = data.count(axis=axis)
    std_devs = np.std(data, axis=axis)
    return median_std(valid_points, std_devs)
587702c52bb2000ebfe920202270610e4ed49d8c
3,658,908
def __check_value_range(x: int) -> bool:
    """
    Checks if integer is in valid value range to be a coordinate for Tic-Tac-Toe.
    """
    if x < 1 or x > 3:
        print(__standard_error_text +
              "Coordinates have to be between 1 and 3.\n")
        return False
    return True
45f21b3292097baea31846b8bcc51435ae15134c
3,658,909
def find_option(opt):
    """
    This function checks for option defined with optcode;
    it could be implemented differently - by checking entries in world.cliopts
    """
    # received msg from client must not be changed - make a copy of it
    tmp = world.climsg[world.clntCounter].copy()
    # 0 - ether, 1 - ipv6, 2 - udp, 3 - dhcpv6, 4 - opts
    if type(tmp) == Ether:
        tmp = tmp.getlayer(4)
    else:
        tmp = tmp.getlayer(3)
    while tmp:
        if tmp.optcode == int(opt):
            return True
        tmp = tmp.payload
    return False
23904c16f9206f9030a40e17be5f6b01cb0439cf
3,658,910
from pyomo.environ import Set, Expression


def generic_add_model_components(
    m,
    d,
    reserve_zone_param,
    reserve_zone_set,
    reserve_generator_set,
    generator_reserve_provision_variable,
    total_reserve_provision_expression,
):
    """
    Generic treatment of reserves. This function creates model components
    related to a particular reserve requirement, including
    1) an expression aggregating generator-level provision to total provision
    :param m:
    :param d:
    :param reserve_zone_param:
    :param reserve_zone_set:
    :param reserve_generator_set:
    :param generator_reserve_provision_variable:
    :param total_reserve_provision_expression:
    :return:
    """

    # Reserve generators operational in timepoint
    # This will be the intersection of the reserve generator set and the set
    # of generators operational in the timepoint
    op_set = str(reserve_generator_set) + "_OPERATIONAL_IN_TIMEPOINT"
    setattr(
        m,
        op_set,
        Set(
            m.TMPS,
            initialize=lambda mod, tmp: getattr(mod, reserve_generator_set)
            & mod.OPR_PRJS_IN_TMP[tmp],
        ),
    )

    # Reserve provision
    def total_reserve_rule(mod, ba, tmp):
        return sum(
            getattr(mod, generator_reserve_provision_variable)[g, tmp]
            for g in getattr(mod, op_set)[tmp]
            if getattr(mod, reserve_zone_param)[g] == ba
        )

    setattr(
        m,
        total_reserve_provision_expression,
        Expression(getattr(m, reserve_zone_set), m.TMPS, rule=total_reserve_rule),
    )
2c7eef877e0ba7744ba624205fcf590071a95b84
3,658,911
def return_all_content(content):
    """Help function to return untruncated stripped content."""
    return mark_safe(str(content).replace('><', '> <')) if content else None
e24a1ee812a3a011cf6e369ba96bc2989ad7603d
3,658,912
def get_trailing_returns(uid): """ Get trailing return chart """ connection = pymysql.connect(host=DB_SRV, user=DB_USR, password=DB_PWD, db=DB_NAME, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) cursor = connection.cursor(pymysql.cursors.SSCursor) sql = "SELECT instruments.fullname, instruments.is_benchmark, "+\ "instruments.market, instruments.symbol, instruments.asset_class "+\ "FROM instruments JOIN symbol_list ON symbol_list.symbol = instruments.symbol "+\ "WHERE symbol_list.uid=" + str(uid) cursor.execute(sql) res = cursor.fetchall() for row in res: fullname = row[0].replace("'", "") is_benchmark = row[1] market = row[2] symbol_is_portf = row[3] asset_class = row[4] if symbol_is_portf.find(get_portf_suffix()) > -1: sql = "SELECT date FROM chart_data WHERE uid=" + str(uid) + " ORDER BY date DESC LIMIT 1" else: sql = "SELECT price_instruments_data.date FROM price_instruments_data JOIN symbol_list "+\ "ON symbol_list.symbol = price_instruments_data.symbol "+\ "WHERE symbol_list.uid=" + str(uid) +" ORDER BY date DESC LIMIT 1" cursor.execute(sql) res = cursor.fetchall() as_date = '' l_as_date = '' for row in res: as_date = row[0] if as_date != '': l_as_date = 'Trailing returns as of '+ as_date.strftime("%d-%b-%Y") font_size = 10 l_y1 = '1-Year' l_m6 = '6-month' l_m3 = '3-month' l_m1 = '1-month' l_w1 = '1-week' minb = 0 mini = 0 maxb = 0 maxi = 0 benchmark_header = '' benchmark_data_y1 = '' benchmark_data_m6 = '' benchmark_data_m3 = '' benchmark_data_m1 = '' benchmark_data_w1 = '' if not is_benchmark: sql = "SELECT symbol_list.uid, instruments.fullname "+\ "FROM symbol_list JOIN instruments "+\ "ON symbol_list.symbol = instruments.symbol "+\ "WHERE instruments.market='"+\ str(market) +"' AND instruments.asset_class='"+\ str(asset_class) +"' AND instruments.is_benchmark=1" cursor.execute(sql) res = cursor.fetchall() benchmark_uid = 0 for row in res: benchmark_uid = row[0] benchmark_fullname = row[1].replace("'", "") if benchmark_uid != 0: benchmark_header = ", ' " +\ benchmark_fullname +\ " ', {type: 'string', role: 'annotation'}" benchmark_data_y1 = ','+ get_chart_data(benchmark_uid, 'y1') benchmark_data_m6 = ','+ get_chart_data(benchmark_uid, 'm6') benchmark_data_m3 = ','+ get_chart_data(benchmark_uid, 'm3') benchmark_data_m1 = ','+ get_chart_data(benchmark_uid, 'm1') benchmark_data_w1 = ','+ get_chart_data(benchmark_uid, 'w1') minb = get_minmax(benchmark_uid, 'min') maxb = get_minmax(benchmark_uid, 'max') data = ''+\ '["'+ l_y1 + '",' + get_chart_data(uid, 'y1') + benchmark_data_y1 +']' + ',' +\ '["'+ l_m6 + '",' + get_chart_data(uid, 'm6') + benchmark_data_m6 + ']' + ',' +\ '["'+ l_m3 + '",' + get_chart_data(uid, 'm3') + benchmark_data_m3 + ']' + ',' +\ '["'+ l_m1 + '",' + get_chart_data(uid, 'm1') + benchmark_data_m1 + ']' + ',' +\ '["'+ l_w1 + '",' + get_chart_data(uid, 'w1') + benchmark_data_w1 + ']' mini = get_minmax(uid, 'min') maxi = get_minmax(uid, 'max') if minb < mini: mini = minb if maxb > maxi: maxi = maxb header = " ['x', ' " +\ fullname + " ', {type: 'string', role: 'annotation'}"+\ benchmark_header +" ]," chart_content = "" +\ "<script>" +\ "google.charts.load('current', {packages: ['corechart', 'bar']});" +\ "google.charts.setOnLoadCallback(drawAnnotations);" +\ "function drawAnnotations() {" +\ " var data = google.visualization.arrayToDataTable([" +\ header +\ data +\ " ]);" +\ " var options = {" +\ " fontSize: "+ str(font_size) + "," +\ " legend: {position:'top', textStyle: {color:"+\ theme_return_this("'black'", "'white'") +"} }," +\ " title: ''," +\ " 
backgroundColor: 'transparent',"+\ " chartArea: {width: '50%'}," +\ " annotations: {" +\ " alwaysOutside: true," +\ " textStyle: {" +\ " auraColor: 'none'," +\ " color: '#555'" +\ " }," +\ " boxStyle: {" +\ " stroke: '#ccc'," +\ " strokeWidth: 1," +\ " gradient: {" +\ " color1: 'yellow'," +\ " color2: 'white'," +\ " x1: '0%', y1: '0%'," +\ " x2: '100%', y2: '100%'" +\ " }" +\ " }" +\ " }," +\ " series: {0:{color: "+\ theme_return_this("'blue'", "'orange'") +"}, 1:{color: '#c9d6ea'} }," +\ " chartArea: {width:'80%',height:'80%'}," +\ " hAxis: {" +\ " title: '" + l_as_date + "', " +\ " titleTextStyle:{ color:"+\ theme_return_this("'black'", "'white'") +"},"+\ " viewWindow:{min:"+\ str(mini) +",max:"+\ str(maxi) +"}," +\ " gridlines: { color: 'transparent' },"+\ " textStyle: { color: "+\ theme_return_this("'black'", "'white'") +" } "+\ " }," +\ " vAxis: {" +\ " title: '', " +\ " textStyle: { color: "+\ theme_return_this("'black'", "'white'") +" } "+\ " }" +\ " };" +\ " var chart = "+\ "new google.visualization.BarChart(document.getElementById('trail_chart'));" +\ " chart.draw(data, options);" +\ " }" +\ " </script>" +\ " <div id='trail_chart' class='sa-chart-hw-290'></div>" cursor.close() connection.close() return chart_content
96e8ea67b1b91c3dfc6994b5ff56d9384aca6da5
3,658,913
import numpy as np


def bitsNotSet(bitmask, maskbits):
    """
    Given a bitmask, returns True where any of maskbits are set
    and False otherwise.

    Parameters
    ----------
    bitmask : ndarray
        Input bitmask.
    maskbits : ndarray
        Bits to check if set in the bitmask
    """
    goodLocs_bool = np.zeros(bitmask.shape).astype(bool)
    for m in maskbits:
        bitind = bm.bit_set(m, bitmask)
        goodLocs_bool[bitind] = True
    return goodLocs_bool
746c054310ac06c58cc32e5635d270c64481a527
3,658,914
def plot(foo, x, y):
    """x, y are tuples of 3 values: xmin, xmax, xnum"""
    np_foo = np.vectorize(foo)
    x_space = np.linspace(*x)
    y_space = np.linspace(*y)
    xx, yy = np.meshgrid(x_space, y_space)
    xx = xx.flatten()
    yy = yy.flatten()
    zz = np_foo(xx, yy)
    num_x = x[-1]
    num_y = y[-1]
    points = np.array([xx, yy, zz]).T
    scale = coin.SoScale()
    scale.scaleFactor.setValue(1, 1, abs(x[1] - x[0]) / abs(max(zz) - min(zz)))
    return [scale, simple_quad_mesh(points, num_x, num_y)]
67bfcc70a71140efa3d08960487a69943d2acdc8
3,658,915
import struct


def _StructPackEncoder(wire_type, format):
    """Return a constructor for an encoder for a fixed-width field.

    Args:
        wire_type:  The field's wire type, for encoding tags.
        format:  The format string to pass to struct.pack().
    """

    value_size = struct.calcsize(format)

    def SpecificEncoder(field_number, is_repeated, is_packed):
        local_struct_pack = struct.pack
        if is_packed:
            tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
            local_EncodeVarint = _EncodeVarint

            def EncodePackedField(write, value):
                write(tag_bytes)
                local_EncodeVarint(write, len(value) * value_size)
                for element in value:
                    write(local_struct_pack(format, element))
            return EncodePackedField
        elif is_repeated:
            tag_bytes = TagBytes(field_number, wire_type)

            def EncodeRepeatedField(write, value):
                for element in value:
                    write(tag_bytes)
                    write(local_struct_pack(format, element))
            return EncodeRepeatedField
        else:
            tag_bytes = TagBytes(field_number, wire_type)

            def EncodeField(write, value):
                write(tag_bytes)
                return write(local_struct_pack(format, value))
            return EncodeField

    return SpecificEncoder
7c58d955a903bac423799c99183066268fb7711b
3,658,916
from astropy import units as u
from astropy.constants import si as _si


def transition_temperature(wavelength):
    """
    To get temperature of the transition in K

    Wavelength in microns

    T = h*f / kB
    """
    w = u.Quantity(wavelength, u.um)
    l = w.to(u.m)
    c = _si.c.to(u.m / u.s)
    h = _si.h.to(u.eV * u.s)
    kb = _si.k_B.to(u.eV / u.K)
    f = c/l
    t = h*f/kb
    return t
dbec1ee2c1ad01cd257791624105ff0c4de6e708
3,658,917
def truncate_string(string: str, max_length: int) -> str:
    """
    Truncate a string to a specified maximum length.

    :param string: String to truncate.
    :param max_length: Maximum length of the output string.
    :return: Possibly shortened string.
    """
    if len(string) <= max_length:
        return string
    else:
        return string[:max_length]
c7d159feadacae5a692b1f4d95da47a25dd67c16
3,658,918
import requests
import json
from ipaddress import ip_address


def geoinfo_from_ip(ip: str) -> dict:  # pylint: disable=invalid-name
    """Looks up the geolocation of an IP address using ipinfo.io

    Example ipinfo output:
    {
      "ip": "1.1.1.1",
      "hostname": "one.one.one.one",
      "anycast": true,
      "city": "Miami",
      "region": "Florida",
      "country": "US",
      "loc": "25.7867,-80.1800",
      "org": "AS13335 Cloudflare, Inc.",
      "postal": "33132",
      "timezone": "America/New_York",
      "readme": "https://ipinfo.io/missingauth"
    }
    """
    valid_ip = ip_address(ip)
    url = f"https://ipinfo.io/{valid_ip}/json"
    resp = requests.get(url)
    if resp.status_code != 200:
        raise Exception(f"Geo lookup failed: GET {url} returned {resp.status_code}")
    geoinfo = json.loads(resp.text)
    return geoinfo
956a9d12b6264dc283f64ee792144946e313627b
3,658,919
def mpileup2acgt(pileup, quality, depth, reference, qlimit=53, noend=False, nostart=False): """ This function was written by Francesco Favero, from: sequenza-utils pileup2acgt URL: https://bitbucket.org/sequenza_tools/sequenza-utils original code were protected under GPLv3 license. Parse the mpileup format and return the occurrence of each nucleotides in the given positions. pileup format: 1 chr 2 1-based coordinate 3 reference base 4 depth 5 read bases 6 base qualities 7 mapping qualities # argument pileup = column-6 """ nucleot_dict = {'A': 0, 'C': 0, 'G': 0, 'T': 0} strand_dict = {'A': 0, 'C': 0, 'G': 0, 'T': 0} n = 0 block = {'seq': '', 'length': 0} start = False del_ins = False l_del_ins = '' last_base = None ins_del_length = 0 for base in pileup: if block['length'] == 0: if base == '$': if noend: if last_base: nucleot_dict[last_base.upper()] -= 1 if last_base.isupper(): strand_dict[last_base.upper()] -= 1 last_base = None elif base == '^': start = True block['length'] += 1 block['seq'] = base elif base == '+' or base == '-': del_ins = True block['length'] += 1 block['seq'] = base elif base == '.' or base == ',': ## . on froward, , on reverse if ord(quality[n]) >= qlimit: nucleot_dict[reference] += 1 if base == '.': strand_dict[reference] += 1 last_base = reference else: last_base = reference.lower() else: last_base = None n += 1 elif base.upper() in nucleot_dict: if ord(quality[n]) >= qlimit: nucleot_dict[base.upper()] += 1 if base.isupper(): strand_dict[base.upper()] += 1 last_base = base else: last_base = None n += 1 else: n += 1 else: if start: block['length'] += 1 block['seq'] += base if block['length'] == 3: if not nostart: if base == '.' or base == ',': if ord(quality[n]) >= qlimit: nucleot_dict[reference] += 1 if base == '.': strand_dict[reference] += 1 elif base.upper() in nucleot_dict: if ord(quality[n]) >= qlimit: nucleot_dict[base.upper()] += 1 if base.isupper(): strand_dict[base.upper()] += 1 block['length'] = 0 block['seq'] = '' start = False last_base = None n += 1 elif del_ins: if base.isdigit(): l_del_ins += base block['seq'] += base block['length'] += 1 else: ins_del_length = int(l_del_ins) + 1 + len(l_del_ins) block['seq'] += base block['length'] += 1 if block['length'] == ins_del_length: block['length'] = 0 block['seq'] = '' l_del_ins = '' # ins_del = False ins_del_length = 0 nucleot_dict['Z'] = [strand_dict['A'], strand_dict['C'], strand_dict['G'], strand_dict['T']] return nucleot_dict
bf5a0c5e147ece6e9b3be5906ba81ed54593b257
3,658,920
def normalize_missing(xs):
    """Normalize missing values to avoid string 'None' inputs.
    """
    if isinstance(xs, dict):
        for k, v in xs.items():
            xs[k] = normalize_missing(v)
    elif isinstance(xs, (list, tuple)):
        xs = [normalize_missing(x) for x in xs]
    elif isinstance(xs, basestring):
        if xs.lower() in ["none", "null"]:
            xs = None
        elif xs.lower() == "true":
            xs = True
        elif xs.lower() == "false":
            xs = False
    return xs
5d3fef8370e6a4e993eb06d96e5010c4b57907ba
3,658,921
def ini_inventory(nhosts=10):
    """Return a .INI representation of inventory"""
    output = list()
    inv_list = generate_inventory(nhosts)

    for group in inv_list.keys():
        if group == '_meta':
            continue

        # output host groups
        output.append('[%s]' % group)
        for host in inv_list[group].get('hosts', []):
            output.append(host)
        output.append('')  # newline

        # output child groups
        output.append('[%s:children]' % group)
        for child in inv_list[group].get('children', []):
            output.append(child)
        output.append('')  # newline

        # output group vars
        output.append('[%s:vars]' % group)
        for k, v in inv_list[group].get('vars', {}).items():
            output.append('%s=%s' % (k, v))
        output.append('')  # newline

    return '\n'.join(output)
46182c727e9dbb844281842574bbb54d2530d42b
3,658,922
from collections import defaultdict


def get_crp_constrained_partition_counts(Z, Cd):
    """Compute effective counts at each table given dependence constraints.

    Z is a dictionary mapping customer to table, and Cd is a list of lists
    encoding the dependence constraints.
    """
    # Compute the effective partition.
    counts = defaultdict(int)
    seen = set()

    # Table assignment of constrained customers.
    for block in Cd:
        seen.update(block)
        customer = block[0]
        table = Z[customer]
        counts[table] += 1

    # Table assignment of unconstrained customers.
    for customer in Z:
        if customer in seen:
            continue
        table = Z[customer]
        counts[table] += 1

    return counts
acda347ec904a7835c63afe5ec6efda5915df405
3,658,923
import re
from re import T


def create_doc():
    """Test basic layer creation and node creation."""
    # Stupid tokenizer
    tokenizer = re.compile(r"[a-zA-Z]+|[0-9]+|[^\s]")

    doc = Document()
    main_text = doc.add_text("main", "This code was written in Lund, Sweden.")
    #                                 01234567890123456789012345678901234567
    #                                 0         1         2         3

    token = doc.add_layer("token", text=main_text.spantype)
    for m in tokenizer.finditer(str(main_text)):
        token.add(text=main_text[m.start():m.end()])

    named_entity = doc.add_layer("named_entity", text=main_text.spantype, cls=T.string)
    named_entity.add(text=main_text[25:29], cls="GPE")
    named_entity.add(text=main_text[31:37], cls="GPE")

    return doc
36171fd68712861370b6ea1a8ae49aa7ec0c139a
3,658,924
def jissue_get_chunked(jira_in, project, issue_max_count, chunks=100):
    """
    This method is used to get the issue list with references,
    in case the number of issues is more than 1000
    """
    result = []
    # step and rest simple calc (integer division so range() accepts it)
    step = issue_max_count // chunks
    rest = issue_max_count % chunks
    # iterate the issue gathering
    for i in range(step):
        result.extend(jissue_query(jira_in, project, chunks*i, chunks))
    result.extend(jissue_query(jira_in, project, issue_max_count-rest, rest))
    return result
1c32859b91f139f5b56ce00f38dba38b2297109e
3,658,925
def negative_height_check(height):
    """Check the height and return a modified value if negative."""
    if height > 0x7FFFFFFF:
        return height - 4294967296
    return height
4d319021f9e1839a17b861c92c7319ad199dfb42
3,658,926
from .model_store import get_model_file
import os


def get_visemenet(model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create VisemeNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    net = VisemeNet(
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
        input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
            (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
78f6ecd0136b3fae3b0b7ae9b9463e9bbf4afc6b
3,658,927
def linked_gallery_view(request, obj_uuid):
    """
    View For Permalinks
    """
    gallery = get_object_or_404(Gallery, uuid=obj_uuid)
    images = gallery.images.all().order_by(*gallery.display_sort_string)
    paginator = Paginator(images, gallery.gallery_pagination_count)
    page = request.GET.get('page')
    try:
        imgs = paginator.page(page)
    except PageNotAnInteger:
        imgs = paginator.page(1)
    except EmptyPage:
        imgs = paginator.page(paginator.num_pages)
    context = {
        "images": imgs,
        "gallery": gallery,
        "gallery_name": gallery.title
    }
    return render(request, 'image_list.html', context)
c425c6678f2bfb3b76c039ef143d4e7cbc6ee922
3,658,928
def _gm_cluster_assign_id(gm_list, track_id, num_tracks, weight_threshold, z_dim, max_id, max_iteration=1000): """The cluster algorithm that assign a new ID to the track Args: gm_list (:obj:`list`): List of ``GaussianComponent`` representing current multi-target PHD density. track_id (:obj:`int`): Current track id. num_tracks (:obj:`int`): The number of tracks that this list of Gaussian components need to split into. weight_threshold (:obj:`float`): Initial weight threshold for each newly spawned track. z_dim (:obj:`int`): The dimensionality of measurement space. max_id (:obj:`int`): The next track ID number that can be assigned. max_iteration (:obj:`int`): Max number of iterations in case that the clustering algorithm does not converge and oscillates. Returns: A `list` of Gaussian components with updated track ID and the next track ID that can be assigned to new tracks in the future. """ clusters_mean = np.random.uniform(0, 1, (num_tracks, z_dim)) previous_clusters_mean = None cluster_gms = [[] for i in range(num_tracks)] count = 0 while np.any(clusters_mean != previous_clusters_mean) and \ count < max_iteration: previous_clusters_mean = np.copy(clusters_mean) # There n_tracks means, calculate the distance between each track, # and sorted from high to low gm_distance_matrix = _gm_cluster_distance(gm_list=gm_list, clusters_mean=clusters_mean, num_tracks=num_tracks, z_dim=z_dim) # Assign GM to each mean where the weight of each cluster equals or # just higher than the weight threshold. cluster_gms = _gm_group_cluster(gm_list=gm_list, distance_matrix=gm_distance_matrix, weight_threshold=weight_threshold) # Update mean for i in range(num_tracks): new_mean = np.zeros((z_dim,), dtype=np.float32) new_weight = 0. for gm in cluster_gms[i]: new_mean += gm.mean.flatten()[0:z_dim] * gm.weight new_weight += gm.weight if new_weight == 0.: new_weight = 1 clusters_mean[i, :] = new_mean / new_weight # Update count count += 1 # Assign ID to each cluster for i in range(num_tracks): # For every new track, start counting with max_id if track_id == 0 and i == 0: for gm in cluster_gms[i]: gm.mean[-1, :] = max_id max_id += 1 elif i != 0: for gm in cluster_gms[i]: gm.mean[-1, :] = max_id max_id += 1 return gm_list, max_id
f079867333a9e66f7b48782b899f060c38694220
3,658,929
def get_bprop_scatter_nd(self):
    """Generate bprop for ScatterNd"""
    op = P.GatherNd()

    def bprop(indices, x, shape, out, dout):
        return zeros_like(indices), op(dout, indices), zeros_like(shape)

    return bprop
3f2f5247b03ba49918e34534894c9c1761d02f07
3,658,930
from typing import Optional
import os
import hashlib


def get_md5(location: str, ignore_hidden_files: bool=True) -> Optional[str]:
    """
    Gets an MD5 checksum of the file or directory at the given location.
    :param location: location of file or directory
    :param ignore_hidden_files: whether hidden files should be ignored when
    calculating a checksum for a directory
    :return: the MD5 checksum or `None` if the given location does not exist
    """
    if not os.path.exists(location):
        return None

    if os.path.isfile(location):
        with open(location, "rb") as file:
            content = file.read()
        return hashlib.md5(content).hexdigest()
    else:
        return dirhash(location, "md5", ignore_hidden=ignore_hidden_files)
4d409355c60d2561d452a45c324f5da129a2acd9
3,658,931
import requests


def delete_policy_rule(policy_key, key, access_token):
    """
    Deletes a policy rule with the given key.
    Returns the response JSON.
    See http://localhost:8080/docs#/Policy/delete_rule_api_v1_policy__policy_key__rule__rule_key__delete
    """
    return requests.delete(
        f"{FIDESOPS_URL}/api/v1/policy/{policy_key}/rule/{key}",
        headers=oauth_headers(access_token=access_token),
    )
b53e52b2498707b82e3ceaf89be667886c75ca3c
3,658,932
def knn_search_parallel(data, K, qin=None, qout=None, tree=None, t0=None, eps=None, leafsize=None, copy_data=False): """ find the K nearest neighbours for data points in data, using an O(n log n) kd-tree, exploiting all logical processors on the computer. if eps <= 0, it returns the distance to the kth point. On the other hand, if eps > 0 """ # print("starting the parallel search") if eps is not None: assert data.shape[0]==len(eps) # build kdtree if copy_data: dataCopy = data.copy() # print('copied data') else: dataCopy = data if tree is None and leafsize is None: tree = ss.cKDTree(dataCopy) elif tree is None: tree = ss.cKDTree(dataCopy, leafsize=leafsize) if t0 is not None: print('time to tree formation: %f' %(clock()-t0)) ndata = data.shape[0] nproc = 20 # print('made the tree') # compute chunk size chunk_size = int(data.shape[0] / (4*nproc)) chunk_size = 100 if chunk_size < 100 else chunk_size if qin==None or qout==None: # set up a pool of processes qin = processing.Queue(maxsize=int(ndata/chunk_size)) qout = processing.Queue(maxsize=int(ndata/chunk_size)) if eps is None: pool = [processing.Process(target=__remote_process_query, args=(rank, qin, qout, tree, K, leafsize)) for rank in range(nproc)] else: pool = [processing.Process(target=__remote_process_ball, args=(rank, qin, qout, tree, leafsize)) for rank in range(nproc)] for p in pool: p.start() # put data chunks in input queue cur, nc = 0, 0 while 1: _data = data[cur:cur+chunk_size, :] if _data.shape[0] == 0: break if eps is None: qin.put((nc,_data)) else: _eps = eps[cur:cur+chunk_size] qin.put((nc,_data,_eps)) cur += chunk_size nc += 1 # read output queue knn = [] while len(knn) < nc: knn += [qout.get()] # avoid race condition _knn = [n for i,n in sorted(knn)] knn = [] for tmp in _knn: knn += [tmp] # terminate workers for p in pool: p.terminate() if eps is None: output = np.zeros((sum([ x.shape[0] for x in knn]),knn[0].shape[1])) else: output = np.zeros(sum([ len(x) for x in knn])) outputi = 0 for x in knn: if eps is None: nextVal = x.shape[0] else: nextVal = len(x) output[outputi:(outputi+nextVal)] = x outputi += nextVal return output
cc0dfaee8d1990f1d336e6a5e71973e1b4702e25
3,658,933
import urllib import typing def find_absolute_reference( target: str, domain: str, remote_url: urllib.parse.ParseResult, https_mode: _constants.HTTPSMode = _constants.DEFAULT_HTTPS_MODE, base: typing.Optional[urllib.parse.ParseResult] = None ) -> typing.Optional[str]: """ Transform the partly defined target string to a full URL The implementation of this method is partly based on RFC 3986, section 5.1 and 5.2 (with modifications). :param target: anything that seems to be an URI, relative or absolute :param domain: remote network location name (usually domain name) :param remote_url: remote URL that was used before, i.e. the referrer to the new target (and most likely also the origin of the reference) :param https_mode: definition how to treat the HTTPS mode (for the scheme) :param base: optional base URI used to correctly find absolute paths for relative resource indicators (uses the remote URL if absent) :return: a full URL that can be used to request further resources, if possible and the target matched the criteria (otherwise None); one of those criteria is the same remote netloc, which is enforced to limit the width of our requests to not query the whole web """ def merge_paths(a: urllib.parse.ParseResult, b: str) -> str: """ Merge two paths, where `a` should be a base and `b` should be a reference """ if not b.startswith("/"): b = "/" + b if a.netloc != "" and a.path == "": return b return "/".join(a.path.split("/")[:-1]) + b url = urllib.parse.urlparse(target) scheme, netloc, path, params, query, fragment = url # TODO: section 5.1, order of precedence if base is None: base = remote_url # Unknown schemes are ignored (e.g. mailto:) and a given schema indicates # an absolute URL which should not be processed (only filtered) if scheme != "" and scheme.lower() not in ("http", "https"): return elif scheme == "": if https_mode == _constants.HTTPSMode.DEFAULT: scheme = remote_url.scheme elif https_mode in (_constants.HTTPSMode.HTTPS_ONLY, _constants.HTTPSMode.HTTPS_FIRST): scheme = "https" elif https_mode == _constants.HTTPSMode.HTTP_ONLY: scheme = "http" elif netloc != "" and netloc.lower() == domain.lower(): return urllib.parse.urlunparse( (scheme, netloc, remove_dot_segments(path), params, query, "") ) # Other network locations are ignored (so we don't traverse the whole web) if netloc != "" and netloc.lower() != domain.lower(): return elif netloc != "": return urllib.parse.urlunparse( (scheme, netloc, remove_dot_segments(path), params, query, "") ) netloc = domain # Determine the new path if path == "": path = base.path if query == "": query = base.query else: if path.startswith("/"): path = remove_dot_segments(path) else: path = remove_dot_segments(merge_paths(base, path)) return urllib.parse.urlunparse( (scheme, netloc, remove_dot_segments(path), params, query, "") )
2c49fbe31e7490d53bb4549a73af6529fd41aa2a
3,658,934
from typing import Tuple def compute_vectors_from_coordinates( x: np.ndarray, y: np.ndarray, fps: int = 1 ) -> Tuple[Vector, Vector, Vector, Vector, np.array]: """ Given the X and Y position at each frame - Compute vectors: i. velocity vector ii. unit tangent iii. unit norm iv. acceleration and scalar quantities: i. speed ii. curvature See: https://stackoverflow.com/questions/28269379/curve-curvature-in-numpy """ # compute velocity vector dx_dt = np.gradient(x) dy_dt = np.gradient(y) velocity = ( np.array([[dx_dt[i], dy_dt[i]] for i in range(dx_dt.size)]) * fps ) # compute scalr speed vector ds_dt = np.sqrt(dx_dt * dx_dt + dy_dt * dy_dt) # get unit tangent vector tangent = np.array([1 / ds_dt] * 2).transpose() * velocity unit_tangent = tangent / np.apply_along_axis( np.linalg.norm, 1, tangent ).reshape(len(tangent), 1) # get unit normal vector tangent_x = tangent[:, 0] tangent_y = tangent[:, 1] deriv_tangent_x = np.gradient(tangent_x) deriv_tangent_y = np.gradient(tangent_y) dT_dt = np.array( [ [deriv_tangent_x[i], deriv_tangent_y[i]] for i in range(deriv_tangent_x.size) ] ) length_dT_dt = np.sqrt( deriv_tangent_x * deriv_tangent_x + deriv_tangent_y * deriv_tangent_y ) normal = np.array([1 / length_dT_dt] * 2).transpose() * dT_dt # get acceleration and curvature d2s_dt2 = np.gradient(ds_dt) d2x_dt2 = np.gradient(dx_dt) d2y_dt2 = np.gradient(dy_dt) curvature = ( np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2) / (dx_dt * dx_dt + dy_dt * dy_dt) ** 1.5 ) t_component = np.array([d2s_dt2] * 2).transpose() n_component = np.array([curvature * ds_dt * ds_dt] * 2).transpose() acceleration = t_component * tangent + n_component * normal return ( Vector(velocity), Vector(tangent), Vector( -unit_tangent[:, 1], unit_tangent[:, 0] ), # normal as rotated tangent Vector(acceleration), curvature, )
073262b521f3da79945674cc60ea26fee4c87529
3,658,935
import requests


def get_now(pair):
    """
    Return last info for crypto currency pair
    :param pair: ex: btc-ltc
    :return:
    """
    info = {'marketName': pair, 'tickInterval': 'oneMin'}
    return requests.get('https://bittrex.com/Api/v2.0/pub/market/GetLatestTick',
                        params=info).json()
b5db7ba5c619f8369c052a37e010229db7f78186
3,658,936
from buildchain import versions
from typing import Any
import sys


def fixture_buildchain_template_context() -> Any:
    """Emulate .in template context for buildchain."""
    buildchain_path = paths.REPO_ROOT / "buildchain"
    sys.path.insert(0, str(buildchain_path))
    # pylint: disable=import-error,import-outside-toplevel
    # pylint: enable=import-error,import-outside-toplevel
    sys.path.pop(0)
    return {
        "VERSION": versions.VERSION,
    }
365597c84659c8aa6aabaef9b0d75763e0ad9131
3,658,937
def hsv(h: float, s: float, v: float) -> int:
    """Convert HSV to RGB.

    :param h: Hue (0.0 to 1.0)
    :param s: Saturation (0.0 to 1.0)
    :param v: Value (0.0 to 1.0)
    """
    return 0xFFFF
638c1784f54ee51a3b7439f15dab45053a8c3099
3,658,938
def make_exponential_mask(img, locations, radius, alpha, INbreast=False): """Creating exponential proximity function mask. Args: img (np.array, 2-dim): the image, only it's size is important locations (np.array, 2-dim): array should be (n_locs x 2) in size and each row should correspond to a location [x,y]. Don't need to be integer, truncation is applied. NOTICE [x,y] where x is row number (distance from top) and y column number (distance from left) radius (int): radius of the exponential pattern alpha (float): decay rate INbreast (bool, optional): Not needed anymore, handled when parsing INbreast dataset Returns: mask (np.array, 0.0-1.0): Exponential proximity function """ # create kernel which we will be adding at locations # Kernel has radial exponential decay form kernel = np.zeros((2*radius+1,2*radius+1)) for i in range(0, kernel.shape[0]): for j in range(0, kernel.shape[1]): d = np.sqrt((i-radius)**2+(j-radius)**2) if d<= radius: kernel[i,j]=(np.exp(alpha*(1-d/radius))-1)/(np.exp(alpha)-1) # pad original img to avoid out of bounds errors img = np.pad(img, radius+1, 'constant').astype(float) # update locations locations = np.array(locations)+radius+1 locations = np.round(locations).astype(int) # initialize mask mask = np.zeros_like(img) for location in locations: if INbreast: y, x = location else: x, y = location # add kernel mask[x-radius:x+radius+1, y-radius:y+radius+1] =np.maximum(mask[x-radius:x+radius+1, y-radius:y+radius+1],kernel) # unpad mask = mask[radius+1:-radius-1,radius+1:-radius-1] return mask
14be02cee27405c3a4abece7654b7bf902a43a47
3,658,939
from typing import Tuple


def delete(client, url: str, payload: dict) -> Tuple[dict, bool]:
    """Make DELETE requests to K8s (see `k8s_request`)."""
    resp, code = request(client, 'DELETE', url, payload, headers=None)
    err = (code not in (200, 202))
    if err:
        logit.error(f"{code} - DELETE - {url} - {resp}")
    return (resp, err)
8ed463e063b06a48b410112f163830778f887551
3,658,940
def f(x):
    """The objective is defined as the cost + a per-demographic penalty
    for each demographic not reached."""
    n = len(x)
    assert n == n_venues
    reached = np.zeros(n_demographics, dtype=int)
    cost = 0.0
    for xi, ri, ci in zip(x, r, c):
        if xi:
            reached = reached | ri
            # cost += ci
    for ri, pi in zip(reached, p):
        if ri == 0:
            cost += pi
    return cost
d6724595086b0facccae84fd4f3460195bc84a1f
3,658,941
def clamp(minVal, val, maxVal):
    """Clamp a `val` to be no lower than `minVal`, and no higher than `maxVal`."""
    return max(minVal, min(maxVal, val))
004b9a393e69ca30f925da4cb18a8f93f12aa4ef
3,658,942
def get_closest_spot(
    lat: float, lng: float, area: config.Area
) -> t.Optional[config.Spot]:
    """Return closest spot if image taken within 100 m"""
    if not area.spots:
        return None
    distances = [
        (great_circle((spot.lat, spot.lng), (lat, lng)).meters, spot)
        for spot in area.spots
    ]
    distance, closest_spot = min(distances)
    return closest_spot if distance < 100 else None
55424c3b5148209e62d51cc7c6e5759353f5cb0a
3,658,943
def drawBezier( page: Page, p1: point_like, p2: point_like, p3: point_like, p4: point_like, color: OptSeq = None, fill: OptSeq = None, dashes: OptStr = None, width: float = 1, morph: OptStr = None, closePath: bool = False, lineCap: int = 0, lineJoin: int = 0, overlay: bool = True, stroke_opacity: float = 1, fill_opacity: float = 1, oc: int = 0, ) -> Point: """Draw a general cubic Bezier curve from p1 to p4 using control points p2 and p3.""" img = page.newShape() Q = img.drawBezier(Point(p1), Point(p2), Point(p3), Point(p4)) img.finish( color=color, fill=fill, dashes=dashes, width=width, lineCap=lineCap, lineJoin=lineJoin, morph=morph, closePath=closePath, stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc, ) img.commit(overlay) return Q
7bd3c0b8e3ca8717447213c6c2ac8bc94ea0f029
3,658,944
from functools import reduce
import operator


def product(numbers):
    """Return the product of the numbers.

    >>> product([1,2,3,4])
    24
    """
    return reduce(operator.mul, numbers, 1)
102ac352025ffff64a862c4c5ccbdbc89bdf807e
3,658,945
def load_ref_system(): """ Returns l-phenylalanine as found in the IQMol fragment library. All credit to https://github.com/nutjunkie/IQmol """ return psr.make_system(""" N 0.7060 -1.9967 -0.0757 C 1.1211 -0.6335 -0.4814 C 0.6291 0.4897 0.4485 C -0.8603 0.6071 0.4224 C -1.4999 1.1390 -0.6995 C -2.8840 1.2600 -0.7219 C -3.6384 0.8545 0.3747 C -3.0052 0.3278 1.4949 C -1.6202 0.2033 1.5209 C 2.6429 -0.5911 -0.5338 O 3.1604 -0.2029 -1.7213 O 3.4477 -0.8409 0.3447 H -0.2916 -2.0354 -0.0544 H 1.0653 -2.2124 0.8310 H 0.6990 -0.4698 -1.5067 H 1.0737 1.4535 0.1289 H 0.9896 0.3214 1.4846 H -0.9058 1.4624 -1.5623 H -3.3807 1.6765 -1.6044 H -4.7288 0.9516 0.3559 H -3.5968 0.0108 2.3601 H -1.1260 -0.2065 2.4095 H 4.1118 -0.2131 -1.6830 """)
724e0d37ae5d811da156ad09d4b48d43f3e20d6a
3,658,946
import csv def parse_dependency_file(filename): """Parse a data file containing dependencies. The input file is the following csv format: name,minerals,gas,build time,dependencies command center,400,0,100, orbital command,150,0,35,command center|barracks Notice that the "dependencies" column is a list, deliminated with | # TODO: We should lowercase everthing in this file # TODO: We should validate all names and dependencies are valid units/buildings/research # TODO: Should we store this stuff in memory rather than reading a file? Or memcache it? """ reader = csv.DictReader(open(filename, 'rb'), delimiter=',', quotechar='"') data = list(reader) # Force to a list # Ensure values for these keys are integers int_keys = ['minerals', 'gas', 'supply', 'build time', 'research time'] result = {} for line in data: # Change "thing1 |thing2 | thing3 " into ["thing1", "thing2", "thing3"] line['dependencies'] = [s.strip() for s in line['dependencies'].split("|") if s] for key in int_keys: if key in line: line[key] = int(line[key]) result[line['name']] = line return result
0c5194e85184e5f828354bf45a14c8d14714e8e0
3,658,947
from math import isclose
from typing import List


def range_with_bounds(start: int, stop: int, interval: int) -> List[int]:
    """Return list"""
    result = [int(val) for val in range(start, stop, interval)]
    if not isclose(result[-1], stop):
        result.append(stop)
    return result
1667657d75f918d9a7527048ad4207a497a20316
3,658,948
import warnings

import cv2
import numpy as np
import pyclipper


def iou_score(box1, box2):
    """Returns the Intersection-over-Union score, defined as the area of
    the intersection divided by the area of the union of the two bounding
    boxes. This measure is symmetric.

    Args:
        box1: The coordinates for box 1 as a list of points
        box2: The coordinates for box 2 in same format as box1.
    """
    if len(box1) == 2:
        x1, y1 = box1[0]
        x2, y2 = box1[1]
        box1 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
    if len(box2) == 2:
        x1, y1 = box2[0]
        x2, y2 = box2[1]
        box2 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
    if any(cv2.contourArea(np.int32(box)[:, np.newaxis, :]) == 0 for box in [box1, box2]):
        warnings.warn('A box with zero area was detected.')
        return 0
    pc = pyclipper.Pyclipper()
    pc.AddPath(np.int32(box1), pyclipper.PT_SUBJECT, closed=True)
    pc.AddPath(np.int32(box2), pyclipper.PT_CLIP, closed=True)
    intersection_solutions = pc.Execute(pyclipper.CT_INTERSECTION,
                                        pyclipper.PFT_EVENODD,
                                        pyclipper.PFT_EVENODD)
    union_solutions = pc.Execute(pyclipper.CT_UNION,
                                 pyclipper.PFT_EVENODD,
                                 pyclipper.PFT_EVENODD)
    union = sum(cv2.contourArea(np.int32(points)[:, np.newaxis, :])
                for points in union_solutions)
    intersection = sum(cv2.contourArea(np.int32(points)[:, np.newaxis, :])
                       for points in intersection_solutions)
    return intersection / union
746129ac390e045887ca44095af02370abb71d81
3,658,949
def _actually_on_chip(ra, dec, obs_md):
    """
    Take a numpy array of RA in degrees, a numpy array of Dec in degrees
    and an ObservationMetaData and return a boolean array indicating
    which of the objects are actually on a chip and which are not
    """
    out_arr = np.array([False]*len(ra))
    d_ang = 2.11
    good_radii = np.where(angularSeparation(ra, dec,
                                            obs_md.pointingRA,
                                            obs_md.pointingDec) < d_ang)
    if len(good_radii[0]) > 0:
        chip_names = chipNameFromRaDecLSST(ra[good_radii], dec[good_radii],
                                           obs_metadata=obs_md).astype(str)
        vals = np.where(np.char.find(chip_names, 'None') == 0, False, True)
        out_arr[good_radii] = vals
    return out_arr
809bfb59f63a62ab236fb2a3199e28b4f0ee93fd
3,658,950
from typing import Tuple

import numpy as np


def outlier_dataset(seed=None) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Generates Outliers dataset, containing 10'000 inliers and 50 outliers

    Args:
        seed: random seed for generating points

    Returns:
        Tuple containing the inlier features, inlier labels,
        outlier features and outlier labels
    """
    if seed is not None:
        np.random.seed(seed)

    inlier_feats = np.concatenate(
        [np.random.normal(1, 1, 5000), np.random.normal(-1, 1, 5000)]
    )

    inlier_labels = np.concatenate(
        [
            np.ones((5000,)),
            -1 * np.ones((5000,)),
        ]
    )

    outlier_feats = np.concatenate(
        [np.random.normal(-200, 1, 25), np.random.normal(200, 1, 25)]
    )

    outlier_labels = np.concatenate(
        [
            np.ones((25,)),
            -1 * np.ones((25,)),
        ]
    )

    return inlier_feats, inlier_labels, outlier_feats, outlier_labels
1b19e66f151290047017bce76e581ae0e725626c
3,658,951
def posts(request, payload={}, short_id=None):
    """
    Posts endpoint of the example.com public api

    Request with an id parameter:

        /public_api/posts/1qkx8

    POST JSON in the following format:

        POST /public_api/posts/
        {"ids":["1qkx8","ma6fz"]}
    """
    Metrics.api_comment.record(request)
    ids = payload.get('ids')

    if short_id and not ids:
        try:
            comment = Comment.details_by_id(long_id(short_id),
                                            promoter=PublicAPICommentDetails)
            (comment,) = CachedCall.multicall([comment])
            return comment.to_client()
        except (ObjectDoesNotExist, util.Base36DecodeException):
            raise ServiceError("Post not found")

    elif ids:
        ids = [long_id(x) for x in set(ids)]
        calls = [Comment.details_by_id(id, ignore_not_found=True,
                                       promoter=PublicAPICommentDetails)
                 for id in ids]
        comments = CachedCall.multicall(calls, skip_decorator=True)
        return {'posts': [x.to_client() for x in comments if x]}
c8ec491638417fe972fe75c0cf9f26fe1cf877ae
3,658,952
import collections import math def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False): """Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram precisions and brevity penalty. bleu:float,翻译句子的bleu得分, precisions:list, 包含每种ngram的准确率, bp:brevity penalty, 短句惩罚系数, ratio:translation_length / min(reference_length), translation_length:int,翻译长度, reference_length:int,最短的reference长度 """ matches_by_order = [0] * max_order possible_matches_by_order = [0] * max_order reference_length = 0 translation_length = 0 for (references, translation) in zip(reference_corpus, translation_corpus): reference_length += min(len(r) for r in references) translation_length += len(translation) merged_ref_ngram_counts = collections.Counter() for reference in references: # 同时考虑多个references merged_ref_ngram_counts |= _get_ngrams(reference, max_order) # 位或 translation_ngram_counts = _get_ngrams(translation, max_order) overlap = translation_ngram_counts & merged_ref_ngram_counts # 位与 # matches_by_order:{len(ngram):sum of ngram overlap counts} for ngram in overlap: matches_by_order[len(ngram) - 1] += overlap[ngram] # possible_matches_by_order(可匹配n-gram总数):{len(ngram):sum of each ngram possible matches} for order in range(1, max_order + 1): possible_matches = len(translation) - order + 1 if possible_matches > 0: possible_matches_by_order[order - 1] += possible_matches precisions = [0] * max_order for i in range(0, max_order): if smooth: precisions[i] = ((matches_by_order[i] + 1.) / (possible_matches_by_order[i] + 1.)) else: if possible_matches_by_order[i] > 0: precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i]) else: precisions[i] = 0.0 if min(precisions) > 0: p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions) geo_mean = math.exp(p_log_sum) else: geo_mean = 0 # 翻译长度惩罚(对较短的翻译基于较大的惩罚,以防止短翻译准确率会更高的问题) ratio = float(translation_length) / reference_length if ratio > 1.0: bp = 1. else: bp = math.exp(1 - 1. / ratio) bleu = geo_mean * bp return (bleu, precisions, bp, ratio, translation_length, reference_length)
4a7f45ea988e24ada554b38cea84083effe164bd
3,658,953
def rf_agg_local_mean(tile_col):
    """Compute the cellwise/local mean operation between Tiles in a column."""
    return _apply_column_function('rf_agg_local_mean', tile_col)
d65f6c7de674aac10ee91d39c8e5bc4ea6284e58
3,658,954
import json
import urllib2


def Shot(project, name):
    """
    Fetch shot information. Returns a (dict, err) tuple.
    """
    restURL = "http://10.0.90.251/api/shot?project=%s&name=%s" % (project, name)
    try:
        data = json.load(urllib2.urlopen(restURL))
    except:
        return {}, "Could not connect to the REST API."
    if "error" in data:
        return {}, data["error"]
    return data["data"], None
6bd7ac1e3663faf8b120c03a1e873255557bc30d
3,658,955
import numpy as np


def inversion_double(in_array):
    """
    Get the input boolean array along with its element-wise logical not beside it.
    For error correction.

    >>> inversion_double(np.array([1,0,1,1,1,0,0,1], dtype=np.bool))
    array([[ True, False,  True,  True,  True, False, False,  True],
           [False,  True, False, False, False,  True,  True, False]])
    """
    return np.stack((in_array, np.logical_not(in_array)))
84253bdec88d665ad8f68b0eb252f3111f4a91ac
3,658,956
def solution(N):
    """
    This is a fairly simple task.
    What we need to do is:
    1. Get string representation in binary form (I love formatted string literals)
    2. Measure biggest gap of zeroes (pretty self explanatory)
    """
    # get binary representation of number
    binary_repr = f"{N:b}"

    # initialise counters
    current_gap, max_gap = 0, 0
    for b in binary_repr:
        # end of gap, update max
        if b == '1':
            max_gap = max(current_gap, max_gap)
            current_gap = 0
        # increase gap counter
        else:
            current_gap += 1

    return max_gap
54b9dffe219fd5d04e9e3e3b07e4cb0120167a6f
3,658,957
from typing import Tuple def distributed_compute_expectations( building_blocks: Tuple[cw.ComplexDeviceArray], operating_axes: Tuple[Tuple[int]], pbaxisums: Tuple[Tuple[cw.ComplexDeviceArray]], pbaxisums_operating_axes: Tuple[Tuple[Tuple[int]]], pbaxisum_coeffs: Tuple[Tuple[float]], num_discretes: int, ) -> ShardedDeviceArray: """ Compute the expectation values of several observables given in `pbaxisums`. This function uses a single pmap and can be memory intesive for pbaxisums with many long prob-basis-axis-strings. Args: building_blocks: The building_blocks in super-matrix format (i.e. 128x128) operating_axes: The discrete axes on which `building_blocks` act. pbaxisums: Supermatrices of large_block representation of pauli sum operators. A single pbaxistring is represented as an innermost list of matrix-large_blocks. The outermost list iterates through different prob-basis-axis-sums, the intermediate list iterates through pbaxistrings within a pbaxisum. pbaxisums_operating_axes: The discrete axes on which the pbaxisums act. pbaxisum_coeffs: The coefficients of the prob-basis-axis-strings appearing in the union of all prob-basis-axis-sum operators. num_discretes: The number of discretes needed for the simulation. num_params: The number of parameters on which the acyclic_graph depends. Returns: ShardedDeviceArray: The expectation values. """ num_pbaxisums = len(pbaxisums) expectation_values = jnp.zeros(num_pbaxisums) final_state = helpers.get_final_state(building_blocks, operating_axes, num_discretes) for m, pbaxisum in enumerate(pbaxisums): pbaxisum_op_axes = pbaxisums_operating_axes[m] pbaxisum_coeff = pbaxisum_coeffs[m] # `psi` is brought into natural discrete order # don't forget to also align the axes here! coeff = pbaxisum_coeff[0] psi = helpers.apply_building_blocks( final_state, pbaxisum[0], pbaxisum_op_axes[0] ).align_axes() expectation_value = ( helpers.scalar_product_real(psi, final_state) * coeff ) for n in range(1, len(pbaxisum)): pbaxistring = pbaxisum[n] op_axes = pbaxisum_op_axes[n] coeff = pbaxisum_coeff[n] psi = helpers.apply_building_blocks( final_state, pbaxistring, op_axes ).align_axes() expectation_value += ( helpers.scalar_product_real(psi, final_state) * coeff ) # at this point all `psis` are in natural discrete ordering, # with the same `labels` values as `final_state` (i.e. # `labels = [0,1,2,3,..., num_discretes - 1]`). They also all have the # same (sorted) `perm` ordering due to the call to `align_axes()`. # compute the expectation values. Note that `psi` and `final_state` # have identical `perm` and `labels`. expectation_values = expectation_values.at[m].set( expectation_value.real[0] ) return expectation_values
d26c7595c604f7c52b3546083837de35ef4b4202
3,658,958
import xlrd


def extractStudents(filename):
    """
    Pre: The list in the xls file is not empty
    Post: All students are extracted from the file
    Returns the students list
    """
    list = []
    try:
        # open Excel file
        wb = xlrd.open_workbook(str(filename))
    except IOError:
        print("Oops! No file " + filename + " has been found !")
    else:
        sh = wb.sheet_by_name(wb.sheet_names()[0])
        for rownum in range(1, sh.nrows):  # 1 to remove title line
            student = sh.row_values(rownum)
            list.append(student)
    return list
e10d942c4e1742b4e8de9ec6a1248f27b2a4b1d5
3,658,959
import sys
from subprocess import Popen, PIPE


def dmenu_select(num_lines, prompt="Entries", inp=""):
    """Call dmenu and return the selected entry

    Args: num_lines - number of lines to display
          prompt - prompt to show
          inp - bytes string to pass to dmenu via STDIN

    Returns: sel - string
    """
    cmd = dmenu_cmd(num_lines, prompt)
    sel, err = Popen(cmd,
                     stdin=PIPE,
                     stdout=PIPE,
                     stderr=PIPE,
                     env=bwm.ENV).communicate(input=inp)
    if err:
        cmd = [cmd[0]] + ["-dmenu"] if "rofi" in cmd[0] else [""]
        Popen(cmd[0], stdin=PIPE, stdout=PIPE, env=bwm.ENV).communicate(input=err)
        sys.exit()
    if sel is not None:
        sel = sel.decode(bwm.ENC).rstrip('\n')
    return sel
45a0c4add05185278d258c2c9374403cb22459fd
3,658,960
def clean_ip(ip): """ Cleans the ip address up, useful for removing leading zeros, e.g.:: 1234:0:01:02:: -> 1234:0:1:2:: 1234:0000:0000:0000:0000:0000:0000:000A -> 1234::a 1234:0000:0000:0000:0001:0000:0000:0000 -> 1234:0:0:0:1:: 0000:0000:0000:0000:0001:0000:0000:0000 -> ::1:0:0:0 :type ip: string :param ip: An IP address. :rtype: string :return: The cleaned up IP. """ theip = normalize_ip(ip) segments = ['%x' % int(s, 16) for s in theip.split(':')] # Find the longest consecutive sequence of zeroes. seq = {0: 0} start = None count = 0 for n, segment in enumerate(segments): if segment != '0': start = None count = 0 continue if start is None: start = n count += 1 seq[count] = start # Replace those zeroes by a double colon. count = max(seq) start = seq[count] result = [] for n, segment in enumerate(segments): if n == start and count > 1: if n == 0: result.append('') result.append('') if n == 7: result.append('') continue elif start < n < start + count: if n == 7: result.append('') continue result.append(segment) return ':'.join(result)
f0828e793a3adfef536bf7cb76d73a9af097aa00
3,658,961
from pathlib import Path
import logging
import os

import numpy as np
import torch
import yaml
from tqdm import tqdm

from utils.loader import get_save_path
from utils.loader import dataLoader_test as dataLoader
from utils.loader import get_module
from utils.print_tool import datasize
from utils.var_dim import squeezeToNumpy
# NOTE: the original snippet used SummaryWriter, getWriterPath and PointTracker
# without importing them; the locations below are assumptions and may need to be
# adjusted to the project's actual modules.
from tensorboardX import SummaryWriter
from utils.loader import getWriterPath
from models.model_wrap import PointTracker


def export_descriptor(config, output_dir, args):
    """
    Input two images; output keypoints and correspondences.

    Saved prediction:
        pred:
            'image': np (320, 240)
            'prob' (keypoints): np (N1, 2)
            'desc': np (N2, 256)
            'warped_image': np (320, 240)
            'warped_prob' (keypoints): np (N2, 2)
            'warped_desc': np (N2, 256)
            'homography': np (3, 3)
            'matches': np (N3, 4)
    """
    # basic settings
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    logging.info("train on device: %s", device)
    with open(os.path.join(output_dir, "config.yml"), "w") as f:
        yaml.dump(config, f, default_flow_style=False)
    writer = SummaryWriter(getWriterPath(task=args.command, date=True))
    save_path = get_save_path(output_dir)
    save_output = save_path / "../predictions"
    os.makedirs(save_output, exist_ok=True)

    ## parameters
    outputMatches = True
    subpixel = config["model"]["subpixel"]["enable"]
    patch_size = config["model"]["subpixel"]["patch_size"]

    # data loading
    task = config["data"]["dataset"]
    data = dataLoader(config, dataset=task)
    test_set, test_loader = data["test_set"], data["test_loader"]
    datasize(test_loader, config, tag="test")

    # model loading
    Val_model_heatmap = get_module("", config["front_end_model"])
    ## load pretrained
    val_agent = Val_model_heatmap(config["model"], device=device)
    val_agent.loadModel()

    ## tracker
    tracker = PointTracker(max_length=2, nn_thresh=val_agent.nn_thresh)

    count = 0
    for i, sample in tqdm(enumerate(test_loader)):
        img_0, img_1 = sample["image"], sample["warped_image"]

        # first image, no matches
        def get_pts_desc_from_agent(val_agent, img, device="cpu"):
            """
            pts: list [numpy (3, N)]
            desc: list [numpy (256, N)]
            """
            heatmap_batch = val_agent.run(
                img.to(device)
            )  # heatmap: numpy [batch, 1, H, W]
            # heatmap to pts
            pts = val_agent.heatmap_to_pts()
            if subpixel:
                pts = val_agent.soft_argmax_points(pts, patch_size=patch_size)
            # heatmap, pts to desc
            desc_sparse = val_agent.desc_to_sparseDesc()
            outs = {"pts": pts[0], "desc": desc_sparse[0]}
            return outs

        def transpose_np_dict(outs):
            for entry in list(outs):
                outs[entry] = outs[entry].transpose()

        outs = get_pts_desc_from_agent(val_agent, img_0, device=device)
        pts, desc = outs["pts"], outs["desc"]  # pts: np [3, N]

        if outputMatches:
            tracker.update(pts, desc)

        # save keypoints
        pred = {"image": squeezeToNumpy(img_0)}
        pred.update({"prob": pts.transpose(), "desc": desc.transpose()})

        # second image, output matches
        outs = get_pts_desc_from_agent(val_agent, img_1, device=device)
        pts, desc = outs["pts"], outs["desc"]

        if outputMatches:
            tracker.update(pts, desc)

        pred.update({"warped_image": squeezeToNumpy(img_1)})
        pred.update(
            {
                "warped_prob": pts.transpose(),
                "warped_desc": desc.transpose(),
                "homography": squeezeToNumpy(sample["homography"]),
            }
        )

        if outputMatches:
            matches = tracker.get_matches()
            print("matches: ", matches.transpose().shape)
            pred.update({"matches": matches.transpose()})
        print("pts: ", pts.shape, ", desc: ", desc.shape)

        # clean last descriptor
        tracker.clear_desc()

        filename = str(count)
        path = Path(save_output, "{}.npz".format(filename))
        np.savez_compressed(path, **pred)
        count += 1
    print("output pairs: ", count)
79f512e5c9d100d8f2001410dafd06f6cbdf79fa
3,658,962
def meta_caption(meta) -> str:
    """makes text from metadata for captioning video"""
    caption = ""
    try:
        caption += meta.title + " - "
    except (TypeError, LookupError, AttributeError):
        pass
    try:
        caption += meta.artist
    except (TypeError, LookupError, AttributeError):
        pass
    return caption
6ef117eb5d7a04adcee25a755337909bfe142014
3,658,963
def ticket_id_correctly_formatted(s: str) -> bool:
    """Checks if Ticket ID is in the form of 'PROJECTNAME-1234'"""
    return matches(r"^\w+-\d+$|^---$|^-$")(s)
2bb1624ac2080852badc6ab2badcb2e1229f5fcc
3,658,964
def test_1():
    """
    f(x) = max(.2, sin(x)^2)
    """
    test_graph = FunctionTree('Test_1')
    max_node = Max('max')
    const_node = Constant('0.2', .2)
    square_node = Square('square')
    sin_node = Sin('sin')
    test_graph.insert_node(max_node, 'Output', 'x')
    test_graph.insert_node(square_node, 'max', 'x')
    test_graph.insert_node(const_node, 'max')
    test_graph.insert_node(sin_node, 'square', 'x')
    return test_graph
c6b47e386cdb7caa2290df2250fee3ad6aecbab7
3,658,965
def export_vector(vector, description, output_name, output_method='asset'):
    """Exports vector to GEE Asset in GEE or to shapefile in Google Drive.

    Parameters
    ----------
    vector : ee.FeatureCollection
        Classified vector segments/clusters.

    description : str
        Description of the exported layer.

    output_name : str
        Path for the output file. Path must exist within Google Earth Engine
        Assets path or Google Drive.

    output_method : str
        Export method/destination. Options include 'asset' for export to
        Google Earth Engine Assets or 'drive' for export to Google Drive.

    Returns
    -------
    output_message : str
        Message indicating location of the exported layer.

    Example
    -------
    >>> import ee
    >>> peak_green = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_008057_20170602')
    >>> post_harvest = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_008057_20170906')
    >>> image_collection = ee.ImageCollection([peak_green, post_harvest])
    >>> ndvi_diff = ndvi_diff_landsat8(image_collection, 1, 0)
    >>> study_area_boundary = ee.FeatureCollection("users/calekochenour/vegetation-change/drtt_study_area_boundary")
    >>> ndvi_change_thresholds = [-2.0, -0.5, -0.5, -0.35]
    >>> change_features = segment_snic(ndvi_diff, study_area_boundary, ndvi_change_thresholds)
    >>> change_primary_vector = raster_to_vector(change_features.get('primary'), study_area_boundary)
    >>> change_secondary_vector = raster_to_vector(change_features.get('secondary'), study_area_boundary)
    >>> change_primary_export = export_vector(vector=change_primary_vector, description='Primary Change', output_name=change_primary_asset_name, output_method='asset')
    >>> change_secondary_export = export_vector(vector=change_secondary_vector, description='Secondary Change', output_name=change_secondary_asset_name, output_method='asset')
    """
    # Create export task for Google Drive
    if output_method.lower() == "drive":
        # Export vectors as shapefile to Google Drive
        task = ee.batch.Export.table.toDrive(**{
            'collection': vector,
            'description': output_name,
            'fileFormat': 'SHP'})

        # Assign output message
        output_message = f"Exporting {output_name.split('/')[-1]} to Google Drive..."

    # Create task for GEE Asset
    elif output_method.lower() == "asset":
        # Export vectors to GEE Asset
        task = ee.batch.Export.table.toAsset(**{
            'collection': vector,
            'description': description,
            'assetId': output_name})

        # Assign output message
        output_message = f"Exporting {output_name.split('/')[-1]} to GEE Asset..."

    else:
        # Raise error
        raise ValueError("Invalid export method. Please specify 'Drive' or 'Asset'.")

    # Start export task
    task.start()

    # Return output message
    return print(output_message)
19cfa1a907aec4f25b1d8392f02a628f9e07ed7c
3,658,966
def optimize_centers_mvuiq(A, B, Q, centers, keep_sparsity=True):
    r""" minimize reconstruction error after weighting by matrix A and make it unbiased
        min_{c_i} \|A.(\sum_i Q_i c_i) - B\|_F^2  such that  sum(B - A(\sum_i Q_i c_i)) = 0
    """
    num_levels = len(centers)
    thr = sla.norm(A) * 1e-6

    # 1- compute A*(Q==i) and store it. find the non-empty quantization bins in the process
    valid_idx = []
    AQ = [np.zeros(1) for _ in range(num_levels)]
    for i in range(num_levels):
        AQ[i] = np.matmul(A, Q == i)
        if (sla.norm(AQ[i]) >= thr) and ((centers[i] != 0) or not keep_sparsity):
            # check whether the i-th bin has any effect on the quantization performance and
            # do not consider sparse values (center=0)
            valid_idx += [i]

    if not valid_idx:
        return

    # 2- find the optimum reconstruction points for the non-empty quantization bins
    # 2.a- create matrix M, used in the optimization problem
    num_valid = len(valid_idx)
    d = np.sum(B)
    f = np.zeros(num_valid)
    M = np.zeros(shape=(num_valid, num_valid))
    e = np.zeros(shape=num_valid)
    for r in range(num_valid):
        f[r] = np.sum(AQ[valid_idx[r]])
        for c in range(r, num_valid):
            # trace(AQ[valid_idx[c]].T @ AQ[valid_idx[r]])
            M[r, c] = np.sum(AQ[valid_idx[c]] * AQ[valid_idx[r]])
            M[c, r] = M[r, c]

        # trace(B.T @ AQ[valid_idx[r]])
        e[r] = np.sum(AQ[valid_idx[r]] * B)

    # 2.b- solve for min |Mx-e| such that fx=d
    if num_valid == 0:
        v = 0
    elif num_valid == 1:
        v = d / f[0]
    elif num_valid == 2:
        # for the special binary case, the solution can be found easily
        scale = sla.norm(f) + 1e-12
        f /= scale
        d /= scale

        u = np.array([-f[1], f[0]])
        a = (e - d * M.dot(f)).dot(u) / (M.dot(u).dot(u) + 1e-12)
        v = d * f + a * u
    else:
        # use quadratic programming (Goldfarb-Idnani algorithm) to solve the problem
        d = np.array([d]).astype(float)  # note: np.float is deprecated; use the builtin float
        f = np.reshape(f, newshape=(-1, 1))
        v = quadprog.solve_qp(M, e, f, d, 1)[0]

    # 3- copy the found center points
    centers[valid_idx] = v

    return centers
5a059bf9a88ed31a6cc75cecd2b0f7ef4273c5af
3,658,967
def container_instance_task_arns(cluster, instance_arn):
    """Fetch tasks for a container instance ARN."""
    arns = ecs.list_tasks(cluster=cluster, containerInstance=instance_arn)['taskArns']
    return arns
ca5f0be6aa054f7d839435a8c32c395429697639
3,658,968
def benchmark(pipelines=None, datasets=None, hyperparameters=None, metrics=METRICS,
              rank='f1', distributed=False, test_split=False, detrend=False,
              output_path=None):
    """Evaluate pipelines on the given datasets and evaluate the performance.

    The pipelines are used to analyze the given signals and later on the
    detected anomalies are scored against the known anomalies using the
    indicated metrics.

    Finally, the scores obtained with each metric are averaged across all the
    signals, ranked by the indicated metric and returned on a ``pandas.DataFrame``.

    Args:
        pipelines (dict or list): dictionary with pipeline names as keys and their
            JSON paths as values. If a list is given, it should be of JSON paths,
            and the paths themselves will be used as names. If not given, all
            verified pipelines will be used for evaluation.
        datasets (dict or list): dictionary of dataset names as keys and lists of
            signals as values. If a list is given then it will be under a generic
            name ``dataset``. If not given, all benchmark datasets will be used.
        hyperparameters (dict or list): dictionary with pipeline names as keys
            and their hyperparameter JSON paths or dictionaries as values. If a
            list is given, it should be of corresponding order to pipelines.
        metrics (dict or list): dictionary with metric names as keys and scoring
            functions as values. If a list is given, it should be of scoring
            functions, and their ``__name__`` value will be used as the metric
            name. If not given, all the available metrics will be used.
        rank (str): Sort and rank the pipelines based on the given metric.
            If not given, rank using the first metric.
        distributed (bool): Whether to use dask for distributed computing.
            If not given, use ``False``.
        test_split (bool or float): Whether to use the prespecified train-test
            split. If float, then it should be between 0.0 and 1.0 and represent
            the proportion of the signal to include in the test split.
            If not given, use ``False``.
        detrend (bool): Whether to use ``scipy.detrend``. If not given, use ``False``.
        output_path (str): Location to save the intermediary results. If not given,
            intermediary results will not be saved.

    Returns:
        pandas.DataFrame: Table containing the scores obtained with
            each scoring function across all the signals for each pipeline.
    """
    pipelines = pipelines or VERIFIED_PIPELINES
    datasets = datasets or BENCHMARK_DATA

    if isinstance(pipelines, list):
        pipelines = {pipeline: pipeline for pipeline in pipelines}

    if isinstance(datasets, list):
        datasets = {'dataset': datasets}

    if isinstance(hyperparameters, list):
        hyperparameters = {pipeline: hyperparameter for pipeline, hyperparameter in
                           zip(pipelines.keys(), hyperparameters)}

    if isinstance(metrics, list):
        metrics_ = dict()
        for metric in metrics:
            if callable(metric):
                metrics_[metric.__name__] = metric
            elif metric in METRICS:
                metrics_[metric] = METRICS[metric]
            else:
                raise ValueError('Unknown metric: {}'.format(metric))

        metrics = metrics_

    results = _evaluate_datasets(
        pipelines, datasets, hyperparameters, metrics, distributed, test_split, detrend)

    if output_path:
        LOGGER.info('Saving benchmark report to %s', output_path)
        results.to_csv(output_path)

    return _sort_leaderboard(results, rank, metrics)
09e7ebda30d0e9eec1b11a68fbc566bf8f39d841
3,658,969
def notNone(arg, default=None):
    """ Returns arg if not None, else returns default. """
    return [arg, default][arg is None]
71e6012db54b605883491efdc389448931f418d0
3,658,970
def get_scorer(scoring):
    """Get a scorer from string
    """
    if isinstance(scoring, str) and scoring in _SCORERS:
        scoring = _SCORERS[scoring]

    return _metrics.get_scorer(scoring)
fbf1759ae4c6f93be036a6af479de89a732bc520
3,658,971
from typing import Iterator


def triangle_num(value: int) -> int:
    """Returns triangular number for a given value.

    Parameters
    ----------
    value : int
        Integer value to use in triangular number calculation.

    Returns
    -------
    int
        Triangular number.

    Examples:
    >>> triangle_num(0)
    0

    >>> triangle_num(1)
    1

    >>> triangle_num(4)
    10

    >>> triangle_num(10)
    55

    >>> triangle_num("A")
    Traceback (most recent call last):
    ...
    TypeError: '>' not supported between instances of 'str' and 'int'

    >>> triangle_num(-1)
    Traceback (most recent call last):
    ...
    ValueError: Please use positive integer value.
    """
    if value >= 0:
        tot: list = [0]

        def recur(n: int, t: list) -> Iterator:
            if n > 0:
                t[0] += n
                n -= 1
                return recur(n, t)

        recur(value, tot)
        return tot[0]
    raise ValueError("Please use positive integer value.")
f22554b2c220d368b1e694021f8026162381a7d0
3,658,972
import torch


def locations_sim_euclidean(image: DataBunch, **kwargs):
    """
    A locations similarity function that uses euclidean similarity between vectors.

    Predicts the anatomical locations of the input image, and then returns the
    euclidean similarity between the input embryo's locations vector and the
    locations vectors of the database embryos. Euclidean similarity and distance
    are computed between unnormalized, one-hot locations vectors. The euclidean
    similarity between two locations vectors is defined as 1/(1 + euclidean distance).

    Arguments:
    - image: The input image DataBunch

    Returns:
    A tensor of similarity values (one for each database image). Each similarity
    score is the euclidean similarity between locations vectors.
    """
    locations_pred = run_inference(image, do_stage=False)[0]
    _, database_image_locations = retrieve_predictions()
    euclidean_distance = torch.norm(database_image_locations - locations_pred, dim=1).unsqueeze(1)
    return 1 / (1 + euclidean_distance)
d45c33641ac6327963f0634878c99461de9c1052
3,658,973
def _butter_bandpass_filter(data, low_cut, high_cut, fs, axis=0, order=5):
    """Apply a bandpass butterworth filter with zero-phase filtering

    Args:
        data: (np.array)
        low_cut: (float) lower bound cutoff for high pass filter
        high_cut: (float) upper bound cutoff for low pass filter
        fs: (float) sampling frequency in Hz
        axis: (int) axis to perform filtering.
        order: (int) filter order for butterworth bandpass

    Returns:
        bandpass filtered data.
    """
    nyq = 0.5 * fs
    b, a = butter(order, [low_cut / nyq, high_cut / nyq], btype="band")
    return filtfilt(b, a, data, axis=axis)
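A minimal usage sketch for the filter above, assuming `butter` and `filtfilt` are imported from `scipy.signal` in the defining module (the snippet does not show its imports); the signal is synthetic.

import numpy as np
from scipy.signal import butter, filtfilt  # assumed source of the helpers used above

fs = 250.0                                   # sampling rate in Hz
t = np.arange(0, 2.0, 1.0 / fs)              # two seconds of samples
noisy = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)

# keep roughly the 8-12 Hz band around the 10 Hz component
filtered = _butter_bandpass_filter(noisy, low_cut=8.0, high_cut=12.0, fs=fs)
print(filtered.shape)  # (500,)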
706770bbf78e103786a6247fc56df7fd8b41665a
3,658,974
def transform_and_normalize(vecs, kernel, bias):
    """Apply the transformation, then normalize.
    """
    if not (kernel is None or bias is None):
        vecs = (vecs + bias).dot(kernel)
    return vecs / (vecs**2).sum(axis=1, keepdims=True)**0.5
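A small usage sketch, assuming `vecs` is a NumPy array of row vectors; the identity kernel and zero bias below are placeholders for whatever transformation matrix and offset the caller has computed.

import numpy as np

rng = np.random.default_rng(0)
vecs = rng.normal(size=(4, 8))      # 4 embeddings of dimension 8
kernel = np.eye(8)                  # placeholder transformation matrix
bias = np.zeros(8)                  # placeholder offset

out = transform_and_normalize(vecs, kernel, bias)
print(np.linalg.norm(out, axis=1))  # every row now has unit L2 norm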
bb32cd5c74df7db8d4a6b6e3ea211b0c9b79db47
3,658,975
def mpesa_response(r):
    """
    Create MpesaResponse object from requests.Response object

    Arguments:
        r (requests.Response) -- The response to convert
    """
    r.__class__ = MpesaResponse
    json_response = r.json()
    r.response_description = json_response.get('ResponseDescription', '')
    r.error_code = json_response.get('errorCode')
    r.error_message = json_response.get('errorMessage', '')
    return r
e416030d39411ce19aee28735465ba035461f802
3,658,976
def swap_flies(dataset, indices, flies1=0, flies2=1):
    """Swap flies in dataset.

    Caution: datavariables are currently hard-coded!
    Caution: Swap may be in place, so it might alter the original dataset.

    Args:
        dataset ([type]): Dataset for which to swap flies
        indices ([type]): List of indices at which to swap flies.
        flies1 (int or list/tuple, optional): Either a single value for all indices
            or a list with one value per item in indices. Defaults to 0.
        flies2 (int or list/tuple, optional): Either a single value for all indices
            or a list with one value per item in indices. Defaults to 1.

    Returns:
        dataset with swapped indices ()
    """
    for cnt, index in enumerate(indices):
        if isinstance(flies1, (list, tuple)) and isinstance(flies2, (list, tuple)):
            fly1, fly2 = flies1[cnt], flies2[cnt]
        else:
            fly1, fly2 = flies1, flies2

        if 'pose_positions_allo' in dataset:
            dataset.pose_positions_allo.values[index:, [fly2, fly1], ...] = dataset.pose_positions_allo.values[index:, [fly1, fly2], ...]
        if 'pose_positions' in dataset:
            dataset.pose_positions.values[index:, [fly2, fly1], ...] = dataset.pose_positions.values[index:, [fly1, fly2], ...]
        if 'body_positions' in dataset:
            dataset.body_positions.values[index:, [fly2, fly1], ...] = dataset.body_positions.values[index:, [fly1, fly2], ...]

    return dataset
1f1941d8d6481b63efd1cc54fcf13f7734bccf8b
3,658,977
def periodic_kernel(avetoas, log10_sigma=-7, log10_ell=2, log10_gam_p=0, log10_p=0):
    """Quasi-periodic kernel for DM"""
    r = np.abs(avetoas[None, :] - avetoas[:, None])

    # convert units to seconds
    sigma = 10**log10_sigma
    l = 10**log10_ell * 86400
    p = 10**log10_p * 3.16e7
    gam_p = 10**log10_gam_p

    d = np.eye(r.shape[0]) * (sigma/500)**2
    K = sigma**2 * np.exp(-r**2/2/l**2 - gam_p*np.sin(np.pi*r/p)**2) + d
    return K
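A quick sketch of evaluating the kernel above on a handful of averaged TOAs; the TOA values (in seconds) are made up for illustration, and the result is a square covariance matrix.

import numpy as np

avetoas = np.array([0.0, 30.0, 90.0, 180.0]) * 86400.0  # hypothetical TOAs in seconds
K = periodic_kernel(avetoas, log10_sigma=-7, log10_ell=2, log10_gam_p=0, log10_p=0)
print(K.shape)               # (4, 4)
print(np.allclose(K, K.T))   # the covariance matrix is symmetric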
14dc89fbbf501ee42d7778bd14a9e35d22bc69ea
3,658,978
def emails(request):
    """
    A view to send emails out to hunt participants upon receiving a valid post request
    as well as rendering the staff email form page
    """
    teams = Hunt.objects.get(is_current_hunt=True).real_teams
    people = []
    for team in teams:
        people = people + list(team.person_set.all())
    email_list = [person.user.email for person in people]

    if request.method == 'POST':
        email_form = EmailForm(request.POST)
        if email_form.is_valid():
            subject = email_form.cleaned_data['subject']
            message = email_form.cleaned_data['message']
            email_to_chunks = [email_list[x: x + 80] for x in range(0, len(email_list), 80)]
            for to_chunk in email_to_chunks:
                email = EmailMessage(subject, message, '[email protected]', [], to_chunk)
                email.send()
            return HttpResponseRedirect('')
    else:
        email_form = EmailForm()

    context = {'email_list': (', ').join(email_list), 'email_form': email_form}
    return render(request, 'email.html', add_apps_to_context(context, request))
93cc8099e8f73b2607ab736a2aae4ae59ca1fe4d
3,658,979
def _stochastic_universal_sampling(parents: Population, prob_distribution: list, n: int):
    """
    Stochastic universal sampling (SUS) algorithm. Whenever more than one sample is to
    be drawn from the distribution the use of the stochastic universal sampling
    algorithm is preferred compared to roulette wheel algorithm.

    Parameters
    ----------
    :param parents: beagle.Population
        Population from which n individuals are going to be selected.
    :param prob_distribution: list
        Cumulative probability distribution.
    :param n: int
        Length of the selected population.

    Returns
    -------
    :return: list of beagle.Individual
        Selected individuals.

    Exceptions
    ----------
    :raise Exception
        If the algorithm enters an infinite loop because random_num is greater than 1
        an exception will occur.
    """
    current_member, i = 0, 0
    mating_pool = [None] * n
    random_num = np.random.uniform(low=0, high=(1/n))

    while current_member < n:
        while random_num <= prob_distribution[i]:
            mating_pool[current_member] = parents[i]
            random_num += 1 / n
            current_member += 1

            if random_num > 1:
                raise Exception(
                    'The SUS algorithm has entered an infinite loop. Verify that the selected population '
                    'sizes are suitable for this type of operator.')

        i += 1

    # Make a deepcopy of each selected individual
    mating_pool = [deepcopy(individual) for individual in mating_pool]

    return mating_pool
fb6b58cbdedbd133a7ba72470c2fc6586265ed4c
3,658,980
import traceback
import os


def format_exception_with_frame_info(e_type, e_value, e_traceback, shorten_filenames=False):
    """Need to suppress thonny frames to avoid confusion"""
    _traceback_message = "Traceback (most recent call last):\n"

    _cause_message = getattr(
        traceback,
        "_cause_message",
        ("\nThe above exception was the direct cause " + "of the following exception:") + "\n\n",
    )

    _context_message = getattr(
        traceback,
        "_context_message",
        ("\nDuring handling of the above exception, " + "another exception occurred:") + "\n\n",
    )

    def rec_format_exception_with_frame_info(etype, value, tb, chain=True):
        # Based on
        # https://www.python.org/dev/peps/pep-3134/#enhanced-reporting
        # and traceback.format_exception
        if etype is None:
            etype = type(value)

        if tb is None:
            tb = value.__traceback__

        if chain:
            if value.__cause__ is not None:
                yield from rec_format_exception_with_frame_info(None, value.__cause__, None)
                yield (_cause_message, None, None, None)
            elif value.__context__ is not None and not value.__suppress_context__:
                yield from rec_format_exception_with_frame_info(None, value.__context__, None)
                yield (_context_message, None, None, None)

        if tb is not None:
            yield (_traceback_message, None, None, None)

            tb_temp = tb
            for entry in traceback.extract_tb(tb):
                assert tb_temp is not None  # actual tb doesn't end before extract_tb
                if "cpython_backend" not in entry.filename and (
                    not entry.filename.endswith(os.sep + "ast.py")
                    or entry.name != "parse"
                    or etype is not SyntaxError
                ):
                    fmt = '  File "{}", line {}, in {}\n'.format(
                        entry.filename, entry.lineno, entry.name
                    )

                    if entry.line:
                        fmt += "    {}\n".format(entry.line.strip())

                    yield (fmt, id(tb_temp.tb_frame), entry.filename, entry.lineno)

                tb_temp = tb_temp.tb_next

            assert tb_temp is None  # tb was exhausted

        for line in traceback.format_exception_only(etype, value):
            if etype is SyntaxError and line.endswith("^\n"):
                # for some reason it may add several empty lines before ^-line
                partlines = line.splitlines()
                while len(partlines) >= 2 and partlines[-2].strip() == "":
                    del partlines[-2]
                line = "\n".join(partlines) + "\n"

            yield (line, None, None, None)

    items = rec_format_exception_with_frame_info(e_type, e_value, e_traceback)

    return list(items)
f698739f88271bc6c15554be3750bc550f5102a7
3,658,981
def _add_simple_procparser(subparsers, name, helpstr, func, defname='proc',
                           xd=False, yd=False, dualy=False, other_ftypes=True):
    """Add a simple subparser."""
    parser = _add_procparser(subparsers, name, helpstr, func, defname=defname)
    _add_def_args(parser, xd=xd, yd=yd, dualy=dualy)
    return parser
d7ba916453921d4ad362367c43f597f81fb2db9b
3,658,982
import subprocess


def get_volumes(fn):
    """Return number of volumes in nifti"""
    return int(subprocess.check_output(['fslnvols', fn]))
67596f0583295f837197e20dd95b1c04fe705ea4
3,658,983
import asyncio
import subprocess


async def _execSubprocess(command: str) -> tuple[int, bytes]:
    """Execute a command and check for errors.

    Args:
        command (str): commands as a string

    Returns:
        tuple[int, bytes]: tuple of return code (int) and stdout (bytes)
    """
    async with SEM:
        process = await asyncio.create_subprocess_shell(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        out = await process.communicate()
        exitCode = process.returncode
        return exitCode, out[0]
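A usage sketch under the assumption that SEM is a module-level asyncio.Semaphore (it is referenced above but not defined in the snippet); the echo command is only illustrative.

import asyncio

SEM = asyncio.Semaphore(4)  # assumed module-level concurrency limit used by _execSubprocess

async def main():
    code, output = await _execSubprocess("echo hello")
    print(code, output.decode().strip())  # 0 hello

asyncio.run(main())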
f289282c45d219000d6ce8b5b4880801869d07f5
3,658,984
def comprspaces(*args):
    """
    .. function:: comprspaces(text1, [text2,...]) -> text

    This function strips (from the beginning and the end) and compresses
    the spaces in its input.

    Examples:

    >>> table1('''
    ... ' an example with spaces ' 'another example with spaces '
    ... ''')
    >>> sql("select comprspaces(a,b) from table1")
    comprspaces(a,b)
    --------------------------------------------------
    an example with spaces another example with spaces
    """
    if len(args) == 1:
        return reduce_spaces.sub(' ', strip_remove_newlines.sub('', args[0]))

    out = []

    for i in args:
        o = reduce_spaces.sub(' ', strip_remove_newlines.sub('', i))
        out += [o]

    return ' '.join(out)
7cf4d23dac7fb0d36f9224598f103b5918167bd5
3,658,985
import socket


def find_available_port():
    """Find an available port.

    Simple trick: open a socket to localhost, see what port was allocated.

    Could fail in highly concurrent setups, though.
    """
    s = socket.socket()
    s.bind(('localhost', 0))
    _address, port = s.getsockname()
    s.close()
    return port
1d81ff79fa824bc8b38c121a632890973f0639ea
3,658,986
import sys


def redirect_std():
    """
    Connect stdin/stdout to controlling terminal even if the script's input and output
    were redirected. This is useful in utilities based on termenu.
    """
    stdin = sys.stdin
    stdout = sys.stdout
    if not sys.stdin.isatty():
        sys.stdin = open_raw("/dev/tty", "r", 0)
    if not sys.stdout.isatty():
        sys.stdout = open_raw("/dev/tty", "w", 0)
    return stdin, stdout
22321f8a0309273f409dc2876616856834f52113
3,658,987
def merge_deep(dct1, dct2, merger=None):
    """
    Deep merge by the spec below

    :param dct1:
    :param dct2:
    :param merger: Optional merger
    :return:
    """
    my_merger = merger or Merger(
        # pass in a list of tuples, with the
        # strategies you are looking to apply
        # to each type.
        [
            (list, ["append"]),
            (dict, ["merge"])
        ],
        # next, choose the fallback strategies,
        # applied to all other types:
        ["override"],
        # finally, choose the strategies in
        # the case where the types conflict:
        ["override"]
    )
    return my_merger.merge(dct1, dct2)
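A short usage sketch, assuming Merger is deepmerge's Merger class (the constructor arguments above match its signature); note that with deepmerge the merge happens into the first dict in place.

base = {"tags": ["a"], "cfg": {"retries": 1, "debug": False}}
extra = {"tags": ["b"], "cfg": {"retries": 3}}

merged = merge_deep(base, extra)
# lists are appended, nested dicts merged, scalars overridden:
print(merged)  # {'tags': ['a', 'b'], 'cfg': {'retries': 3, 'debug': False}}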
1257e7a8242fde6a70feb3cfe373979bbf439726
3,658,988
def step(
        context, bind_to, data, title='', area=False, x_is_category=False,
        labels=False, vertical_grid_line=False, horizontal_grid_line=False,
        show_legend=True, zoom=False, group_tooltip=True, height=None, width=None
):
    """Generates javascript code to show a 'step' chart.

    Args:
        context: Context of template.
        bind_to: A string that specifies an HTML element (eg: id or class)
            in which the chart will be shown. (like: '#chart')
        data: A dictionary that contains the data of the chart, some information
            about extra lines, grouping of data and chart axis labels. eg:
            {
                'x': ['2017-5-19', '2017-5-20', '2017-5-21', '2017-5-22'],
                'horizontal_lines': [40],
                # 'vertical_lines': [40],
                'data': [
                    {'title': 'A', 'values': [26, 35, 52, 34, 45, 74],
                     'color': '#FF34FF'},
                    # {'title': 'B', 'values': [54, 25, 52, 26, 20, 89]},
                ],
                # 'groups': [('A', 'B')]
            }
            vertical_lines works just if x_is_category is set to False.
        title: A string that will be shown on top of the chart.
        area: It's a boolean option. If true, the area under the curve will be colored.
        x_is_category: It's a boolean option. If false, labels of the X axis will be
            considered as real numbers and sortable. (they will be sorted automatically)
        labels: It's a boolean option. If true, the value of each record will be shown
            on its column.
        vertical_grid_line: It's a boolean option. If true, vertical grid lines will be
            drawn in the chart.
        horizontal_grid_line: It's a boolean option. If true, horizontal grid lines will
            be drawn in the chart.
        show_legend: It's a boolean option. If false, the legends of the chart will be
            hidden.
        zoom: It's a boolean option. If true, the end user can scroll on the chart to
            zoom in and zoom out.
        group_tooltip: It's a boolean option. If true, the data of all records at that
            point will be shown together.
        height: It's an integer option; it determines the height of the chart in pixels.
        width: It's an integer option; it determines the width of the chart in pixels.

    Returns:
        A string containing the chart JS code and the import code for the C3 static
        files, if they have not been imported yet. You can see the structure of the
        chart in the chart_structur variable.
    """
    # step chart structure in JS
    chart_structur = (
        '\n<script type="text/javascript">'
        '\n var chart = c3.generate({'
        '\n bindto: "%s",'
        '\n data: {'
        '\n x: %s,'
        '\n columns: ['
        '\n %s'
        '\n ],'
        '\n type : "%s",'
        '\n colors: {'
        '\n %s'
        '\n },'
        '\n groups: ['
        '\n %s'
        '\n ],'
        '\n labels : %s'
        '\n },'
        '\n title: { text: "%s"},'
        '\n axis: { x: { type: "%s" } },'
        '\n grid: {'
        '\n x: { show: %s ,lines: [%s] },'
        '\n y: { show: %s ,lines: [%s] },'
        '\n },'
        '\n legend: { show: %s },'
        '\n zoom: { enabled: %s },'
        '\n tooltip: { grouped: %s },'
        '\n size: { height: %s, width: %s }'
        '\n });'
        '\n</script>'
    )

    # convert parameters to strings to be acceptable in JS and C3 syntax.
    if area:
        _type = 'area-step'
    else:
        _type = 'step'

    if x_is_category:
        x_type = 'category'
    else:
        x_type = ''

    if labels:
        labels = 'true'
    else:
        labels = 'false'

    if vertical_grid_line:
        vertical_grid_line = 'true'
    else:
        vertical_grid_line = 'false'

    if horizontal_grid_line:
        horizontal_grid_line = 'true'
    else:
        horizontal_grid_line = 'false'

    if show_legend:
        show_legend = 'true'
    else:
        show_legend = 'false'

    if zoom:
        zoom = 'true'
    else:
        zoom = 'false'

    if group_tooltip:
        group_tooltip = 'true'
    else:
        group_tooltip = 'false'

    if height is not None:
        height = int(height)
    else:
        height = 'null'

    if width is not None:
        width = int(width)
    else:
        width = 'null'

    # read horizontal line points from data
    horizontal_lines = str()
    if 'horizontal_lines' in data.keys():
        for line in data['horizontal_lines']:
            horizontal_lines = ''.join([horizontal_lines, '{ value: %s}' % line, ','])

    # read vertical line points from data
    # raise an exception if x_is_category set to true and vertical_lines exists
    vertical_lines = str()
    if 'vertical_lines' in data.keys():
        if x_is_category:
            raise Exception(
                "It's meaningless to use vertical_lines with x_is_category."
            )
        for line in data['vertical_lines']:
            vertical_lines = ''.join(
                [vertical_lines, '{ value: %s}' % line, ','])

    # reads 'x' field of data and creates X axis labels.
    # a hash is used to naming X axis labels
    x_labels = str()
    if 'x' in data.keys():
        if x_is_category:
            x_labels = data['x']
        else:
            x_labels = list(filter(lambda x: int(x), data['x']))

        x_labels = ','.join([repr(str(label)) for label in x_labels])
        x_labels = '["2d2014226823e74c2accfcce8e0ca141", %s],' % x_labels
        x_label_list_name = '"2d2014226823e74c2accfcce8e0ca141"'
    else:
        x_labels = ''
        x_label_list_name = "null"

    # read records points to draw on chart
    data_title_list = list()
    chart_data = str()
    for item in data['data']:
        values = ','.join([str(v) for v in item['values']])
        item_data = '["%s", %s], ' % (item['title'], values)
        chart_data = ' '.join([chart_data, item_data])
        data_title_list.append(item['title'])

    # add X axis labels to chart data
    chart_data = ''.join([chart_data, x_labels])

    # read colors of data
    chart_color = str()
    for item in data['data']:
        if 'color' in item.keys():
            item_color = '"%s": "%s", ' % (item['title'], item['color'])
            chart_color = ' '.join([chart_color, item_color])

    # read grouping details of data
    total_group_string = str()
    if 'groups' in data.keys():
        for group in data['groups']:
            group_string = str()
            for item in group:
                # raise an exception if the mentioned key does not exist in data
                if item not in data_title_list:
                    raise ValueError("%s does not exist in your data!" % item)
                group_string = ''.join([group_string, ',', repr(item)])
            total_group_string = ''.join(
                [total_group_string, '[', group_string, ']', ','])

    # pass arguments to chart structure
    chart = chart_structur % (
        bind_to, x_label_list_name,
        chart_data, _type, chart_color, total_group_string, labels,
        title, x_type,
        vertical_grid_line, vertical_lines,
        horizontal_grid_line, horizontal_lines,
        show_legend, zoom, group_tooltip, height, width
    )

    # add import C3 elements to it, if they are not imported yet, and return it.
    if not ('import_js_c3' in context and context['import_js_c3']):
        context['import_js_c3'] = True
        return mark_safe('%s\n%s' % (import_c3(), chart))
    else:
        return mark_safe(chart)
e135f1315dc635cc12dec403b3b6a268ed1c0a2b
3,658,989
import requests


def get(user_request, url, **kwargs):
    """
    A wrapper of requests.get.

    This method will automatically add the user's session key as the cookie to enable sso.

    Sends a GET request. Returns :class:`Response` object.

    :param user_request: The http request that contains the authentication key and
        is triggered by the user.
    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    _set_session_key(user_request, kwargs)
    if debug:
        log(user_request, url, "GET", kwargs=kwargs)
    return requests.get(url, **kwargs)
203ec28f62536dc84bfc86e6208f25af7f17212b
3,658,990
from typing import List


def get_baseline_y(line: PageXMLTextLine) -> List[int]:
    """Return the Y/vertical coordinates of a text line's baseline."""
    if line_starts_with_big_capital(line):
        return [point[1] for point in line.baseline.points if point[1] < line.baseline.bottom - 20]
    else:
        return [point[1] for point in line.baseline.points]
7195f801e3012f5514b0d4eea7d5df9a36764412
3,658,991
import time


def get_device_type(dev, num_try=1):
    """
    Tries to get the device type with delay
    """
    if num_try >= MAX_DEVICE_TYPE_CHECK_RETRIES:
        return

    time.sleep(1)  # if devtype is checked too early it is reported as 'unknown'
    iface = xwiimote.iface(dev)
    device_type = iface.get_devtype()
    if not device_type or device_type == 'unknown':
        return get_device_type(dev, num_try + 1)
    return device_type
0caec78baeeb3da7ba3b99d68d80b9d1439af294
3,658,992
def index():
    """
    This is the grocery list.
    Concatenates the ingredients from all the upcoming recipes

    The ingredients dict that we pass to the template has this structure
    {
        "carrot": {
            "g": 200,
            "number": 4,
            "str": "200g, 4number",
        },
        "salt": {
            "g": 20,
            "pinch": 3,
            "str": "20g, 3pinch",
        },
    }
    If two ingredients have the same unit, I add the quantities, but trying to unify
    all the different ways of expressing ingredient units would be a lost cause.
    We add the str key because doing formatting work in the template is so much fun
    """
    recipes = Recipe.query.filter_by(upcoming=True)
    ingredients = dict()
    for recipe in recipes:
        recipe_d = recipe.to_dict()
        for ingredient in recipe_d["ingredients"]:
            # worth changing the ingredients to a named tuple?
            # would be better at least here
            name, unit, quantity = (ingredient["name"],
                                    ingredient["unit"],
                                    ingredient["quantity"])
            quantity = quantity * recipe.upcoming_servings / recipe.servings
            if name in ingredients:
                if unit in ingredients[name]:
                    ingredients[name][unit] += quantity
                else:
                    ingredients[name][unit] = quantity
            else:
                ingredients[name] = {
                    unit: quantity,
                }

    for name, d in ingredients.items():
        s = ", ".join("{:g}{}".format(round(quantity, 2), unit)
                      for unit, quantity in d.items())
        ingredients[name]["str"] = s

    return render_template("grocery_list.html",
                           title="Grocery list",
                           recipes=recipes,
                           ingredients=ingredients)
343f54d097c95e92bbca1bbe087168a348d42771
3,658,993
def test_Fit_MinFunc():
    """
    There are times where I don't pass just a simple function to the fitting
    algorithm. Instead I need to calculate the error myself and pass that to
    the model. This tests that ability.
    """
    init = {
        'm': 20,
        'b': -10
    }

    def func(X, *args):
        vecLinear = np.vectorize(funcs.linear)
        yThr = vecLinear(linearData['X'], *args)
        return np.sqrt(np.sum((linearData['Y'] - yThr) ** 2))

    LinMod = model(func)
    LinMod.setParams(init)
    LinMod.fit(linearData['X'], linearData['Y'])
    results = LinMod.parameters.copy()
    for key in linearParams.keys():
        error = np.abs((results[key] - linearParams[key]) / linearParams[key]) * 100
        assert error < 15
4884302ad03cb04e4d293e05b743f1d2aaf51141
3,658,994
def BOP(data):
    """
    Balance of Power Indicator
    :param pd.DataFrame data: pandas DataFrame with open, high, low, close data
    :return pd.Series: with indicator data calculation results
    """
    fn = Function('BOP')
    return fn(data)
14502e0c1fd6f5224edfa403ae58e75a4056c74c
3,658,995
def get_infinite(emnist_client_data, num_pseudo_clients):
    """Converts a Federated EMNIST dataset into an Infinite Federated EMNIST set.

    Infinite Federated EMNIST expands each writer from the EMNIST dataset into some
    number of pseudo-clients each of whose characters are the same but apply a fixed
    random affine transformation to the original user's characters. The distribution
    over affine transformation is approximately equivalent to the one described at
    https://www.cs.toronto.edu/~tijmen/affNIST/. It applies the following
    transformations in this order:

    1. A random rotation chosen uniformly between -20 and 20 degrees.
    2. A random shearing adding between -0.2 to 0.2 of the x coordinate to the
       y coordinate (after centering).
    3. A random scaling between 0.8 and 1.25 (sampled log uniformly).
    4. A random translation between -5 and 5 pixels in both the x and y axes.

    Args:
      emnist_client_data: The `tff.simulation.ClientData` to convert.
      num_pseudo_clients: How many pseudo-clients to generate for each real client.
        Each pseudo-client is formed by applying a given random affine transformation
        to the characters written by a given real user. The first pseudo-client for a
        given user applies the identity transformation, so the original users are
        always included.

    Returns:
      An expanded `tff.simulation.ClientData`.
    """
    num_client_ids = len(emnist_client_data.client_ids)

    return transforming_client_data.TransformingClientData(
        raw_client_data=emnist_client_data,
        make_transform_fn=_make_transform_fn,
        num_transformed_clients=(num_client_ids * num_pseudo_clients))
68b4ed0643e48adba2478022eff10a52222f75df
3,658,996
def create_plot(df, title, carbon_unit, cost_unit, ylimit=None):
    """
    :param df:
    :param title: string, plot title
    :param carbon_unit: string, the unit of carbon emissions used in the
        database/model, e.g. "tCO2"
    :param cost_unit: string, the unit of cost used in the database/model,
        e.g. "USD"
    :param ylimit: float/int, upper limit of y-axis; optional
    :return:
    """
    if df.empty:
        return figure()

    # Set up data source
    source = ColumnDataSource(data=df)

    # Determine column types for plotting, legend and colors
    # Order of stacked_cols will define order of stacked areas in chart
    x_col = "period"
    line_col = "carbon_cap"
    stacked_cols = ["in_zone_project_emissions", "import_emissions_degen"]

    # Stacked Area Colors
    colors = ["#666666", "#999999"]

    # Set up the figure
    plot = figure(
        plot_width=800,
        plot_height=500,
        tools=["pan", "reset", "zoom_in", "zoom_out", "save", "help"],
        title=title,
        x_range=df[x_col]
        # sizing_mode="scale_both"
    )

    # Add stacked bar chart to plot
    bar_renderers = plot.vbar_stack(
        stackers=stacked_cols,
        x=x_col,
        source=source,
        color=colors,
        width=0.5,
    )

    # Add Carbon Cap target line chart to plot
    target_renderer = plot.circle(
        x=x_col,
        y=line_col,
        source=source,
        size=20,
        color="black",
        fill_alpha=0.2,
        line_width=2,
    )

    # Create legend items
    legend_items = [
        ("Project Emissions", [bar_renderers[0]]),
        ("Import Emissions", [bar_renderers[1]]),
        ("Carbon Target", [target_renderer]),
    ]

    # Add Legend
    legend = Legend(items=legend_items)
    plot.add_layout(legend, "right")
    plot.legend[0].items.reverse()  # Reverse legend to match stacked order
    plot.legend.click_policy = "hide"  # Add interactivity to the legend
    # Note: Doesn't rescale the graph down, simply hides the area
    # Note2: There's currently no way to auto-size legend based on graph size(?)
    # except for maybe changing font size automatically?
    show_hide_legend(plot=plot)  # Hide legend on double click

    # Format Axes (labels, number formatting, range, etc.)
    plot.xaxis.axis_label = "Period"
    plot.yaxis.axis_label = "Emissions ({})".format(carbon_unit)
    plot.yaxis.formatter = NumeralTickFormatter(format="0,0")
    plot.y_range.end = ylimit  # will be ignored if ylimit is None

    # Add delivered RPS HoverTool
    r_delivered = bar_renderers[0]  # renderer for delivered RPS
    hover = HoverTool(
        tooltips=[
            ("Period", "@period"),
            (
                "Project Emissions",
                "@%s{0,0} %s (@fraction_of_project_emissions{0%%})"
                % (stacked_cols[0], carbon_unit),
            ),
        ],
        renderers=[r_delivered],
        toggleable=False,
    )
    plot.add_tools(hover)

    # Add curtailed RPS HoverTool
    r_curtailed = bar_renderers[1]  # renderer for curtailed RPS
    hover = HoverTool(
        tooltips=[
            ("Period", "@period"),
            (
                "Import Emissions",
                "@%s{0,0} %s (@fraction_of_import_emissions{0%%})"
                % (stacked_cols[1], carbon_unit),
            ),
        ],
        renderers=[r_curtailed],
        toggleable=False,
    )
    plot.add_tools(hover)

    # Add RPS Target HoverTool
    hover = HoverTool(
        tooltips=[
            ("Period", "@period"),
            ("Carbon Target", "@%s{0,0} %s" % (line_col, carbon_unit)),
            (
                "Marginal Cost",
                "@carbon_cap_marginal_cost_per_emission{0,0} %s/%s"
                % (cost_unit, carbon_unit),
            ),
        ],
        renderers=[target_renderer],
        toggleable=False,
    )
    plot.add_tools(hover)

    return plot
e320a523bbdbfc12a3e84948935803da5304624e
3,658,997
def get_app(name, **kwargs):
    """Returns an instantiated Application based on the name.

    Args:
        name (str): The name of the application
        kwargs (dict): Keyword arguments used for application instantiation

    Returns:
        deepcell.applications.Application: The instantiated application
    """
    name = str(name).lower()
    app_map = dca.settings.VALID_APPLICATIONS
    try:
        return app_map[name]['class'](**kwargs)
    except KeyError:
        raise ValueError('{} is not a valid application name. '
                         'Valid applications: {}'.format(
                             name, list(app_map.keys())))
1fe9d1e300a086b7184760556c65470c62a0cc14
3,658,998
def worker_complete():
    """Complete worker."""
    participant_id = request.args.get('participant_id')
    if not participant_id:
        return error_response(
            error_type="bad request",
            error_text='participantId parameter is required'
        )

    try:
        _worker_complete(participant_id)
    except KeyError:
        return error_response(error_type='ParticipantId not found: {}'.format(participant_id))

    return success_response(status="success")
e30b45e84025b11bcf6640931f72d9fc4f4f9873
3,658,999