Dataset columns:
content — string, lengths 35 to 762k
sha1 — string, length 40
id — int64, values 0 to 3.66M
import numpy as np
import eazy.igm
from astropy.table import Table, Column

def simulate_eazy_sed_from_coeffs(eazycoeffs, eazytemplatedata, z,
                                  returnfluxunit='', returnwaveunit='A',
                                  limitwaverange=True, savetofile='',
                                  **outfile_kwargs):
    """
    Generate a simulated SED from a given set of input eazy-py coefficients
    and eazy-py templates.

    NB: Requires the eazy-py package to apply the IGM absorption!
    (https://github.com/gbrammer/eazy-py)

    Optional Args:
    returnfluxunit: ['AB', 'flambda'] TODO: add Jy
        'AB' = return log(flux) as AB magnitudes
        'flambda' = return flux density in erg/s/cm2/A
    returnwaveunit: ['A' or 'nm']
    limitwaverange: limit the output wavelengths to the range covered by PFS
    savetofile: filename for saving the output spectrum as a two-column ascii
        data file (suitable for use with the SubaruPFS ETC from C. Hirata).

    Returns
    -------
    obswave : observed-frame wavelength, Angstroms or nm
    obsflux : flux density of best-fit template, erg/s/cm2/A or AB mag
    """
    # the input data units are Angstroms for wavelength
    # and cgs for flux: erg/cm2/s/Ang
    obswave = eazytemplatedata[0] * (1 + z)
    obsfluxmatrix = eazytemplatedata[1:]
    sedsimflux = np.dot(eazycoeffs, obsfluxmatrix)
    fnu_factor = 10 ** (-0.4 * (25 + 48.6))
    flam_spec = 1. / (1 + z) ** 2
    obsflux = sedsimflux * fnu_factor * flam_spec

    try:
        igmz = eazy.igm.Inoue14().full_IGM(z, obswave)
        obsflux *= igmz
    except Exception:
        # If the IGM correction is unavailable, return the uncorrected flux.
        pass

    if limitwaverange:
        # to simplify things, we only write out the data over the Subaru PFS
        # + WFIRST prism wavelength range, from 200 to 2500 nm
        # (2000 to 25000 Angstroms)
        iuvoir = np.where((obswave > 2000) & (obswave < 25000))[0]
        obswave = obswave[iuvoir]
        obsflux = obsflux[iuvoir]

    if returnfluxunit == 'AB':
        # convert from flux density f_lambda into AB mag:
        mAB_from_flambda = lambda f_lambda, wave: -2.5 * np.log10(
            3.34e4 * wave * wave * f_lambda / 3631)
        obsflux = mAB_from_flambda(obsflux, obswave)
    if returnwaveunit == 'nm':
        obswave = obswave / 10.

    if savetofile:
        out_table = Table()
        outcol1 = Column(data=obswave, name='wave')
        outcol2 = Column(data=obsflux, name='flux')
        out_table.add_columns([outcol1, outcol2])
        out_table.write(savetofile, **outfile_kwargs)

    return obswave, obsflux
41c975efa4136e9958c2b5680db2e6f9a67a4291
3,656,100
from pytz import all_timezones

def get_timezones_all():
    """Dump the list of timezones from pytz into a format suitable for
    use with the Django Forms API's ChoiceField
    """
    # TODO: Find a more user-friendly way of managing 500+ timezones
    output = []
    for tz in all_timezones:
        output.append((tz, tz))
    return output
e00bd0bc3321b9ab91526e67200ce3ef3629014a
3,656,101
import numpy as np

def create_logistic_vector(input_vector, cutoff):
    """
    Creates a vector of 1s and -1s based on an input vector of numbers
    with a cut-off point.
    """
    output_vector = np.zeros(len(input_vector))
    n = 0
    for i in range(len(input_vector)):
        if input_vector[i] > cutoff:
            output_vector[i] = 1
        else:
            output_vector[i] = -1  # Set to -1 rather than 0 to help make later calculations easier.
            n += 1
    return output_vector
bb4756f745f56fae7d8d4f0b1a1758ef6d70fb5a
3,656,102
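A minimal usage sketch for create_logistic_vector above (the threshold value is arbitrary):

import numpy as np

scores = np.array([0.2, 0.8, 0.5, 0.9])
labels = create_logistic_vector(scores, cutoff=0.6)
print(labels)  # [-1.  1. -1.  1.]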
def profile(username):
    """ user profile """
    user = User.query.filter_by(username=username).first_or_404()
    return render_template("user/profile.jinja.html", user=user)
31e2a1b108c0652356cea92f32e9077105733726
3,656,103
import numpy as np

def action_rescale(action):
    """Rescale Distribution actions to exp one"""
    return np.array([0 if abs(a) < 0.5 else
                     10 ** (a - 3) if a > 0 else
                     -(10 ** (-a - 3))
                     for a in action * 3])
534509b76410eaeadee599e1ac510def8243e7ba
3,656,104
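A quick check of action_rescale above on a small numpy array (values chosen arbitrarily):

import numpy as np

a = np.array([-1.0, 0.0, 1.0])
print(action_rescale(a))  # [-1.  0.  1.]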
def multi_knee(points: np.ndarray, t1: float = 0.99, t2: int = 3) -> np.ndarray:
    """
    Recursive knee point detection based on the curvature equations.

    It returns the knee points on the curve.

    Args:
        points (np.ndarray): numpy array with the points (x, y)
        t1 (float): coefficient of determination threshold (default 0.99)
        t2 (int): number of points threshold (default 3)

    Returns:
        np.ndarray: knee points on the curve
    """
    return mk.multi_knee(knee, points, t1, t2)
ee44d9f51c843a0fb8e18f10057b5c6510dd8f3a
3,656,105
def parse_dotted_path(path):
    """
    Extracts attribute name from dotted path.
    """
    try:
        objects, attr = path.rsplit('.', 1)
    except ValueError:
        objects = None
        attr = path
    return objects, attr
4685fad6461286b957a8d0056df2146fdd0f2e55
3,656,106
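parse_dotted_path above splits on the final dot; a short demonstration:

print(parse_dotted_path('django.db.models'))  # ('django.db', 'models')
print(parse_dotted_path('models'))            # (None, 'models')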
def resource_media_fields(document, resource):
    """ Returns a list of media fields defined in the resource schema.

    :param document: the document eventually containing the media files.
    :param resource: the resource being consumed by the request.

    .. versionadded:: 0.3
    """
    media_fields = app.config['DOMAIN'][resource]['_media']
    return [field for field in media_fields if field in document]
b54bc5f7fd35626866d70ce6d46bf0b84b9cf1b8
3,656,107
import os
import logging

from configobj import ConfigObj, ConfigObjError

def read_config_file(f):
    """Read a config file."""
    if isinstance(f, basestring):
        f = os.path.expanduser(f)

    try:
        config = ConfigObj(f, interpolation=False, encoding='utf8')
    except ConfigObjError as e:
        log(LOGGER, logging.ERROR, "Unable to parse line {0} of config file "
            "'{1}'.".format(e.line_number, f))
        log(LOGGER, logging.ERROR, "Using successfully parsed config values.")
        return e.config
    except (IOError, OSError) as e:
        log(LOGGER, logging.WARNING, "You don't have permission to read "
            "config file '{0}'.".format(e.filename))
        return None

    return config
7dd67233c8ab1a09568b0cca143a96e27ba15722
3,656,108
from typing import List
from typing import Tuple

def _parse_moving(message: List[str]) -> Tuple[Actions, str]:
    """Parses the incoming message list to determine if movement is found.

    Args:
        message: list of words in the player message

    Returns:
        a tuple of the action and direction
    """
    short_dir = ['n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw']
    long_dir = [
        'north', 'northeast', 'east', 'southeast', 'south', 'southwest',
        'west', 'northwest'
    ]
    for d in long_dir:
        if d in message:
            return (Actions.MOVE, d)
    for d in short_dir:
        if d in message:
            direction = long_dir[short_dir.index(d)]
            return (Actions.MOVE, direction)
    return (Actions.UNKNOWN, '')
9785cbeb39dbc9ba980f605b648fb77855fe863d
3,656,109
import numpy as np

def _Net_forward_all(self, blobs=None, **kwargs):
    """
    Run net forward in batches.

    Take
    blobs: list of blobs to extract as in forward()
    kwargs: Keys are input blob names and values are blob ndarrays.
            Refer to forward().

    Give
    all_outs: {blob name: list of blobs} dict.
    """
    # Collect outputs from batches
    all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
    for batch in self._batch(kwargs):
        outs = self.forward(blobs=blobs, **batch)
        for out, out_blob in outs.items():
            all_outs[out].extend(out_blob.copy())
    # Package in ndarray.
    for out in all_outs:
        all_outs[out] = np.asarray(all_outs[out])
    # Discard padding.
    pad = len(next(iter(all_outs.values()))) - len(next(iter(kwargs.values())))
    if pad:
        for out in all_outs:
            all_outs[out] = all_outs[out][:-pad]
    return all_outs
f5b6acf347e4fb85e0148d2d176042559f93b6a1
3,656,110
def email_members_old(request, course_prefix, course_suffix):
    """
    Displays the email form and handles email actions.
    Right now this is blocking and does not do any batching.
    Will have to make it better.
    """
    error_msg = ""
    success_msg = ""
    form = EmailForm()
    if request.method == "POST":
        form = EmailForm(data=request.POST)
        if form.is_valid():
            sender = request.common_page_data['course'].title + ' Staff <[email protected]>'
            recipient_qset = User.objects.none()  # get recipients in a QuerySet
            if form.cleaned_data['to'] == "all":
                recipient_qset = request.common_page_data['course'].get_all_members()
            elif form.cleaned_data['to'] == "students":
                recipient_qset = request.common_page_data['course'].get_all_students()
            elif form.cleaned_data['to'] == "staff":
                recipient_qset = request.common_page_data['course'].get_all_course_admins()
            elif form.cleaned_data['to'] == "myself":
                recipient_qset = User.objects.filter(id=request.user.id)
            # pdb.set_trace()
            courses.email_members.tasks.email_with_celery.delay(
                form.cleaned_data['subject'],
                form.cleaned_data['message'],
                sender,
                recipient_qset.values_list('email', flat=True),
                course_title=request.common_page_data['course'].title,
                course_url=request.build_absolute_uri(
                    reverse('courses.views.main', args=[course_prefix, course_suffix])))
            success_msg = "Your email was successfully queued for sending"
            # form = EmailForm()
        else:
            error_msg = "Please fix the errors below:"

    context = RequestContext(request)
    return render_to_response('email/email.html',
                              {'form': form,
                               'error_msg': error_msg,
                               'success_msg': success_msg,
                               'course': request.common_page_data['course'],
                               'common_page_data': request.common_page_data},
                              context_instance=context)
eeafa4d9c4ca0b0ad1b7ffaecceb03c188e02813
3,656,111
def Padding_op(Image, strides, offset_x, offset_y):
    """
    Takes an image, offset required to fit output image dimensions with given
    strides and calculates the padding it needs for perfect fit.

    :param Image:
    :param strides:
    :param offset_x:
    :param offset_y:
    :return: Padded image
    """
    if config['volumetric']:
        raise Exception("3D Padding not yet implemented!")
    padding_x = strides[0] - offset_x
    padding_y = strides[1] - offset_y
    Padded_Image = np.zeros(shape=(Image.shape[0] + padding_x,
                                   Image.shape[1] + padding_y,
                                   Image.shape[2]),
                            dtype=Image.dtype)
    Padded_Image[padding_x // 2:(padding_x // 2) + Image.shape[0],
                 padding_y // 2:(padding_y // 2) + Image.shape[1], :] = Image
    return Padded_Image
d3f046069a597f2d7e3204b543f1f60c8e1e5b23
3,656,112
def area_triangle(point_a: array_like, point_b: array_like, point_c: array_like) -> np.float64:
    """
    Return the area of a triangle defined by three points.

    The points are the vertices of the triangle. They must be 3D or less.

    Parameters
    ----------
    point_a, point_b, point_c : array_like
        The three vertices of the triangle.

    Returns
    -------
    np.float64
        The area of the triangle.

    References
    ----------
    http://mathworld.wolfram.com/TriangleArea.html

    Examples
    --------
    >>> from skspatial.measurement import area_triangle

    >>> area_triangle([0, 0], [0, 1], [1, 0])
    0.5

    >>> area_triangle([0, 0], [0, 2], [1, 1])
    1.0

    >>> area_triangle([3, -5, 1], [5, 2, 1], [9, 4, 2]).round(2)
    12.54

    """
    vector_ab = Vector.from_points(point_a, point_b)
    vector_ac = Vector.from_points(point_a, point_c)

    # Normal vector of plane defined by the three points.
    vector_normal = vector_ab.cross(vector_ac)

    return 0.5 * vector_normal.norm()
0c21ca96f8a6fd4d088cf0fa47a260b3bc582966
3,656,113
import os

def file_exists(target, parse=False):
    """Checks if a file exists"""
    if parse:
        target = envar_parser(target)
    return os.path.isfile(target)
99332f2fd2d434a19372b453a6235f4f2d7f5ab3
3,656,114
def test_valid(line):
    """Test for 40 character hex strings

    Print error on failure
    """
    base_error = '*** WARNING *** line in torrent list'
    if len(line) != 40:
        print(base_error, 'incorrect length:', line)
    elif any(char not in HEX for char in line):
        print(base_error, 'has non-hex digits:', line)
    else:
        return True
ca6517a8dd622b07703b30af7842685b9b6d5865
3,656,115
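test_valid above assumes a module-level HEX constant; a plausible definition and usage (the HEX definition here is an assumption, not from the source):

import string

HEX = set(string.hexdigits)  # assumed definition of the HEX constant

print(test_valid('ca6517a8dd622b07703b30af7842685b9b6d5865'))  # True
test_valid('not-a-hash')  # prints a length warning and returns None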
def readIMAGCDF(filename, headonly=False, **kwargs):
    """
    Reading Intermagnet CDF format (1.0, 1.1, 1.2)
    """
    debug = kwargs.get('debug')

    cdfdat = cdf.CDF(filename)

    if debug:
        logger.info("readIMAGCDF: FOUND IMAGCDF file created with version {}".format(cdfdat.version()))

    if debug:
        for line in cdfdat:
            logger.info("{}".format(line))

    # get Attribute list
    attrslist = [att for att in cdfdat.attrs]
    # get Data list
    datalist = [att for att in cdfdat]
    headers = {}

    arraylist = []
    array = [[] for elem in KEYLIST]
    startdate = cdfdat[datalist[-1]][0]
    flagruleversion = ''
    flagruletype = ''
    flaglist = []

    # #################################
    # Get header info:
    # #################################
    if 'FormatDescription' in attrslist:
        form = cdfdat.attrs['FormatDescription']
        headers['DataFormat'] = str(cdfdat.attrs['FormatDescription'])
    if 'FormatVersion' in attrslist:
        vers = cdfdat.attrs['FormatVersion']
        headers['DataFormat'] = str(form) + '; ' + str(vers)
    if 'Title' in attrslist:
        pass
    if 'IagaCode' in attrslist:
        headers['StationIAGAcode'] = str(cdfdat.attrs['IagaCode'])
        headers['StationID'] = str(cdfdat.attrs['IagaCode'])
    if 'ElementsRecorded' in attrslist:
        headers['DataComponents'] = str(cdfdat.attrs['ElementsRecorded'])
    if 'PublicationLevel' in attrslist:
        headers['DataPublicationLevel'] = str(cdfdat.attrs['PublicationLevel'])
    if 'PublicationDate' in attrslist:
        headers['DataPublicationDate'] = str(cdfdat.attrs['PublicationDate'])
    if 'ObservatoryName' in attrslist:
        headers['StationName'] = str(cdfdat.attrs['ObservatoryName'])
    if 'Latitude' in attrslist:
        headers['DataAcquisitionLatitude'] = str(cdfdat.attrs['Latitude'])
    if 'Longitude' in attrslist:
        headers['DataAcquisitionLongitude'] = str(cdfdat.attrs['Longitude'])
    if 'Elevation' in attrslist:
        headers['DataElevation'] = str(cdfdat.attrs['Elevation'])
    if 'Institution' in attrslist:
        headers['StationInstitution'] = str(cdfdat.attrs['Institution'])
    if 'VectorSensOrient' in attrslist:
        headers['DataSensorOrientation'] = str(cdfdat.attrs['VectorSensOrient'])
    if 'StandardLevel' in attrslist:
        headers['DataStandardLevel'] = str(cdfdat.attrs['StandardLevel'])
    if 'StandardName' in attrslist:
        headers['DataStandardName'] = str(cdfdat.attrs['StandardName'])
    if 'StandardVersion' in attrslist:
        headers['DataStandardVersion'] = str(cdfdat.attrs['StandardVersion'])
    if 'PartialStandDesc' in attrslist:
        headers['DataPartialStandDesc'] = str(cdfdat.attrs['PartialStandDesc'])
    if 'Source' in attrslist:
        headers['DataSource'] = str(cdfdat.attrs['Source'])
    if 'TermsOfUse' in attrslist:
        headers['DataTerms'] = str(cdfdat.attrs['TermsOfUse'])
    if 'References' in attrslist:
        headers['DataReferences'] = str(cdfdat.attrs['References'])
    if 'UniqueIdentifier' in attrslist:
        headers['DataID'] = str(cdfdat.attrs['UniqueIdentifier'])
    if 'ParentIdentifiers' in attrslist:
        headers['SensorID'] = str(cdfdat.attrs.get('ParentIdentifiers'))
    if 'ReferenceLinks' in attrslist:
        headers['StationWebInfo'] = str(cdfdat.attrs['ReferenceLinks'])
    if 'FlagRulesetType' in attrslist:
        flagruletype = str(cdfdat.attrs['FlagRulesetType'])
    if 'FlagRulesetVersion' in attrslist:
        flagruleversion = str(cdfdat.attrs['FlagRulesetVersion'])

    # New in 0.3.99 - provide a SensorID as well consisting of IAGA code,
    # min/sec and numerical publevel
    # IAGA code
    if headers.get('SensorID', '') == '':
        try:
            # TODO determine resolution
            headers['SensorID'] = "{}_{}_{}".format(
                headers.get('StationIAGAcode', 'xxx').upper() + 'sec',
                headers.get('DataPublicationLevel', '0'), '0001')
        except Exception:
            pass

    # #################################
    # Get data:
    # #################################

    # Reorder datalist and Drop time column
    # #########################################################
    # 1. Get the amount of Times columns and associated lengths
    # #########################################################
    # print "Analyzing file structure and returning values"
    # print datalist
    zpos = KEYLIST.index('z')  # used for idf records
    multipletimerange = False
    newdatalist = []
    tllist = []
    indexarray = np.asarray([])
    for elem in datalist:
        if elem.endswith('Times') and not elem.startswith('Flag'):
            # print "Found Time Column"
            # Get length
            tl = int(str(cdfdat[elem]).split()[1].strip('[').strip(']'))
            # print "Length", tl
            tllist.append([tl, elem])

    if len(tllist) < 1:
        # print "No time column identified"
        # Check for starttime and sampling rate in header
        if 'StartTime' in attrslist and 'SamplingPeriod' in attrslist:
            # TODO Write that function
            st = str(cdfdat.attrs['StartTime'])
            sr = str(cdfdat.attrs['SamplingPeriod'])
        else:
            logger.error("readIMAGCDF: No Time information available - aborting")
            return
    elif len(tllist) > 1:
        tl = [el[0] for el in tllist]
        if not max(tl) == min(tl):
            logger.warning("readIMAGCDF: Time columns of different length. Choosing longest as basis")
            newdatalist.append(['time', max(tllist)[1]])
            try:
                indexarray = np.nonzero(np.in1d(date2num(cdfdat[max(tllist)[1]][...]),
                                                date2num(cdfdat[min(tllist)[1]][...])))[0]
            except Exception:
                indexarray = np.asarray([])
            multipletimerange = True
        else:
            logger.info("readIMAGCDF: Equal length time axes found - assuming identical time")
            if 'GeomagneticVectorTimes' in datalist:
                newdatalist.append(['time', 'GeomagneticVectorTimes'])
            else:
                newdatalist.append(['time', tllist[0][1]])  # Take the first one
    else:
        # print "Single time axis found in file"
        newdatalist.append(['time', tllist[0][1]])

    def Ruleset2Flaglist(flagginglist, rulesettype, rulesetversion):
        if flagruletype in ['Conrad', 'conrad', 'MagPy', 'magpy']:
            if flagruleversion in ['1.0', '1', 1]:
                flagcolsconrad = [flagginglist[0], flagginglist[1], flagginglist[3],
                                  flagginglist[4], flagginglist[5], flagginglist[6],
                                  flagginglist[2]]
                flaglisttmp = []
                for elem in flagcolsconrad:
                    flaglisttmp.append(cdfdat[elem][...])
                flaglist = np.transpose(flaglisttmp)
                flaglist = [list(elem) for elem in flaglist]
                return list(flaglist)
        else:
            logger.warning("readIMAGCDF: Could not interpret Ruleset")

    if not flagruletype == '':
        logger.info("readIMAGCDF: Found flagging ruleset {} vers.{} - extracting flagging information".format(flagruletype, flagruleversion))
        flagginglist = [elem for elem in datalist if elem.startswith('Flag')]
        flaglist = Ruleset2Flaglist(flagginglist, flagruletype, flagruleversion)

    datalist = [elem for elem in datalist if not elem.endswith('Times') and not elem.startswith('Flag')]

    # #########################################################
    # 2. Sort the datalist according to KEYLIST
    # #########################################################
    for key in KEYLIST:
        possvals = [key]
        if key == 'x':
            possvals.extend(['h', 'i'])
        if key == 'y':
            possvals.extend(['d', 'e'])
        if key == 'df':
            possvals.append('g')
        if key == 'f':
            possvals.append('s')
        for elem in datalist:
            try:
                label = cdfdat[elem].attrs['LABLAXIS'].lower()
                if label in possvals:
                    newdatalist.append([key, elem])
            except Exception:
                pass  # for lines which have no Label

    if not len(datalist) == len(newdatalist) - 1:
        logger.warning("readIMAGCDF: error encountered in key assignment - please check")

    # 3. Create equal length array reducing all data to primary Times and
    #    filling nans for non-existing values
    # (4. eventually completely drop time cols and just store start date and
    #    sampling period in header)
    # Deal with scalar data (independent or whatever)
    for elem in newdatalist:
        # print ("Here", elem)
        if elem[0] == 'time':
            try:
                ar = date2num(cdfdat[elem[1]][...])
            except Exception:
                ar = date2num(np.asarray([cdf.lib.tt2000_to_datetime(el) for el in cdfdat[elem[1]][...]]))
            arlen = len(ar)
            arraylist.append(ar)
            ind = KEYLIST.index('time')
            array[ind] = ar
        else:
            ar = cdfdat[elem[1]][...]
            if elem[0] in NUMKEYLIST:
                with np.errstate(invalid='ignore'):
                    ar[ar > 88880] = float(nan)
                ind = KEYLIST.index(elem[0])
                headers['col-' + elem[0]] = cdfdat[elem[1]].attrs['LABLAXIS'].lower()
                headers['unit-col-' + elem[0]] = cdfdat[elem[1]].attrs['UNITS']
                if len(indexarray) > 0 and elem[0] in ['f', 'df']:
                    # this is no good - point to depend_0
                    newar = np.asarray([np.nan] * arlen)
                    # print (len(newar), len(ar), len(indexarray))
                    newar[indexarray] = ar
                    # print (len(newar))
                    array[ind] = newar
                    arraylist.append(newar)
                else:
                    array[ind] = ar
                    arraylist.append(ar)
                # if idf -> add f column also to z
                if elem[0] in ['f', 'F'] and headers.get('DataComponents', '') in ['DIF', 'dif', 'idf', 'IDF'] and not len(array[zpos]) > 0:
                    array[zpos] = ar
                    arraylist.append(ar)
                    headers['col-z'] = cdfdat[elem[1]].attrs['LABLAXIS'].lower()
                    headers['unit-col-z'] = cdfdat[elem[1]].attrs['UNITS']

    ndarray = np.array(array)

    stream = DataStream()
    stream = [LineStruct()]

    result = DataStream(stream, headers, ndarray)

    if not flagruletype == '' and len(flaglist) > 0:
        result = result.flag(flaglist)
    # t2 = datetime.utcnow()
    # print "Duration for conventional stream assignment:", t2 - t1

    return result
9d87de2c650b140cf42cab4814659cf842f4b5a5
3,656,116
def custom_formatter(code, msg):
    """
    Custom result formatter.

    :param code: response code
    :param msg: response message
    """
    return {
        "code": code,
        "msg": "hello",
        "sss": "tt",
    }
59a7e3f9f03f9afc42b8faec6ebe23f5373d0bf0
3,656,117
def get_sampler_callback(rank, num_replicas, noniid=0, longtail=0):
    """
    noniid: noniid controls the noniidness.
        - noniid = 1 refers to completely noniid
        - noniid = 0 refers to iid.
    longtail: longtail controls the long-tailness.
        - Class i takes (1-longtail) ** i percent of data.
    """
    assert noniid >= 0 and noniid <= 1, f"`noniid` in [0, 1], get {noniid}"
    assert longtail >= 0 and longtail <= 1, f"`longtail` in [0, 1], get {longtail}"

    if longtail > 0:
        return lambda x: NONIIDLTSampler(
            alpha=1 - noniid,
            beta=1 - longtail,
            num_replicas=num_replicas,
            rank=rank,
            shuffle=True,
            dataset=x,
        )

    if noniid == 0:
        # Byzantine workers
        return lambda x: DistributedSampler(
            num_replicas=num_replicas,
            rank=rank,
            shuffle=True,
            dataset=x,
        )

    if noniid > 0:
        return lambda x: DecentralizedMixedSampler(
            noniid_percent=noniid,
            num_replicas=num_replicas,
            rank=rank,
            shuffle=True,
            dataset=x,
        )

    raise NotImplementedError("")
05e526ba903ebd834f248d965253344136e8a8a8
3,656,118
def alloc_bitrate(frame_nos, chunk_frames, pref_bitrate, nrow_tiles, ncol_tiles):
    """
    Allocates equal bitrate to all the tiles
    """
    vid_bitrate = []
    for i in range(len(chunk_frames)):
        chunk = chunk_frames[i]
        chunk_bitrate = [[-1 for x in range(ncol_tiles)] for y in range(nrow_tiles)]
        chunk_weight = [[1. for x in range(ncol_tiles)] for y in range(nrow_tiles)]
        total_weight = sum(sum(x) for x in chunk_weight)
        for x in range(nrow_tiles):
            for y in range(ncol_tiles):
                chunk_bitrate[x][y] = chunk_weight[x][y] * pref_bitrate / total_weight
        vid_bitrate.append(chunk_bitrate)
    return vid_bitrate
1883f480852d49e63c0408c9ef0daeba9e50db6b
3,656,119
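With equal weights, alloc_bitrate above simply divides pref_bitrate evenly across the tile grid; a small sanity check (argument values are arbitrary):

rates = alloc_bitrate(frame_nos=[], chunk_frames=[range(30)],
                      pref_bitrate=120, nrow_tiles=2, ncol_tiles=2)
print(rates)  # [[[30.0, 30.0], [30.0, 30.0]]]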
from typing import Collection
from typing import Any

def file_filter(extensions: Collection[str]) -> Any:
    """Register a page content filter for file extensions."""
    def wrapper(f):
        for ext in extensions:
            _file_filters[ext] = f
        return f
    return wrapper
bef1a304497ffaac3f294607d8a393e505c1eb19
3,656,120
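file_filter above is a decorator factory; a minimal sketch of how it might be used, assuming _file_filters is a module-level dict (the registry definition is an assumption for the demo):

_file_filters = {}  # assumed registry

@file_filter(['.md', '.markdown'])
def render_markdown(text):
    # placeholder filter body
    return text

print(_file_filters['.md'] is render_markdown)  # True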
def epc_calc_img_size(reg_dict):
    """
    Calculate the output image size from the EPC660 sensor

    Parameters
    ----------
    reg_dict : dict

    Returns
    ----------
    int
        The number of rows
    int
        The number of columns in the image
    """
    col_start, col_end, row_start, row_end = epc_calc_roi(reg_dict)
    row_bin, col_bin = epc_calc_bin_mode(reg_dict)
    row_binning, col_binning = epc_calc_binning(reg_dict)

    row_div = 1
    col_div = 1
    if row_bin:
        row_div = (1 << row_binning)
    if col_bin:
        col_div = (1 << col_binning)

    # integer division keeps the documented int return type
    nrows = (2 * (row_end - row_start + 1)) // row_div
    ncols = (col_end - col_start + 1) // col_div
    return nrows, ncols
698b6ae6a99f8f9621c40ffaee2ab5ea5e584ce1
3,656,121
def simple_url_formatter(endpoint, url):
    """
    A simple URL formatter to use when no application context
    is available.

    :param str endpoint: the endpoint to use.
    :param str url: the URL to format
    """
    return u"/{}".format(url)
74f3e68fe10f7cc6bf8bfe81a7349a995bb79fa3
3,656,122
from typing import List

def generate_service(
    name: str,
    image: str,
    ports: List[str] = [],
    volumes: List[str] = [],
    dependsOn: List[str] = [],
) -> str:
    """
    Creates a string with docker compose service specification.

    Arguments are a list of values that need to be added to each section
    named after the parameter. i.e. the volume arguments are for the volumes
    section of the service config.
    """
    indent = ' '
    service = "{s}{name}:\n{s}{s}image: {image}\n".format(
        s=indent,
        name=name,
        image=image,
    )
    if ports:
        service += "{s}ports:\n".format(s=indent * 2)
        for port in ports:
            service += '{s}- "{port}"\n'.format(s=indent * 3, port=port)
    if volumes:
        service += "{s}volumes:\n".format(s=indent * 2)
        for vol in volumes:
            service += '{s}- {vol}\n'.format(s=indent * 3, vol=vol)
    if dependsOn:
        service += "{s}depends_on:\n".format(s=indent * 2)
        for item in dependsOn:
            service += '{s}- "{dep}"\n'.format(s=indent * 3, dep=item)
    return service
581e37e69d73ab5b6c0ac533bd91e7b5cb5187d9
3,656,123
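A quick call to generate_service above, showing the shape of the emitted YAML fragment (service name and image are illustrative):

print(generate_service(
    'web', 'nginx:latest',
    ports=['8080:80'],
    dependsOn=['db'],
))
# Emits an indented block: "web:", its "image:", a ports list, and a depends_on list.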
def read_integer(msg=None, error_msg=None):
    """
    Asks the user for an integer value (int or long)

    :param msg: The message, displayed to the user.
    :param error_msg: The message, displayed to the user, in case he did
        not enter a valid int or long.
    :return: An int or a long from the user.
    """
    # Python 2 code: uses raw_input() and the long type.
    res = raw_input(msg)
    try:
        return int(res)
    except (TypeError, ValueError):
        pass
    try:
        return long(res)
    except (TypeError, ValueError):
        pass
    if error_msg is not None:
        print(error_msg)
    return read_integer(msg=msg, error_msg=error_msg)
c3067b436f57583b89ca02bff5e01802845ebf69
3,656,124
def set_async_call_stack_depth(maxDepth: int) -> dict:
    """Enables or disables async call stacks tracking.

    Parameters
    ----------
    maxDepth: int
        Maximum depth of async call stacks. Setting to `0` will effectively
        disable collecting async call stacks (default).
    """
    return {
        "method": "Debugger.setAsyncCallStackDepth",
        "params": {"maxDepth": maxDepth},
    }
f3767d85dc12913b11c6b13bb66f866460198672
3,656,125
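set_async_call_stack_depth above only builds the Chrome DevTools Protocol command payload; sending it is left to the caller's CDP client:

cmd = set_async_call_stack_depth(32)
print(cmd)
# {'method': 'Debugger.setAsyncCallStackDepth', 'params': {'maxDepth': 32}}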
def percError(predicted, observed):
    """Percentage Error

    Parameters
    ==========
    predicted : array-like
        Array-like (list, numpy array, etc.) of predictions
    observed : array-like
        Array-like (list, numpy array, etc.) of observed values of scalar quantity

    Returns
    =======
    perc : array-like
        Array of forecast errors expressed as a percentage
    """
    err, pred, obse = forecastError(predicted, observed, full=True)
    res = err / obse
    return 100 * res
168affcb5af47563c15d27c6e662b0cf6411eca2
3,656,126
from six import iteritems  # assumed source of the iteritems helper used below

def _dict_eq(a, b):
    """
    Compare dictionaries using their items iterators and loading as much as
    half of each into a local temporary store.

    For comparisons of ordered dicts, memory usage is nil. For comparisons
    of dicts whose iterators differ in sequence maximally, memory consumption
    is O(N). Execution time is O(N).

    :param a: one dict
    :param b: another dict
    :return: True if they're the same, false otherwise
    """
    # The memory consumption here is to make a linear improvement in execution
    # time. In the case of a dict backed by Redis, it is faster to iterate
    # over N items than to retrieve each one, by a factor of 10 or more
    # because of the reduced round-trips to the server.
    size = len(a)
    if size != len(b):
        return False

    # Iterate over both dicts. Compare items. If the same ones come up
    # at the same time, great, they match. If different ones come up,
    # store them in the am and bm collections of misses. Check for prior
    # misses that may be matched by the new elements.
    bi = iteritems(b)
    am = {}
    bm = {}
    for ak, av in iteritems(a):
        bk, bv = next(bi)
        if ak == bk:
            if av != bv:
                return False
        else:  # keys differ
            if ak in bm:
                if bm[ak] == av:
                    del bm[ak]
                else:
                    return False
            else:
                am[ak] = av
            if bk in am:
                if am[bk] == bv:
                    del am[bk]
                else:
                    return False
            else:
                bm[bk] = bv
            if len(am) + len(bm) > size:
                return False
    return len(am) + len(bm) == 0
68292489e4f6f8f213f4d17cf799052cb99ece37
3,656,127
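_dict_eq above matches dicts regardless of iteration order; a short check:

a = {'x': 1, 'y': 2, 'z': 3}
b = {'z': 3, 'x': 1, 'y': 2}
print(_dict_eq(a, b))          # True
print(_dict_eq(a, {'x': 1}))   # False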
from typing import Dict
from typing import List

def avoid_snakes(my_head: Dict[str, int], snakes: List[dict],
                 possible_moves: List[str]) -> List[str]:
    """
    my_head: Dictionary of x/y coordinates of the Battlesnake head.
             e.g. {"x": 0, "y": 0}
    snakes: List of dictionaries of x/y coordinates for every segment of a
            Battlesnake. e.g. [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}]
    possible_moves: List of strings. Moves to pick from.
                    e.g. ["up", "down", "left", "right"]

    return: The list of remaining possible_moves not blocked by other snakes
    """
    for snake in snakes:
        for segment in snake["body"]:
            if my_head["x"] - 1 == segment["x"] and my_head["y"] == segment["y"]:
                print("Segment to the left")
                remove_move("left", possible_moves)
            if my_head["x"] + 1 == segment["x"] and my_head["y"] == segment["y"]:
                print("Segment to the right")
                remove_move("right", possible_moves)
            if my_head["x"] == segment["x"] and my_head["y"] - 1 == segment["y"]:
                print("Segment below")
                remove_move("down", possible_moves)
            if my_head["x"] == segment["x"] and my_head["y"] + 1 == segment["y"]:
                print("Segment above")
                remove_move("up", possible_moves)
        # We're going to be super conservative if we're near another head
        # to avoid head on collisions
        if my_head["x"] - 2 == snake["head"]["x"] and my_head["y"] == snake["head"]["y"]:
            print("Dodge the head!")
            remove_move("left", possible_moves)
        if my_head["x"] + 2 == snake["head"]["x"] and my_head["y"] == snake["head"]["y"]:
            print("Dodge the head!")
            remove_move("right", possible_moves)
        if my_head["x"] == snake["head"]["x"] and my_head["y"] - 2 == snake["head"]["y"]:
            print("Dodge the head!")
            remove_move("down", possible_moves)
        if my_head["x"] == snake["head"]["x"] and my_head["y"] + 2 == snake["head"]["y"]:
            print("Dodge the head!")
            remove_move("up", possible_moves)
        if my_head["x"] - 1 == snake["head"]["x"] and my_head["y"] + 1 == snake["head"]["y"]:
            print("Dodge the head!")
            remove_move("left", possible_moves)
            remove_move("up", possible_moves)
        if my_head["x"] - 1 == snake["head"]["x"] and my_head["y"] - 1 == snake["head"]["y"]:
            print("Dodge the head!")
            remove_move("left", possible_moves)
            remove_move("down", possible_moves)
        if my_head["x"] + 1 == snake["head"]["x"] and my_head["y"] + 1 == snake["head"]["y"]:
            print("Dodge the head!")
            remove_move("right", possible_moves)
            remove_move("up", possible_moves)
        if my_head["x"] + 1 == snake["head"]["x"] and my_head["y"] - 1 == snake["head"]["y"]:
            print("Dodge the head!")
            remove_move("right", possible_moves)
            remove_move("down", possible_moves)
    return possible_moves
dcdd80522486ec1c6001aa8990f2bfaf88235ec1
3,656,128
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
import zipfile
import logging
import os
import tarfile
import traceback

def getChipZip(request, path):
    """Download the AutoPH file, converted to zip compression"""
    logger = logging.getLogger(__name__)
    path = os.path.join("/", path)
    try:
        name = os.path.basename(path)
        name = name.split(".")[0]

        # initialize zip archive file
        zipfilename = os.path.join("/tmp", "%s.zip" % name)
        zipobj = zipfile.ZipFile(zipfilename, mode="w", allowZip64=True)

        # open tar.bz2 file, extract all members and write to zip archive
        tf = tarfile.open(os.path.join(path))
        for tarobj in tf.getmembers():
            contents = tf.extractfile(tarobj)
            zipobj.writestr(tarobj.name, contents.read())
        zipobj.close()

        response = HttpResponse(
            FileWrapper(open(zipfilename)), mimetype="application/zip"
        )
        response["Content-Disposition"] = "attachment; filename=%s" % os.path.basename(
            zipfilename
        )
        os.unlink(zipfilename)
        return response
    except Exception as inst:
        logger.exception(traceback.format_exc())
        ctxd = {
            "error_state": 1,
            "error": [["Error", "%s" % inst], ["Error type", "%s" % type(inst)]],
            "locations_list": [],
            "base_site_name": "Error",
            "files": [],
            "protonDiags": [],
        }
        ctx = RequestContext(request, ctxd)
        return render_to_response(
            "rundb/configure/ion_chips.html", context_instance=ctx
        )
af836e47967e830e1a36ade074bca655efc8fcbc
3,656,129
def m_unicom_online_time2_0(seq):
    """
    Map a China Unicom time-on-network interval to its corresponding code.

    :param seq: Unicom time-on-network interval
    :return: code

    example:
        :seq: [0-1]
        :return: 1
    """
    if not seq:
        return []
    if seq[0] in ["[0-1]", "(1-2]", "[3-6]"]:
        seq = ["(0_6)"]
    elif seq[0] in ["[7-12]"]:
        seq = ["[6_12)"]
    elif seq[0] in ["[13-24]"]:
        seq = ["[12_24)"]
    elif seq[0] in ["[25-36]", "[37,+)"]:
        seq = ["[24_+)"]
    return seq
4a242d76f3d2708b5ad590830156a44fd22e7267
3,656,130
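m_unicom_online_time2_0 above rewrites interval labels; two quick calls:

print(m_unicom_online_time2_0(["[7-12]"]))   # ['[6_12)']
print(m_unicom_online_time2_0([]))           # []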
def convert_config_gui_structure(config_gui_structure, port, instance_id,
                                 is_port_in_database, conf):
    """
    Converts the internal data structure to a dictionary which follows the
    "Configuration file structure", see setup.rst

    :param config_gui_structure: Data structure used to hold and show
        configuration information in the GUI
    :return: A dictionary which follows the "Configuration file structure",
        see setup.rst
    """
    config_dict = identify_existing_config_file(port, conf.OPRP_DIR_PATH)

    if not is_port_in_database and config_dict:
        file_path = get_config_file_path(port, conf.OPRP_DIR_PATH)
        LOGGER.error("The identified configuration file does not exist in the database. "
                     "File path: %s" % file_path)

    if not (is_port_in_database and config_dict):
        config_dict = get_default_client()

    config_dict = clear_config_keys(config_dict)

    if instance_id:
        config_dict[CONFIG_DICT_INSTANCE_ID_KEY] = instance_id

    if contains_dynamic_discovery_info(config_gui_structure):
        gui_config = GuiConfig(config_gui_structure)
        config_dict['srv_discovery_url'] = gui_config.get_dynamic_discovery_issuer()
    elif config_gui_structure['fetchStaticProviderInfo']['showInputFields']:
        config_dict = static_provider_info_to_config_file_dict(config_gui_structure,
                                                               config_dict)

    config_dict = client_registration_to_config_file_dict(config_gui_structure, config_dict)
    config_dict = subject_type_to_config_file_dict(config_dict, config_gui_structure)
    config_dict = profile_to_config_file_dict(config_dict, config_gui_structure)

    if config_gui_structure['webfingerSubject'] != "":
        config_dict['webfinger_subject'] = config_gui_structure['webfingerSubject']
    if config_gui_structure['loginHint'] != "":
        config_dict['login_hint'] = config_gui_structure['loginHint']
    if config_gui_structure['uiLocales'] != "":
        config_dict['ui_locales'] = config_gui_structure['uiLocales']
    if config_gui_structure['claimsLocales'] != "":
        config_dict['claims_locales'] = config_gui_structure['claimsLocales']
    if config_gui_structure['acrValues'] != "":
        config_dict['acr_values'] = config_gui_structure['acrValues']
    if config_gui_structure['webfinger_url'] != "":
        config_dict['webfinger_url'] = config_gui_structure['webfinger_url']
    if config_gui_structure['webfinger_email'] != "":
        config_dict['webfinger_email'] = config_gui_structure['webfinger_email']

    return config_dict
3f46a621261ba097918fb5b5d27bd7611910a623
3,656,131
import numpy as np

def message_similarity_hard(m1, m2):
    """
    Inputs: one-dimensional numpy arrays of matching length.
    """
    return int(np.all(m1 == m2))
8f649a295853c34d692fb96a0a7facbc82d67ddb
3,656,132
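message_similarity_hard above returns 1 only on exact equality:

import numpy as np

print(message_similarity_hard(np.array([1, 0, 1]), np.array([1, 0, 1])))  # 1
print(message_similarity_hard(np.array([1, 0, 1]), np.array([1, 1, 1])))  # 0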
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity_block is the block that has no conv layer at shortcut

    Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 3
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Convolution2D(nb_filter2, kernel_size, kernel_size,
                      border_mode='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = merge([x, input_tensor], mode='sum')
    x = Activation('relu')(x)
    return x
38a898a3b52f12490584206dfa6ea6b9819a1240
3,656,133
def convert_to_squad(story_summary_content, question_content, set_type):
    """
    :param story_summary_content:
    :param question_content:
    :param set_type:
    :return: formatted SQUAD data

    At initial version, we are just focusing on the context and question,
    nothing more, therefore we are ignoring the answer part as of now.
    """
    squad_formatted_content = dict()
    squad_formatted_content['version'] = 'narrativeqa_squad_format'
    data = []

    content = story_summary_content
    if set_type != 'all':
        content = story_summary_content[story_summary_content['set'] == set_type]

    for datum in content.itertuples(index=False):
        # print(datum.summary)
        data_ELEMENT = dict()
        data_ELEMENT['title'] = 'dummyTitle'
        paragraphs = []
        paragraphs_ELEMENT = dict()
        superdocument = datum.summary
        paragraphs_ELEMENT['context'] = superdocument
        qas = []

        sub_datum = question_content[question_content['document_id'] == datum.document_id]
        for q_datum in sub_datum.itertuples():
            # print(q_datum)
            qas_ELEMENT = dict()
            ANSWERS_ELEMENT = dict()
            qas_ELEMENT_ANSWERS = []
            qas_ELEMENT['id'] = q_datum.document_id + '-' + str(q_datum.Index)
            qas_ELEMENT['question'] = q_datum.question
            ANSWERS_ELEMENT['answer_start'] = -1
            ANSWERS_ELEMENT['text'] = 'dummyAnswer'
            qas_ELEMENT_ANSWERS.append(ANSWERS_ELEMENT)
            qas_ELEMENT['answers'] = qas_ELEMENT_ANSWERS
            qas.append(qas_ELEMENT)

        paragraphs_ELEMENT['qas'] = qas
        paragraphs.append(paragraphs_ELEMENT)
        data_ELEMENT['paragraphs'] = paragraphs
        data.append(data_ELEMENT)

    squad_formatted_content['data'] = data
    return squad_formatted_content
5b884ef521af4d5835fef25f01cb1f11d68cfafb
3,656,134
import select
import sys
import os
from time import time

def push_output(process, primary_fd, out_buffer: TextBuffer,
                process_state: ProcessState,
                is_interactive_session: bool, on_error: callable):
    """
    Receive output from running process and forward to streams, capture

    :param process:
    :param primary_fd:
    :param out_buffer:
    :param process_state:
    :param is_interactive_session:
    :param on_error:
    :return:
    """
    poller = select.epoll()
    poller.register(primary_fd, select.EPOLLIN)

    # terminal window size updating
    terminal_update_time = 3  # 3 seconds
    last_terminal_update = time()

    should_update_terminal_size = True
    try:
        copy_terminal_size(sys.stdout, primary_fd)
    except OSError as e:
        if e.errno == 25:
            should_update_terminal_size = False
        else:
            raise

    if is_interactive_session:
        poller.register(sys.stdin, select.EPOLLIN)

    while process.poll() is None:
        for r, flags in poller.poll(timeout=0.01):
            try:
                if is_interactive_session and sys.stdin.fileno() is r:
                    d = os.read(r, 10240)
                    os.write(primary_fd, d)
                elif primary_fd is r:
                    o = os.read(primary_fd, 10240)

                    # terminal window size updating
                    if should_update_terminal_size and time() - last_terminal_update >= terminal_update_time:
                        copy_terminal_size(sys.stdout, primary_fd)
                        last_terminal_update = time()

                    # propagate to stdout
                    if o:
                        decoded = carefully_decode(o, 'utf-8')
                        sys.stdout.write(decoded)
                        sys.stdout.flush()
                        out_buffer.write(decoded)

                if process_state.has_exited:
                    return True
            except Exception as exc:
                process_state.exception = exc
                process_state.has_exited = True
                on_error()
                return
accb4fa66d8746766b4552c09d22b3d2fd3502c0
3,656,135
from random import choice

from sympy.parsing.sympy_parser import (parse_expr, standard_transformations,
                                        convert_xor,
                                        implicit_multiplication_application)

def check_conditions(conditions, variable_dict, domain_dict, domain_list):
    """A function that checks if the generated variables pass the conditions
    and generates new ones until they do.

    :param conditions: The conditions of the template.
    :param variable_dict: List of variables.
    :param domain_dict: the domain of the variables.
    :param domain_list: a dict with the domain list.
    :return: List of variables that pass the conditions of the given template.
    """
    conditions = remove_unnecessary(conditions)
    # Check conditions --> if false: change a variable -> check conditions
    inserted_conditions = string_replace(conditions, variable_dict)
    while not parse_expr(latex_to_sympy(inserted_conditions),
                         transformations=standard_transformations +
                         (convert_xor, implicit_multiplication_application,),
                         global_dict=None,
                         evaluate=True):
        variable_to_change = choice(list(variable_dict.keys()))  # Choose a random key from variable_dict
        if domain_list[variable_to_change]:
            variable_dict[variable_to_change] = make_number_from_list(domain_dict[variable_to_change])
        else:
            variable_dict[variable_to_change] = new_random_value(variable_to_change, domain_dict)
        inserted_conditions = string_replace(conditions, variable_dict)
    return variable_dict
fffd9889d3c149f56041753522aee245135cf0ee
3,656,136
def set_pin_on_teaching_page(request, section_label, pin=True):
    """
    if pin=True, pin the section on teaching page
    if pin=False, unpin the section from teaching page

    @except InvalidSectionID
    @except NotSectionInstructorException
    @except UserCourseDisplay.DoesNotExist
    """
    section = get_section_by_label(section_label)
    check_section_instructor(section, get_person_of_current_user(request))

    # not to pin a primary section
    if section.is_primary_section:
        return False

    return UserCourseDisplay.set_pin(
        get_user_model(request), section_label, pin)
385940e3adc286a923a94a3205b56c3817ee6284
3,656,137
from typing import Any

def inject_python_resources() -> dict[str, Any]:
    """
    Inject common resources to be used in Jinja templates.
    """
    return dict(
        isinstance=isinstance,
        zip=zip,
        enumerate=enumerate,
        len=len,
        str=str,
        bool=bool,
        int=int,
        float=float,
    )
98fb7fbf39f20b9972ef5c0d35ae12b2864580b2
3,656,138
def get_feature_subsets_options(study, data_types):
    """Given a study and list of data types, get the relevant feature subsets
    """
    feature_subsets = ['custom']
    if 'expression' in data_types:
        try:
            feature_subsets.extend(study.expression.feature_subsets.keys())
        except AttributeError:
            pass
    if 'splicing' in data_types:
        try:
            feature_subsets.extend(study.splicing.feature_subsets.keys())
        except AttributeError:
            pass

    # Cast to "set" to get rid of duplicates, then back to list because you
    # can't sort a set, then back to list after sorting because you get
    # an iterator... yeah ....
    feature_subsets = list(natural_sort(list(set(feature_subsets))))

    # Make sure "variant" is first because all datasets have that
    # first remove 'variant' if it is there, then add it at the front
    try:
        feature_subsets.pop(feature_subsets.index('variant'))
    except ValueError:
        pass
    feature_subsets.insert(0, 'variant')
    return feature_subsets
d9310f00ff001f5ddc643998c7544df6ba5382b5
3,656,139
import json

def possibilities(q=0, *num):
    """
    :param q: number of squares to consider
    :param num: in how many squares the sum of the number of bombs is 1
    :return: pos -> possible distributions of the bombs
             tot -> number of squares in which there is only one bomb
             i -> start of the count of squares where the sum of the bombs is 1
    """
    lbn = []
    lp = []
    num = str(num).replace('(', '[').replace(')', ']')
    num = json.loads(num)
    for c4 in range(0, len(num)):
        num[c4] += ['']
    for c1 in range(0, 2 ** q):
        pos = []
        bn = str(bin(c1)).replace('0b', '')
        # bn = int(bn, base=2) -> reverses the process
        bn = bn.rjust(q, '0')
        pos += bn
        ts = 0
        for c2 in range(0, len(num)):
            i = num[c2][0]
            tot = num[c2][1]
            # print(bn, tot, pos)
            s = 0
            for c3 in range(i, tot + i):
                if pos[c3] == '1':
                    s += 1
            if num[c2][3] != '':
                # print(num[c2], pos[num[c2][3]])
                if pos[num[c2][3]] == '1':
                    s += 1
            if s == num[c2][2]:
                ts += 1
                # print(bn, s)
        if ts == len(num):
            lbn += [bn]
    for c5 in range(0, q):
        lp += [0]
    for item in lbn:
        for c6 in range(0, q):
            if item[c6] == '1':
                lp[c6] += 1
    return lp
94c126a1bacf5bb242ad2935f949ab146f847001
3,656,140
import re

def parse_page_options(text):
    """
    Parses special fields in page header. The header is separated by a line
    with 3 dashes. It contains lines of the "key: value" form, which define
    page options.

    Returns a dictionary with such options. Page text is available as option
    named "text".
    """
    if not isinstance(text, str):
        raise TypeError('parse_page_options() expects Unicode text, not "%s".' % text.__class__.__name__)

    options = dict()
    text = text.replace('\r\n', '\n')  # fix different EOL types
    parts = text.split(u'\n---\n', 1)
    if len(parts) > 1:
        for line in parts[0].split('\n'):
            if not line.startswith('#'):
                kv = line.split(':', 1)
                if len(kv) == 2:
                    k = kv[0].strip()
                    v = kv[1].strip()
                    if k.endswith('s'):
                        v = re.split(r'[\s,]+', v)
                    options[k] = v
    options['text'] = parts[-1]
    return options
b90b1adb7d5d6f8716b9d4e00b0e4b533393f725
3,656,141
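A short run of parse_page_options above on a page with a header block:

page = "title: Home\ntags: a, b\n---\nBody text here."
opts = parse_page_options(page)
print(opts['title'])  # Home
print(opts['tags'])   # ['a', 'b']
print(opts['text'])   # Body text here.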
def _read_16_bit_message(prefix, payload_base, prefix_type, is_time, data,
                         offset, eieio_header):
    """ Return a packet containing 16 bit elements """
    if payload_base is None:
        if prefix is None:
            return EIEIO16BitDataMessage(eieio_header.count, data, offset)
        elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
            return EIEIO16BitLowerKeyPrefixDataMessage(
                prefix, eieio_header.count, data, offset)
        elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
            return EIEIO16BitUpperKeyPrefixDataMessage(
                prefix, eieio_header.count, data, offset)
    elif payload_base is not None and not is_time:
        if prefix is None:
            return EIEIO16BitPayloadPrefixDataMessage(
                payload_base, eieio_header.count, data, offset)
        elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
            return EIEIO16BitPayloadPrefixLowerKeyPrefixDataMessage(
                prefix, payload_base, eieio_header.count, data, offset)
        elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
            return EIEIO16BitPayloadPrefixUpperKeyPrefixDataMessage(
                prefix, payload_base, eieio_header.count, data, offset)
    elif payload_base is not None and is_time:
        if prefix is None:
            return EIEIO16BitTimedPayloadPrefixDataMessage(
                payload_base, eieio_header.count, data, offset)
        elif prefix_type == EIEIOPrefix.LOWER_HALF_WORD:
            return EIEIO16BitTimedPayloadPrefixLowerKeyPrefixDataMessage(
                prefix, payload_base, eieio_header.count, data, offset)
        elif prefix_type == EIEIOPrefix.UPPER_HALF_WORD:
            return EIEIO16BitTimedPayloadPrefixUpperKeyPrefixDataMessage(
                prefix, payload_base, eieio_header.count, data, offset)
    return EIEIOWithoutPayloadDataMessage(eieio_header, data, offset)
b552d06b314d47ee3ae928ebcee678c65bd24f84
3,656,142
import numpy as np
import shap

def test_linear():
    """
    Tests that KernelExplainer returns the correct result when the model is linear.
    (as per corollary 1 of https://arxiv.org/abs/1705.07874)
    """
    np.random.seed(2)
    x = np.random.normal(size=(200, 3), scale=1)

    # a linear model
    def f(x):
        return x[:, 0] + 2.0 * x[:, 1]

    phi = shap.KernelExplainer(f, x).shap_values(x, l1_reg="num_features(2)", silent=True)
    assert phi.shape == x.shape

    # corollary 1
    expected = (x - x.mean(0)) * np.array([1.0, 2.0, 0.0])
    np.testing.assert_allclose(expected, phi, rtol=1e-3)
6e716d6505162aa49507b026672455d357ab7c2b
3,656,143
def csm_shape(csm):
    """
    Return the shape field of the sparse variable.
    """
    return csm_properties(csm)[3]
a74357086a9d7233cabed1c6ddc14fdbdbe0b41f
3,656,144
def hyperlist_to_labellist(hyperlist):
    """
    :param hyperlist:
    :return: labellist, labels to use for plotting
    """
    return [hyper_to_label(hyper) for hyper in hyperlist]
9587694d783ccbd122b58894a2d80ee5e58dc900
3,656,145
import json

def _pretty_print_dict(dictionary):
    """Generates a pretty-print formatted version of the input JSON.

    Args:
        dictionary (dict): the JSON string to format.

    Returns:
        str: pretty-print formatted string.
    """
    return json.dumps(_ascii_encode_dict(dictionary), indent=2, sort_keys=True)
17e94d18f824253540fd968c726721542f25a95e
3,656,146
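_pretty_print_dict above depends on a module helper _ascii_encode_dict; with an identity stand-in (an assumption for this demo only), the output looks like:

_ascii_encode_dict = lambda d: d  # stand-in for the module's helper

print(_pretty_print_dict({'b': 1, 'a': 2}))
# {
#   "a": 2,
#   "b": 1
# }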
def _bivariate_kdeplot(x, y, filled, fill_lowest, kernel, bw, gridsize, cut,
                       clip, axlabel, cbar, cbar_ax, cbar_kws, ax, **kwargs):
    """Plot a joint KDE estimate as a bivariate contour plot."""
    # Determine the clipping
    if clip is None:
        clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
    elif np.ndim(clip) == 1:
        clip = [clip, clip]

    # Calculate the KDE
    if _has_statsmodels:
        xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
    else:
        xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)

    # Plot the contours
    n_levels = kwargs.pop("n_levels", 10)

    scout, = ax.plot([], [])
    default_color = scout.get_color()
    scout.remove()

    cmap = kwargs.pop("cmap", None)
    color = kwargs.pop("color", None)
    if cmap is None and "colors" not in kwargs:
        if color is None:
            color = default_color
        if filled:
            cmap = light_palette(color, as_cmap=True)
        else:
            cmap = dark_palette(color, as_cmap=True)
    if isinstance(cmap, str):
        if cmap.endswith("_d"):
            pal = ["#333333"]
            pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
            cmap = blend_palette(pal, as_cmap=True)
        else:
            cmap = mpl.cm.get_cmap(cmap)

    label = kwargs.pop("label", None)

    kwargs["cmap"] = cmap
    contour_func = ax.contourf if filled else ax.contour
    cset = contour_func(xx, yy, z, n_levels, **kwargs)
    if filled and not fill_lowest:
        cset.collections[0].set_alpha(0)
    kwargs["n_levels"] = n_levels

    if cbar:
        cbar_kws = {} if cbar_kws is None else cbar_kws
        ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)

    # Label the axes
    if hasattr(x, "name") and axlabel:
        ax.set_xlabel(x.name)
    if hasattr(y, "name") and axlabel:
        ax.set_ylabel(y.name)

    if label is not None:
        legend_color = cmap(.95) if color is None else color
        if filled:
            ax.fill_between([], [], color=legend_color, label=label)
        else:
            ax.plot([], [], color=legend_color, label=label)

    return ax
ecb60ec3ffdc746f40b89158ef3c5b3a03e85bfc
3,656,147
import os

import pandas as pd
import scanpy as sc
import scipy
from anndata import AnnData
from scipy.sparse import issparse

def load_file(path):
    """
    Load single cell dataset from file
    """
    if os.path.exists(DATA_PATH + path + '.h5ad'):
        adata = sc.read_h5ad(DATA_PATH + path + '.h5ad')
    elif os.path.isdir(path):  # mtx format
        adata = read_mtx(path)
    elif os.path.isfile(path):
        if path.endswith(('.csv', '.csv.gz')):
            adata = sc.read_csv(path).T
        elif path.endswith(('.txt', '.txt.gz', '.tsv', '.tsv.gz')):
            df = pd.read_csv(path, sep='\t', index_col=0).T
            adata = AnnData(df.values,
                            dict(obs_names=df.index.values),
                            dict(var_names=df.columns.values))
        elif path.endswith('.h5ad'):
            adata = sc.read_h5ad(path)
    else:
        raise ValueError("File {} does not exist".format(path))

    if not issparse(adata.X):
        adata.X = scipy.sparse.csr_matrix(adata.X)
    adata.var_names_make_unique()
    return adata
bed61b35bf6bf6777f3350d34e754cd80acca327
3,656,148
def unified_load(namespace, subclasses=None, recurse=False):
    """Provides a unified interface to both the module and class loaders,
    finding modules by default or classes if given a ``subclasses`` parameter.
    """
    if subclasses is not None:
        return ClassLoader(recurse=recurse).load(namespace, subclasses=subclasses)
    else:
        return ModuleLoader(recurse=recurse).load(namespace)
62f5f4e17d3d232bfa72090a836f89f782068b53
3,656,149
def generate_free_rooms(room_times: dict) -> dict:
    """
    Generates data structure for getting free rooms for each time.
    """
    # create data format
    free_rooms = {'M': {}, 'Tu': {}, 'W': {}, 'Th': {}, 'F': {}}

    # add empty lists for each time
    for dotw in free_rooms:
        for i in range(0, 144):
            free_rooms[dotw][i] = []

    # iterate through all the rooms, days, and times
    for room in room_times:
        for day in room_times[room]:
            for time in room_times[room][day]:
                # add the room to the corresponding time
                free_rooms[day][time].append(room)

    return free_rooms
e60df355acd84e60c08ba34a45a2131d8d4519b4
3,656,150
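A small room_times mapping and the resulting lookup for generate_free_rooms above (times are slot indices from 0 to 143):

room_times = {
    'A101': {'M': [0, 1], 'W': [0]},
    'B202': {'M': [1]},
}
free = generate_free_rooms(room_times)
print(free['M'][1])  # ['A101', 'B202']
print(free['W'][0])  # ['A101']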
def code_parse_line(li, pattern_type="import/import_extern"):
    """
    External Packages
    """
    ### Import pattern
    if pattern_type == "import":
        if li.find("from") > -1:
            l = li[li.find("from") + 4: li.find("import")].strip().split(",")
        else:
            l = li.strip().split("import ")[1].strip().split(",")
        l = [x for x in l if x != ""]
        l = np_list_dropduplicate(l)
        return l

    # Only external
    if pattern_type == "import_extern":
        if li.find("from") > -1:
            l = li[li.find("from") + 4: li.find("import")].strip().split(",")
        else:
            l = li.strip().split("import ")[1].strip().split(",")
        l = [x for x in l if x != ""]
        l = [x for x in l if x[0] != "."]
        l = [x.split(".")[0].split("as")[0].split("#")[0].strip() for x in l]
        l = np_list_dropduplicate(l)
        return l
347b3d0c3192978beb4c26a1950d86482812310b
3,656,151
def get_high(pair, path="https://api.kraken.com/0/public"):
    """
    Get the last 24h high price of `pair`.

    Parameters
    ----------
    pair : str
        Code of the requested pair(s). Comma delimited if several pairs.
    path : str
        Path of the exchange to request.

    Returns
    -------
    float or dict
        Last 24h higher price(s).
    """
    return _get_ticker(pair, 'h', path)
8443ad24450e8f7bd2b6fac339e5e2b9149685c1
3,656,152
import numpy as np
from netCDF4 import Dataset  # assumed source of the Dataset reader used below

def SIx():
    """
    Reads in future LENS SI-x data

    Returns
    ----------
    leafmean : array of leaf indices (ens x year x lat x lon)
    latmean : array of last freeze indices (ens x year x lat x lon)
    lstfrz : list of last freeze indices
    lat : array of latitudes
    lon : array of longitudes
    """
    directory = '/volumes/eas-shared/ault/ecrl/spring-indices/data/'
    versions = ['002', '003', '004', '005', '006', '007', '008', '009', '010',
                '011', '012', '013', '014', '015', '016', '017', '018', '019',
                '020', '021', '022', '023', '024', '025', '026', '027', '028',
                '029', '030']
    leaf = []
    lstfrz = []
    for version in versions:
        years = 'b.e11.BRCP85C5CNBDRD.f09_g16.%s.cam.h.SI-x.2006-2080.nc' % version
        filename = directory + years
        values = Dataset(filename)
        lon = values.variables['lon'][189:240]
        lat = values.variables['lat'][:32]
        lstfrz_index = values.variables['lstfrz_index'][:, :32, 189:240]
        leaf_index = values.variables['leaf_index'][:, :32, 189:240]
        values.close()

        leaf.append(leaf_index)
        lstfrz.append(lstfrz_index)

    latmean = np.asarray(lstfrz)
    leafmean = np.asarray(leaf)

    print('Done! 1')
    return leafmean, latmean, lstfrz, lat, lon
0ac033577d73c6567ebef10437a7e44e51bf5c79
3,656,153
import scipy.stats

def make_truncnorm_gen_with_bounds(mean, std, low_bound, hi_bound):
    """
    low_bound and hi_bound are in the same units as mean and std
    """
    assert hi_bound > low_bound
    clipped_mean = min(max(mean, low_bound), hi_bound)

    if clipped_mean == low_bound:
        low_sigma = -0.01 * std
        hi_sigma = (hi_bound - clipped_mean) / std
    elif clipped_mean == hi_bound:
        low_sigma = (low_bound - clipped_mean) / std
        hi_sigma = 0.01 * std
    else:
        low_sigma = (low_bound - clipped_mean) / std
        hi_sigma = (hi_bound - clipped_mean) / std
    return scipy.stats.truncnorm(low_sigma, hi_sigma, loc=clipped_mean, scale=std)
8e957d99141a56f804bebf931098fa147d066bb8
3,656,154
from invenio_app_ils.ill.api import BORROWING_REQUEST_PID_TYPE
from invenio_app_ils.ill.proxies import current_ils_ill
from invenio_app_ils.items.api import ITEM_PID_TYPE
from invenio_app_ils.proxies import current_app_ils
from invenio_app_ils.errors import UnknownItemPidTypeError

def resolve_item_from_loan(item_pid):
    """Resolve the item referenced in loan based on its PID type."""
    if item_pid["type"] == ITEM_PID_TYPE:
        rec_cls = current_app_ils.item_record_cls
    elif item_pid["type"] == BORROWING_REQUEST_PID_TYPE:
        rec_cls = current_ils_ill.borrowing_request_record_cls
    else:
        raise UnknownItemPidTypeError(pid_type=item_pid["type"])
    return rec_cls.get_record_by_pid(item_pid["value"])
f58ea857a445f2e6e01f426656f87a2032ea8306
3,656,155
import os

def lascolor(strPathInLAS, strPathOutLAS, strPathTif, strAdlSwitches=None):
    """
    Function lascolor
    args:
        strPathInLAS = input LAS file
        strPathOutLAS = output LAS file
        strPathTif = Tif source of RGB values
        strAdlSwitches = optional additional switches

    Command Syntax:
    """
    strSwitches = ''
    if strAdlSwitches:
        strSwitches = strSwitches + ' ' + strAdlSwitches

    lstCMD = [strPathLtInstall + os.sep + 'lascolor',
              '-i ' + strPathInLAS.strip(),
              '-o ' + strPathOutLAS,
              '-image ' + strPathTif,
              strSwitches]
    strCMD = ' '.join(lstCMD)
    return strCMD
59400923bd0148a3659f7988596b1b7b2d4a70b6
3,656,156
def delta(s1, s2):
    """
    Find the difference in characters between s1 and s2.

    Complexity: O(n), n - length of s1 or s2 (they have the same length).

    Returns: dict, format {extra: [], missing: []}
        extra: list, letters in s2 but not in s1
        missing: list, letters in s1 but not in s2
    """
    letters = {}
    for c in s1:
        if c not in letters:
            letters[c] = 1
        else:
            letters[c] += 1

    extra = []  # letters which are in s2 but not in s1
    for c in s2:
        if c not in letters:
            extra.append(c)
        else:
            letters[c] -= 1

    missing = []  # letters which are in s1 but not in s2
    for (letter, count) in letters.items():
        if count > 0:
            missing.append(letter)

    return {'extra': extra, 'missing': missing}
e439b5a4cf634f5e53fbf845b5774342cedeb404
3,656,157
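delta above compares two equal-length strings by character counts:

print(delta('abcd', 'abce'))  # {'extra': ['e'], 'missing': ['d']}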
import numpy as np

def mean_predictive_value(targets, preds, cm=None, w=None, adjusted=False):
    """
    :purpose:
    Calculates the mean predictive value between a discrete target and pred array

    :params:
    targets, preds : discrete input arrays, both of shape (n,)
    cm : if you have previously calculated a confusion matrix, pass it here
         to save the computation. set as None, which makes the function
         calculate the confusion matrix
    w : weights at each index of true and pred. array of shape (n,)
        if no w is set, it is initialized as an array of ones
        such that it will have no impact on the output
    adjusted : bool. if true, adjust the output for chance (making 0 the
        worst and 1 the best score). defaults to false

    :returns:
    mean_predictive_value : float, the mean predictive value of the targets
        and preds array

    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> true = np.random.RandomState(seed=0).randint(2, size=10000)
    >>> pred = np.random.RandomState(seed=1).randint(2, size=10000)
    >>> fastdist.mean_predictive_value(true, pred)
    0.49030739883826424

    by saskra
    """
    w = init_w(w, len(targets))
    if cm is None:
        cm = confusion_matrix(targets, preds, w=w)
    n = cm.shape[0]

    diag, columns_sums = np.zeros(n), np.zeros(n)
    for i in range(n):
        diag[i] = cm[i][i]  # sum of the diagonal = true results
        for j in range(n):
            columns_sums[j] += cm[i][j]  # sums of the columns = predictions per class

    class_div = diag / columns_sums  # fraction of true results among the predicted ones per class

    div_mean = 0
    for i in range(n):
        div_mean += class_div[i]
    div_mean /= n  # mean fraction of true results among the predicted ones

    if adjusted:
        div_mean -= 1 / n
        div_mean /= 1 - 1 / n

    return div_mean
9e7b7047d0dcf79509e544ca8bb0d621d1ce283d
3,656,158
import numpy as np

def delta(phase, inc, ecc=0, omega=0):
    """
    Compute the distance center-to-center between planet and host star.

    INPUT:
        phase: orbital phase in radians
        inc: inclination of the system in radians

    OPTIONAL INPUT:
        ecc: orbital eccentricity
        omega: argument of periastron

    OUTPUT:
        distance center-to-center, double-float number.
    """
    phase = 2 * np.pi * phase
    if ecc == 0 and omega == 0:
        delta = np.sqrt(1 - (np.cos(phase) ** 2) * (np.sin(inc) ** 2))
    else:
        delta = (1. - ecc ** 2.) / (1. - ecc * np.sin(phase - omega)) * np.sqrt(
            1. - (np.cos(phase)) ** 2. * (np.sin(inc)) ** 2)
    return delta
797d84618ade3e84b63a1a40e7728de77d5465ca
3,656,159
def theoritical_spectrum(peptide_sequence):
    """Returns the theoretical spectrum of a given amino acid sequence.

    INPUT:
        peptide_sequence: string. The peptide sequence to get its
        theoretical spectrum.

    OUTPUT:
        List. The theoretical spectrum of the given peptide sequence.
    """
    linear_kmers = []
    cyclic_kmers = []
    for i in range(len(peptide_sequence)):
        for j in range(i, len(peptide_sequence)):
            linear_kmers.append(peptide_sequence[i:j + 1])
    for i in range(2, len(peptide_sequence)):
        for j in range(i - 1):
            cyclic_kmers.append(peptide_sequence[i:len(peptide_sequence)] + peptide_sequence[0:j + 1])
    kmers = linear_kmers + cyclic_kmers
    return sorted(list(map(get_molecular_weight, kmers)))
1808daed80b553fe3a5a2b38e178956e4a0d7de0
3,656,160
def is_amazon(source_code):
    """
    Method checks whether a given book is a physical book or a ebook giveaway for a linked Amazon account.

    :param source_code: iterable of page-source lines to scan
    :return: True if the giveaway requires a linked Amazon account, else False
    """
    for line in source_code:
        if "Your Amazon Account" in line:
            return True
    return False
31c50622b4bb97a05d8cabb94c58f6e0a8f58971
3,656,161
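A small illustrative check for `is_amazon`, using made-up page lines (any iterable of strings works):

page = ["<html>", "Sign in with Your Amazon Account", "</html>"]
print(is_amazon(page))                                         # True
print(is_amazon(["<html>", "Paperback giveaway", "</html>"]))  # False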
import os import sys import subprocess def transdecodeToPeptide(sample_name, output_dir, rerun_rules, sample_dir, mets_or_mags = "mets", transdecoder_orf_size = 100, nt_ext = ".fasta", pep_ext = ".faa", run_transdecoder = False): """ Use TransDecoder to convert input nucleotide metatranscriptomic sequences to peptide sequences. """ if (not run_transdecoder): return 0 print("Running TransDecoder for sample " + str(sample_name) + "...", flush = True) os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder")) if (os.path.isfile(os.path.join(output_dir, mets_or_mags, sample_name + pep_ext))) & (not rerun_rules): print("TransDecoder file already detected for sample " + str(sample_name) + "; will not re-run step.", flush = True) return 0 elif (os.path.isfile(os.path.join(sample_dir, sample_name + pep_ext))) & (not rerun_rules): print("Protein files detected for sample in sample directory; " + "will not TransDecode.", flush = True) os.system("cp " + os.path.join(sample_dir, sample_name + pep_ext) + " " + os.path.join(output_dir, mets_or_mags, sample_name + pep_ext)) return 0 TD_log = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".log"), "w+") TD_err = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".err"), "w+") if (not os.path.isfile(os.path.join(sample_dir, sample_name + nt_ext))): print("File: " + os.path.join(sample_dir, sample_name + nt_ext) + " was called by TransDecoder and " "does not exist. Check for typos.") sys.exit(1) rc1 = subprocess.Popen(["TransDecoder.LongOrfs", "-t", os.path.join(sample_dir, sample_name + nt_ext), "-m", str(transdecoder_orf_size)], stdout = TD_log, stderr = TD_err).wait() TD_log.close() TD_err.close() TD_log = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".log"), "w+") TD_err = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".err"), "w+") rc2 = subprocess.Popen(["TransDecoder.Predict", "-t", os.path.join(sample_dir, sample_name + nt_ext), "--no_refine_starts"], stdout = TD_log, stderr = TD_err).wait() #rc2 = p2.returncode TD_log.close() TD_err.close() if (rc1 + rc2) != 0: print("TransDecoder did not complete successfully for sample " + str(sample_name) + ". Check <output_dir>/log/ folder for details.") sys.exit(1) merged_name = sample_name + nt_ext os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags)) os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder")) os.replace(merged_name + ".transdecoder.pep", os.path.join(output_dir, mets_or_mags, sample_name + pep_ext)) os.replace(merged_name + ".transdecoder.cds", os.path.join(output_dir, mets_or_mags, "transdecoder", sample_name + ".fasta.transdecoder.cds")) os.replace(merged_name + ".transdecoder.gff3", os.path.join(output_dir, mets_or_mags, "transdecoder", sample_name + ".fasta.transdecoder.gff3")) os.replace(merged_name + ".transdecoder.bed", os.path.join(output_dir, mets_or_mags, "transdecoder", sample_name + ".fasta.transdecoder.bed")) #shutil.rmtree os.system("rm -rf " + merged_name + "*.transdecoder_dir*") return rc1 + rc2
b22f520808104e4fc471c4af5a2288a5f23b84ae
3,656,162
def data_dim(p): """ Return the dimensionality of the dataset """ dataset_class = DATASETS[p.dataset] return dataset_class(p).get_in_dim()
25e32039733e8599c22d696f28bfffbf8b97cf02
3,656,163
import torch

def create_supervised_evaluator(model, metrics=None,
                                device=None, non_blocking=False,
                                prepare_batch=_prepare_batch,
                                output_transform=lambda x, y, y_pred: (y_pred, y,)):
    """
    Factory function for creating an evaluator for supervised models.

    Args:
        model (`torch.nn.Module`): the model to train.
        metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.
        non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
            with respect to the host. For other cases, this argument has no effect.
        prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
            tuple of tensors `(batch_x, batch_y)`.
        output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value
            to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
            output expected by metrics. If you change it you should use `output_transform` in metrics.

    Note: `engine.state.output` for this engine is defined by `output_transform` parameter and is
        a tuple of `(batch_pred, batch_y)` by default.

    Returns:
        Engine: an evaluator engine with supervised inference function.
    """
    metrics = metrics or {}

    if device:
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            # z is optional (e.g. task ids)
            x, y, *z = prepare_batch(batch, device=device, non_blocking=non_blocking)
            y_pred = model(*(x, *z))
            # if hasattr(model, 'arch_sampler'):
            #     ent = model.arch_sampler.entropy().mean()
            return output_transform(x, y, y_pred)

    engine = Engine(_inference)

    for name, metric in metrics.items():
        metric.attach(engine, name)

    return engine
2af4cc7b12a76c3c12940353a072d8b715fec8c1
3,656,164
import typing def historical_earning_calendar( apikey: str, symbol: str, limit: int = DEFAULT_LIMIT ) -> typing.Optional[typing.List[typing.Dict]]: """ Query FMP /historical/earning_calendar/ API. Note: Between the "from" and "to" parameters the maximum time interval can be 3 months. :param apikey: Your API key. :param symbol: Company ticker. :param limit: Number of rows to return. :return: A list of dictionaries. """ path = f"historical/earning_calendar/{symbol}" query_vars = { "apikey": apikey, "symbol": symbol, "limit": limit, } return __return_json_v3(path=path, query_vars=query_vars)
7f231b253ef4f462ab89826d58546a3259bdd3d2
3,656,165
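A hedged usage sketch for `historical_earning_calendar`; the key below is a placeholder, and the call needs network access plus the module's private `__return_json_v3` helper:

rows = historical_earning_calendar(apikey="YOUR_FMP_KEY", symbol="AAPL", limit=4)
for row in rows or []:  # the helper may return None on failure
    print(row.get("date"), row.get("eps"))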
def config_section_data(): """Produce the default configuration section for app.config, when called by `resilient-circuits config [-c|-u]` """ config_data = u"""[fn_query_tor_network] base_url = https://onionoo.torproject.org/details #The Flag can be 'Running','Exit' for more information on flag settings - https://metrics.torproject.org/onionoo.html flag = Exit # The data fields should be comma separated and no space should be given in between each fields data_fields = exit_addresses,or_addresses,host_name""" return config_data
239436c9b2141e17f6158aab20d7951d79359fcd
3,656,166
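The returned text is standard INI, so it can be round-tripped through `configparser` -- a minimal sketch:

import configparser

parser = configparser.ConfigParser()
parser.read_string(config_section_data())
print(parser.get("fn_query_tor_network", "flag"))         # Exit
print(parser.get("fn_query_tor_network", "data_fields"))  # exit_addresses,or_addresses,host_name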
def show_object_id_by_date(
    move_data,
    create_features=True,
    kind=None,
    figsize=(21, 9),
    return_fig=True,
    save_fig=True,
    name='shot_points_by_date.png',
):
    """
    Generates four visualizations based on datetime feature:

        - Bar chart trajectories by day periods
        - Bar chart trajectories day of the week
        - Line chart trajectory by date
        - Line chart of trajectory by hours of the day.

    Parameters
    ----------
    move_data : pymove.core.MoveDataFrameAbstract subclass.
        Input trajectory data.
    create_features : bool, optional, default True.
        Represents whether or not to delete features created for viewing.
    kind: list or None
        Determines the kinds of each plot
    figsize : tuple, optional, default (21,9).
        Represents dimensions of figure.
    return_fig : bool, optional, default True.
        Represents whether or not to return the generated figure.
    save_fig : bool, optional, default True.
        Represents whether or not to save the generated picture.
    name : String, optional, default 'shot_points_by_date.png'.
        Represents name of a file.

    Returns
    -------
    matplotlib.pyplot.figure or None
        The generated picture.

    References
    ----------
    https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.plot.html

    """

    if kind is None:
        kind = ['bar', 'bar', 'line', 'line']

    fig, ax = plt.subplots(2, 2, figsize=figsize)

    move_data.generate_date_features()
    move_data.generate_hour_features()
    move_data.generate_time_of_day_features()
    move_data.generate_day_of_the_week_features()

    move_data.groupby([PERIOD])[TRAJ_ID].nunique().plot(
        subplots=True, kind=kind[0], rot=0, ax=ax[0][0], fontsize=12
    )
    move_data.groupby([DAY])[TRAJ_ID].nunique().plot(
        subplots=True, kind=kind[1], ax=ax[0][1], rot=0, fontsize=12
    )
    move_data.groupby([DATE])[TRAJ_ID].nunique().plot(
        subplots=True,
        kind=kind[2],
        grid=True,
        ax=ax[1][0],
        rot=90,
        fontsize=12,
    )
    move_data.groupby([HOUR])[TRAJ_ID].nunique().plot(
        subplots=True, kind=kind[3], grid=True, ax=ax[1][1], fontsize=12
    )

    if not create_features:
        move_data.drop(columns=[DATE, HOUR, PERIOD, DAY], inplace=True)

    if save_fig:
        # plt.savefig() has no `fig` keyword; save through the figure itself
        fig.savefig(fname=name)

    if return_fig:
        return fig
18bbd54adfba6ecfd0959904d99698cfaac4b198
3,656,167
def raw_escape(pattern, unix=None, raw_chars=True): """Apply raw character transform before applying escape.""" return _wcparse.escape(util.norm_pattern(pattern, False, raw_chars, True), unix=unix, pathname=True, raw=True)
e4df84b21b737f199a7314818cc7f892f93be1b8
3,656,168
def interpolate_effective_area_per_energy_and_fov(
    effective_area,
    grid_points,
    target_point,
    min_effective_area=1. * u.Unit('m2'),
    method='linear',
):
    """
    Takes a grid of effective areas for a bunch of different parameters
    and interpolates (log) effective areas to given value of those parameters

    Parameters
    ----------
    effective_area: np.array of astropy.units.Quantity[area]
        grid of effective area, of shape (n_grid_points, n_fov_offset_bins, n_energy_bins)
    grid_points: np.array
        list of parameters corresponding to effective_area, of shape (n_grid_points, n_interp_dim)
    target_point: np.array
        values of parameters for which the interpolation is performed, of shape (n_interp_dim)
    min_effective_area: astropy.units.Quantity[area]
        Minimum value of effective area to be considered for interpolation
    method: 'linear', 'nearest' or 'cubic'
        Interpolation method

    Returns
    -------
    aeff_interp: astropy.units.Quantity[area]
        Interpolated effective area array with shape (n_energy_bins, n_fov_offset_bins)
    """

    # get rid of units
    effective_area = effective_area.to_value(u.m**2)
    min_effective_area = min_effective_area.to_value(u.m**2)

    # remove zeros and log it
    effective_area[effective_area < min_effective_area] = min_effective_area
    effective_area = np.log(effective_area)

    # interpolation
    aeff_interp = griddata(grid_points, effective_area, target_point, method=method).T

    # exp it and set to zero too low values
    aeff_interp = np.exp(aeff_interp)
    # 1.1 to correct for numerical uncertainty and interpolation
    aeff_interp[aeff_interp < min_effective_area * 1.1] = 0
    return u.Quantity(aeff_interp, u.m**2, copy=False)
58c32f49c96ed7ceb14e734f1386ef0015920204
3,656,169
def extract_edge(stats:np.ndarray, idxs_upper:np.ndarray, runner:int, max_index:int, maximum_offset:float, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:int = 5)->list:
    """Extract edges.

    Args:
        stats (np.ndarray): Stats array that contains summary statistics of hills.
        idxs_upper (np.ndarray): Upper index for comparing.
        runner (int): Index.
        max_index (int): Unused.
        maximum_offset (float): Maximum offset when comparing edges.
        iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
        iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
        iso_mass_range (int, optional): Mass search range. Defaults to 5.

    Returns:
        list: List of edges.
    """
    edges = []

    mass1 = stats[runner, 0]
    delta_mass1 = stats[runner, 1]

    for j in range(runner+1, idxs_upper[runner]):
        mass2 = stats[j, 0]
        if np.abs(mass2 - mass1) <= maximum_offset:
            delta_mass2 = stats[j, 1]
            for charge in range(iso_charge_min, iso_charge_max + 1):
                if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
                    edges.append((runner, j))
                    break

    return edges
8101a024c20d169f470d4e6632272e0ad00c484b
3,656,170
def _neq_attr(node, attr, gens, container): """ Calcs fitness based on the fact that node's target shall not have an attr with a certain value. """ trg_nd = container.nodes[gens[node]] if attr[0] in trg_nd and attr[1] == trg_nd[attr[0]]: return 10.1 return 0.0
adfa39aa60d0777b2b05f174a9cf61a847e55b1d
3,656,171
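A toy example for `_neq_attr`, assuming `container` is networkx-like so that `container.nodes[...]` yields an attribute dict, matching the access pattern above:

import networkx as nx

G = nx.Graph()
G.add_node('a', color='red')
gens = {'n1': 'a'}  # candidate node -> target node mapping
print(_neq_attr('n1', ('color', 'red'), gens, G))   # 10.1 -- penalty: attr matches
print(_neq_attr('n1', ('color', 'blue'), gens, G))  # 0.0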
def getItem( user, list, itempk ): """ Get a single item from a list. :param user: user who owns list :param list: list containing item :param itempk: private key of item :return: item or None """ itemType = list.itemType item = None if itemType == 'Item': item = Item.objects.get( pk=itempk, list=list, user=user ) elif itemType == 'Link': item = Link.objects.get( pk=itempk, list=list, user=user ) elif itemType == 'Book': item = Book.objects.get( pk=itempk, list=list, user=user ) elif itemType == 'Show' or itemType == 'Movie': item = Video.objects.get( pk=itempk, list=list, user=user ) return item
f0d2c3a6d1881e0e1288aae451a556ebe856242e
3,656,172
def metric_divergence(neighborhood_vectors: np.ndarray, dL: float, polarity: int) -> float: """ Calculates the divergence of a sampling volume neighborhood. Note: For JIT to work, this must be declared at the top level. @param neighborhood_vectors: Sampling volume neighborhood vectors (six 3D vectors) @param dL: Length element @param polarity: Polarity filter (-1: Keep values <= 0; 0: Keep all values; +1: Keep values >= 0) """ dxp = neighborhood_vectors[0][0] dxn = neighborhood_vectors[3][0] dyp = neighborhood_vectors[1][1] dyn = neighborhood_vectors[4][1] dzp = neighborhood_vectors[2][2] dzn = neighborhood_vectors[5][2] value = (dxp - dxn + dyp - dyn + dzp - dzn) / 2 / dL if polarity == -1: if value > 0: return np.NaN else: return -value # Keep divergence positive, especially for use as alpha metric elif polarity == +1: if value < 0: return np.NaN else: return value else: return value
87dd2b19c654143ed54f3783059ece50eb32ec71
3,656,173
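A numerical sanity check for `metric_divergence`: sampling the radial field F(r) = r at the six axis neighbors of the origin should give div F = 3 exactly:

import numpy as np

dL = 0.1
neighborhood = np.array([
    [+dL, 0.0, 0.0],   # field vector at the +x neighbor
    [0.0, +dL, 0.0],   # +y
    [0.0, 0.0, +dL],   # +z
    [-dL, 0.0, 0.0],   # -x
    [0.0, -dL, 0.0],   # -y
    [0.0, 0.0, -dL],   # -z
])
print(metric_divergence(neighborhood, dL, 0))  # 3.0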
import argparse def parse_args(): """ Parse command line arguments. Parameters: None Returns: parser arguments """ parser = argparse.ArgumentParser(description='LeNet model') optional = parser._action_groups.pop() required = parser.add_argument_group('required arguments') required.add_argument('--dataset', dest='dataset', help='Choice of dataset to train model', choices=['mnist', 'cifar10'], default=None) optional.add_argument('--print_model', dest='print_model', help='Print LeNet model', action='store_true') optional.add_argument('--train_model', dest='train_model', help='Train LeNet on MNIST', action='store_true') optional.add_argument('-s', '--save_weights', dest='save_weights', help='Save the trained weights', default=None) optional.add_argument('-w', '--weights', dest='weights', help='Path to weights (hdf5) file', default=None) optional.add_argument('-e', '--epochs', dest='epochs', help='Number of epochs for training', type=int, default=20) optional.add_argument('--data_augmentation', dest='data_augmentation', help='Use data augmentations for input', action='store_true') optional.add_argument('--viz_training', dest='viz_training', help='Visualize the training curve', action='store_true') parser._action_groups.append(optional) return parser.parse_args()
6a93e1083ebc8fad5f0698b2e0a4eb125af2806f
3,656,174
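Typical invocations of the parser above; the script name is hypothetical:

#   python lenet.py --dataset mnist --train_model -e 5 --viz_training
import sys

sys.argv = ['lenet.py', '--dataset', 'mnist', '--train_model', '-e', '5']
args = parse_args()
print(args.dataset, args.epochs, args.train_model)  # mnist 5 True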
def tag(request): """ Add/Remove tag to email """ if request.is_ajax(): mail = request.POST.get("mail") tag = request.POST.get("tag") op = request.POST.get("op") mail = get_object_or_404(Mail, pk=mail) if op == "ADD": mail.tags.add(tag) elif op == "REMOVE": mail.tags.remove(tag) else: raise Http404("404") return JsonResponse({"ok": True}) raise Http404("404")
b1f5c2e65393be1d68a03b01c522214413e5b321
3,656,175
import pubchempy as pc

def sid_to_smiles(sid):
    """Takes an SID and returns the associated isomeric SMILES string."""

    substance = pc.Substance.from_sid(sid)
    cid = substance.standardized_cid
    compound = pc.get_compounds(cid)[0]

    return compound.isomeric_smiles
e243e201a8ac4e4ee63332454a8b8c64f0f43692
3,656,176
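A hedged usage sketch for `sid_to_smiles`; resolving an SID requires `pubchempy` and a live connection to PubChem, and the SID below is purely illustrative:

smiles = sid_to_smiles(3712)  # hypothetical SID
print(smiles)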
def view_static(request, **kwargs): """Outputs static page.""" template = kwargs.get('template', None) if not template: raise Http404 template = '.'.join([template, 'html']) title = kwargs.get('title', 'static page') img = kwargs.get('img', 'bgag.jpg') return render_to_response(template, { 'is_mobile': request.user_agent.is_mobile, 'page_title': title, 'menu': MenuItem.active().order_by('order_id'), 'page_img': img, })
b6997e86175688f9b1293b0888faeb337bb5f3b6
3,656,177
def start_call(called_ident, skicall): """When a call is initially received this function is called. Unless you want to divert to another page, this function should return called_ident which would typically be the ident of a Responder or Template page dealing with the call. If a ServeFile exception is raised, which contains a pathlib.Path object of a local server file then that server file will be sent to the client. In this case, the end_call function will not be called.""" # To serve a directory of static files, you can map a url to a server directory with the # skicall.map_url_to_server method, which returns pathlib.Path objects, and then # raise a ServeFile exception, which causes the file to be served. For example: # servedfile = skicall.map_url_to_server("images", "/home/user/thisproject/imagefiles") # if servedfile: # raise ServeFile(servedfile) # Of particular interest at this point are the attributes: # skicall.received_cookies is a dictionary of cookie name:values received from the client # skicall.call_data is a dictionary which you can set with your own data and, as skicall is # passed on to the submit_data and end_call functions defined below, can be used to pass # data to these functions. # Normally you would return called_ident, which is the page being called, or None to cause a # page not found error, or another ident (project, pagenumber) to divert the call to another page. return called_ident
0353d81273ea6638858bf18271f4480895ca1db1
3,656,178
def getmemory(): """ Returns the memory limit for data arrays (in MB). """ return NX_MEMORY
f6850ac2ad5854f9798ef480e9ca105bf31644ed
3,656,179
import sys

# `import this` only loads the Zen-of-Python easter egg and cannot supply the
# callback ids referenced below; the usual module self-reference pattern is
# assumed here instead.
this = sys.modules[__name__]

def get_object_syncing_state():
    """ Get a dictionary mapping which object trackers are active.

    The dictionary contains name:bool pairs that can be fed back into
    the func:`set_object_syncing_state()` function.
    """
    states = {
        "selection": bool(this._on_selection_changed_cb_id),
        "duplicate": bool(this._on_before_duplicate_cb_id),
        "name": bool(this._on_name_changed_cb_id),
        "existence": bool(this._on_object_deleted_cb_id),
        "relationship": bool(this._on_parent_changed_cb_id),
    }
    return states
c6fa40e7945b8186db06cc00b461fc2fe6a16c36
3,656,180
def determine_nohit_score(cons, invert):
    """
    Determine the value in the matrix assigned to nohit given SeqFindr options

    :param cons: whether the Seqfindr run is using mapping consensus data
                 or not
    :param invert: whether the Seqfindr run is inverting (missing hits to
                   be shown as black bars).

    :type cons: None or boolean
    :type invert: boolean

    :returns: the value defined as no hit in the results matrix
    """
    if cons is None:
        nohit = 0.5
    else:
        nohit = 1.0
    if invert:
        nohit = nohit*-1.0
    return nohit
d0539b5ac4dda8b4a15c6800fb4a821cb305b319
3,656,181
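The four possible outputs of `determine_nohit_score`, enumerated:

print(determine_nohit_score(None, False))  # 0.5  -- no consensus data
print(determine_nohit_score(True, False))  # 1.0  -- consensus data
print(determine_nohit_score(None, True))   # -0.5 -- inverted
print(determine_nohit_score(True, True))   # -1.0 -- consensus, inverted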
def estimate_csd(lfp, coord_electrode, sigma, method='standard',
                 diam=None, h=None, sigma_top=None, tol=1E-6,
                 num_steps=200, f_type='identity', f_order=None):
    """
    Estimates current source density (CSD) from local field potential (LFP)
    recordings from multiple depths of the cortex.

    Parameters
    ----------
    lfp : neo.AnalogSignalArray
        LFP signals from which CSD is estimated.
    coord_electrode : Quantity array
        Depth of evenly spaced electrode contact points.
    sigma : Quantity float
        Conductivity of tissue.
    method : string
        CSD estimation method, either of 'standard': the standard
        double-derivative method, 'delta': delta-iCSD method, 'step':
        step-iCSD method, 'spline': spline-iCSD method. Default is 'standard'
    diam : Quantity float
        Diameter of the assumed circular planar current sources centered at
        each contact, required by iCSD methods (= 'delta', 'step', 'spline').
        Default is `None`.
    h : float or np.ndarray * quantity.Quantity
        assumed thickness of the source cylinders at all or each contact
    sigma_top : Quantity float
        Conductivity on top of tissue. When set to `None`, the same value as
        sigma is used. Default is `None`.
    tol : float
        Tolerance of numerical integration, required by step- and
        spline-iCSD methods. Default is 1E-6.
    num_steps : int
        Number of data points for the spatially upsampled LFP/CSD data,
        required by spline-iCSD method. Default is 200.
    f_type : string
        Type of spatial filter used for smoothing of the result, either of
        'boxcar' (uses `scipy.signal.boxcar()`), 'hamming'
        (`scipy.signal.hamming()`), 'triangular' (`scipy.signal.tri()`),
        'gaussian' (`scipy.signal.gaussian`), 'identity' (no smoothing is
        applied). Default is 'identity'.
    f_order : float tuple
        Parameters to be passed to the scipy.signal function associated with
        the specified filter type.

    Returns
    -------
    tuple : (csd, csd_filtered)
        csd : neo.AnalogSignalArray
            Estimated CSD
        csd_filtered : neo.AnalogSignalArray
            Estimated CSD, spatially filtered

    Example
    -------
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy import io
    import quantities as pq
    import neo

    import icsd

    #loading test data
    test_data = io.loadmat('test_data.mat')

    #prepare lfp data for use, by changing the units to SI and append
    #quantities, along with electrode geometry and conductivities
    lfp_data = test_data['pot1'] * 1E-3 * pq.V        # [mV] -> [V]
    z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m  # [m]
    diam = 500E-6 * pq.m                              # [m]
    sigma = 0.3 * pq.S / pq.m                         # [S/m] or [1/(ohm*m)]
    sigma_top = 0. * pq.S / pq.m                      # [S/m] or [1/(ohm*m)]

    lfp = neo.AnalogSignalArray(lfp_data.T, sampling_rate=2.0*pq.kHz)

    # Input dictionaries for each method
    params = {}
    params['delta'] = {
        'method': 'delta',
        'lfp' : lfp,
        'coord_electrode' : z_data,
        'diam' : diam,        # source diameter
        'sigma' : sigma,      # extracellular conductivity
        'sigma_top' : sigma,  # conductivity on top of cortex
    }
    params['step'] = {
        'method': 'step',
        'lfp' : lfp,
        'coord_electrode' : z_data,
        'diam' : diam,
        'sigma' : sigma,
        'sigma_top' : sigma,
        'tol' : 1E-12,        # Tolerance in numerical integration
    }
    params['spline'] = {
        'method': 'spline',
        'lfp' : lfp,
        'coord_electrode' : z_data,
        'diam' : diam,
        'sigma' : sigma,
        'sigma_top' : sigma,
        'num_steps' : 201,    # Spatial CSD upsampling to N steps
        'tol' : 1E-12,
    }
    params['standard'] = {
        'method': 'standard',
        'lfp' : lfp,
        'coord_electrode' : z_data,
        'sigma' : sigma,
    }

    #plot LFP signal
    fig, axes = plt.subplots(len(params)+1, 1, figsize=(6, 8))
    ax = axes[0]
    im = ax.imshow(lfp.magnitude.T, origin='upper', vmin=-abs(lfp).max(),
                   vmax=abs(lfp).max(), cmap='jet_r', interpolation='nearest')
    ax.axis(ax.axis('tight'))
    cb = plt.colorbar(im, ax=ax)
    cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
    ax.set_xticklabels([])
    ax.set_title('LFP')
    ax.set_ylabel('ch #')

    i_ax = 1
    for method, param in params.items():
        ax = axes[i_ax]
        i_ax += 1
        csd = icsd.estimate_csd(**param)
        im = ax.imshow(csd.magnitude.T, origin='upper', vmin=-abs(csd).max(),
                       vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
        ax.axis(ax.axis('tight'))
        ax.set_title(method)
        cb = plt.colorbar(im, ax=ax)
        cb.set_label('CSD (%s)' % csd.dimensionality.string)
        ax.set_xticklabels([])
        ax.set_ylabel('ch #')

    plt.show()
    """
    supported_methods = ('standard', 'delta', 'step', 'spline')
    icsd_methods = ('delta', 'step', 'spline')

    if method not in supported_methods:
        print("Parameter `method` must be either of {}".format(
            ", ".join(supported_methods)))
        raise ValueError
    elif method in icsd_methods and diam is None:
        print("Parameter `diam` must be specified for iCSD methods: {}".format(
            ", ".join(icsd_methods)))
        raise ValueError

    if not isinstance(lfp, neo.AnalogSignalArray):
        print('Parameter `lfp` must be neo.AnalogSignalArray')
        raise TypeError

    # `!=` compares values; `is not` compares object identity and only works
    # for interned string literals by accident
    if f_type != 'identity' and f_order is None:
        print("The order of {} filter must be specified".format(f_type))
        raise ValueError

    lfp_pqarr = lfp.magnitude.T * lfp.units
    if sigma_top is None:
        sigma_top = sigma

    arg_dict = {'lfp': lfp_pqarr,
                'coord_electrode': coord_electrode,
                'sigma': sigma,
                'f_type': f_type,
                'f_order': f_order,
                }
    if method == 'standard':
        csd_estimator = StandardCSD(**arg_dict)
    else:
        arg_dict['diam'] = diam
        arg_dict['sigma_top'] = sigma_top
        if method == 'delta':
            csd_estimator = DeltaiCSD(**arg_dict)
        else:
            arg_dict['tol'] = tol
            if method == 'step':
                arg_dict['h'] = h
                csd_estimator = StepiCSD(**arg_dict)
            else:
                arg_dict['num_steps'] = num_steps
                csd_estimator = SplineiCSD(**arg_dict)

    csd_pqarr = csd_estimator.get_csd()
    csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr)

    csd = neo.AnalogSignalArray(csd_pqarr.T, t_start=lfp.t_start,
                                sampling_rate=lfp.sampling_rate)
    csd_filtered = neo.AnalogSignalArray(csd_pqarr_filtered.T, t_start=lfp.t_start,
                                         sampling_rate=lfp.sampling_rate)
    return csd, csd_filtered
eefe158dc93d9d2be23f7754b658c1f812cd8524
3,656,182
def library_get_monomer_desc(res_name):
    """Loads/caches/returns the monomer description object MonomerDesc
    for the given monomer residue name.
    """
    assert isinstance(res_name, str)

    try:
        return MONOMER_RES_NAME_CACHE[res_name]
    except KeyError:
        pass

    mon_desc = library_construct_monomer_desc(res_name)
    if mon_desc is None:
        return None

    MONOMER_RES_NAME_CACHE[res_name] = mon_desc
    return mon_desc
98b4790995bd1d2eba96775e99826fae7b7cfc8a
3,656,183
def parse_single_sequence_example( serialized, context_features=None, sequence_features=None, example_name=None, name=None): # pylint: disable=line-too-long """Parses a single `SequenceExample` proto. Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) proto given in `serialized`. This op parses a serialize sequence example into a tuple of dictionaries mapping keys to `Tensor` and `SparseTensor` objects respectively. The first dictionary contains mappings for keys appearing in `context_features`, and the second dictionary contains mappings for keys appearing in `sequence_features`. At least one of `context_features` and `sequence_features` must be provided and non-empty. The `context_features` keys are associated with a `SequenceExample` as a whole, independent of time / frame. In contrast, the `sequence_features` keys provide a way to access variable-length data within the `FeatureList` section of the `SequenceExample` proto. While the shapes of `context_features` values are fixed with respect to frame, the frame dimension (the first dimension) of `sequence_features` values may vary between `SequenceExample` protos, and even between `feature_list` keys within the same `SequenceExample`. `context_features` contains `VarLenFeature` and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature` is mapped to a `Tensor`, of the specified type, shape, and default value. `sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type. The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where `T` is the length of the associated `FeatureList` in the `SequenceExample`. For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of static shape `[None]` and dynamic shape `[T]`, while `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor` of static shape `[None, k]` and dynamic shape `[T, k]`. Each `SparseTensor` corresponding to `sequence_features` represents a ragged vector. Its indices are `[time, index]`, where `time` is the `FeatureList` entry and `index` is the value's index in the list of values associated with that time. `FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature` entries with `allow_missing=True` are optional; otherwise, we will fail if that `Feature` or `FeatureList` is missing from any example in `serialized`. `example_name` may contain a descriptive name for the corresponding serialized proto. This may be useful for debugging purposes, but it has no effect on the output. If not `None`, `example_name` must be a scalar. Args: serialized: A scalar (0-D Tensor) of type string, a single binary serialized `SequenceExample` proto. context_features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. These features are associated with a `SequenceExample` as a whole. sequence_features: A `dict` mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature` values. These features are associated with data within the `FeatureList` section of the `SequenceExample` proto. example_name: A scalar (0-D Tensor) of strings (optional), the name of the serialized proto. name: A name for this operation (optional). Returns: A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s. 
The first dict contains the context key/values. The second dict contains the feature_list key/values. Raises: ValueError: if any feature is invalid. """ # pylint: enable=line-too-long if not (context_features or sequence_features): raise ValueError("Missing features.") (context_sparse_keys, context_sparse_types, context_dense_keys, context_dense_types, context_dense_defaults, context_dense_shapes) = _features_to_raw_params( context_features, [VarLenFeature, FixedLenFeature]) (feature_list_sparse_keys, feature_list_sparse_types, feature_list_dense_keys, feature_list_dense_types, feature_list_dense_defaults, feature_list_dense_shapes) = _features_to_raw_params( sequence_features, [VarLenFeature, FixedLenSequenceFeature]) return _parse_single_sequence_example_raw( serialized, context_sparse_keys, context_sparse_types, context_dense_keys, context_dense_types, context_dense_defaults, context_dense_shapes, feature_list_sparse_keys, feature_list_sparse_types, feature_list_dense_keys, feature_list_dense_types, feature_list_dense_shapes, feature_list_dense_defaults, example_name, name)
89309aab313b89224a87cb3cf7f4d56356981885
3,656,184
import os
import numpy as np
import matplotlib.pyplot as plt

def getBase64PNGImage(pD, cmapstr, logfloor_quantile=0):
    """
    Get an image as a base64 string
    """
    D = np.array(pD)
    if logfloor_quantile > 0:
        floor = np.quantile(pD.flatten(), logfloor_quantile)
        D = np.log(D + floor)
    c = plt.get_cmap(cmapstr)
    D = D - np.min(D)
    D = np.round(255.0*D/np.max(D))
    C = c(np.array(D, dtype=np.int32))
    # scipy.misc.imsave was removed from SciPy; matplotlib (already used for
    # the colormap) can write the PNG instead
    plt.imsave("temp.png", C)
    b = getBase64File("temp.png")
    os.remove("temp.png")
    return "data:image/png;base64, " + b
9fa7a41624d14943e48221e9277320324f575d33
3,656,185
import sys def method_dispatcher(*args, **kwargs): """Try to dispatch to the right HTTP method handler. If an HTTP method isn't on the approved list, defer to the error handler. Otherwise, the HTTP Method is processed by the appropriate handler. :param args: Expect arguments in format (http_method, url). :param kwargs: Optional arguments like HTTP headers, cookies and etc. :returns: Result from the handler. :rtype: func """ http_method, url = args if http_method not in constants.HTTP_METHOD_NAMES: raise HTTPMethodNotSupportedError(http_method) handler = getattr(sys.modules[__name__], http_method) return handler(*args, **kwargs)
7c680aae25de4b158db0befa0963923ddb903f8a
3,656,186
def _seqfix(ref_seq, seq, comp_len, rev): """ Fill or trim a portion of the beginning of a sequence relative to a reference sequence Args: ref_seq (str): reference sequence e.g. germline gene seq (str): sequence to compare to reference comp_len (int): length of subsequence to compare e.g. necessary to exclude the CDR3 rev (bool): whether to reverse the sequences for J gene filling / trimming Returns: seq_fixed (str): sequence filled / trimmed as necessary """ if rev: ref_comp = ref_seq[::-1][:comp_len] seq_comp = seq[::-1][:comp_len] else: ref_comp = ref_seq[:comp_len] seq_comp = seq[:comp_len] ref_aligned, seq_aligned = global_pw_align(ref_comp, seq_comp) # replace N's in seq if present seq_aligned = _replace_Ns_with_ref(ref_aligned, seq_aligned) if ref_aligned.startswith('-'): # need to trim sequence fixed = _trim_extra_nt(ref_aligned, seq_aligned) elif seq_aligned.startswith('-'): # need to fill sequence fixed = _fill_missing_nt(ref_aligned, seq_aligned) else: fixed = seq_aligned if rev: seq_fixed = seq[:-comp_len] + fixed[::-1] else: seq_fixed = fixed + seq[comp_len:] return seq_fixed.replace('-', '')
222ba3a8e2c4bced8ebcde6662890c10a0b41cf8
3,656,187
import torch
from torch.nn.utils.rnn import pad_sequence
from typing import Tuple

def get_dedup_tokens(logits_batch: torch.Tensor) \
        -> Tuple[torch.Tensor, torch.Tensor]:
    """Converts a batch of logits into the batch most probable tokens and their probabilities.

    Args:
      logits_batch (Tensor): Batch of logits (N x T x V).

    Returns:
      Tuple: Deduplicated tokens. The first element is a tensor (token indices) and the second element
             is a tensor (token probabilities)
    """
    logits_batch = logits_batch.softmax(-1)
    out_tokens, out_probs = [], []
    for i in range(logits_batch.size(0)):
        logits = logits_batch[i]
        max_logits, max_indices = torch.max(logits, dim=-1)
        max_logits = max_logits[max_indices != 0]
        max_indices = max_indices[max_indices != 0]
        cons_tokens, counts = torch.unique_consecutive(
            max_indices, return_counts=True)
        out_probs_i = torch.zeros(len(counts), device=logits.device)
        ind = 0
        for j, c in enumerate(counts):  # renamed from `i` to avoid shadowing the batch index
            max_logit = max_logits[ind:ind + c].max()
            out_probs_i[j] = max_logit
            ind = ind + c
        out_tokens.append(cons_tokens)
        out_probs.append(out_probs_i)

    out_tokens = pad_sequence(out_tokens, batch_first=True, padding_value=0.).long()
    out_probs = pad_sequence(out_probs, batch_first=True, padding_value=0.)

    return out_tokens, out_probs
885048842e6d1b50cd5b98c5b455aeb71e49c191
3,656,188
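A small CTC-style decoding example for `get_dedup_tokens`, where index 0 plays the role of the blank token:

import torch

# one sequence, T=4 steps, V=3 classes; per-step argmax is [1, 1, 0, 2]
logits = torch.tensor([[[0.1, 5.0, 0.2],
                        [0.1, 4.0, 0.2],
                        [6.0, 0.1, 0.2],
                        [0.1, 0.2, 3.0]]])
tokens, probs = get_dedup_tokens(logits)
print(tokens)  # tensor([[1, 2]]) -- blank dropped, the repeated 1 collapsed
print(probs)   # per-run maxima of the softmax probabilities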
def com(im): """ Compute the center of mass of im. Expects that im is leveled (ie zero-centered). Ie, a pure noise image should have zero mean. Sometimes this is improved if you square the im first com(im**2) Returns: y, x in array form. """ im = np.nan_to_num(im) mass = np.sum(im) ry = ( np.arange(im.shape[0]) + 0.5 ) # 0.5 because we want the mass of a pixel at the center of the pixel rx = np.arange(im.shape[1]) + 0.5 y = np.sum(ry * np.sum(im, axis=1)) x = np.sum(rx * np.sum(im, axis=0)) return utils.np_safe_divide(np.array([y, x]), mass)
5e1a7c20075df3fe5804213e5fdddd4f46d276c6
3,656,189
def get_fitting_custom_pipeline():
    """
    Pipeline looking like this
    lagged -> custom -> lasso
    """
    lagged_node = PrimaryNode('lagged')
    lagged_node.custom_params = {'window_size': 50}

    # For a custom node, custom_params supply the initial approximation and
    # the model itself must be passed in as fit/predict functions
    custom_node = SecondaryNode('custom', nodes_from=[lagged_node])
    custom_node.custom_params = {'alpha': 5,
                                 'model_predict': custom_ml_model_imitation_predict,
                                 'model_fit': custom_ml_model_imitation_fit}

    node_final = SecondaryNode('lasso', nodes_from=[custom_node])
    pipeline = Pipeline(node_final)
    return pipeline
3ed78dc2f83110b0ac7dd4622a76511d0316404f
3,656,190
def get_regularizable_variables(scope): """ Get *all* regularizable variables in the scope. :param scope: scope to filter variables by :return: """ return tf.get_collection(REGULARIZABLE_VARS, scope)
67a6673be12af47128a453e413778f18f4344eaa
3,656,191
import glob
import tensorflow as tf

def load(midi_path: str, config: dict):
    """
    returns a 3-tuple of `tf.Dataset` each returning `(input_seq, target_seq)`,
    representing train, validation, and test portions of the overall dataset.
    `input_seq` represents the `inp_split` portion of each midi sequence
    in `midi_path`.
    """
    batch_size = config.get('batch_size', None)

    # get midi files
    filenames = tf.random.shuffle(glob.glob(f'{midi_path}/**/*.midi', recursive=True))

    # get train, validation, and test sizes
    train_split, test_split = config.get('train_size', None), config.get('test_size', None)
    train_split = int(train_split * len(filenames))
    test_split = int(test_split * len(filenames))
    val_split = len(filenames) - (train_split + test_split)

    # split filenames to train, test, split
    midi_ds, midi_tokenizer = _create_dataset(
        filenames=filenames,
        inp_len=config.get('inp_len', None),
        tar_len=config.get('tar_len', None),
        velocity_bins=config.get('velocity_bins', None),
        rest_resolution=config.get('rest_resolution', None))
    # take/skip from the full dataset so the three splits are disjoint;
    # skipping from train_ds itself (as the original code did) yields empty splits
    train_ds = midi_ds.take(train_split)
    remaining = midi_ds.skip(train_split)
    val_ds = remaining.take(val_split)
    test_ds = remaining.skip(val_split)

    return (_optimize_dataset(train_ds.padded_batch(batch_size)),
            _optimize_dataset(val_ds.padded_batch(batch_size)),
            _optimize_dataset(test_ds.padded_batch(batch_size)),
            midi_tokenizer)
38d890a78cf85ce43cbdb783246ef1a5e7e2cd06
3,656,192
import re def _parse_docstring(doc): """Extract documentation from a function's docstring.""" if doc is None: return _Doc('', '', {}, []) # Convert Google- or Numpy-style docstrings to RST. # (Should do nothing if not in either style.) # use_ivar avoids generating an unhandled .. attribute:: directive for # Attribute blocks, preferring a benign :ivar: field. cfg = Config(napoleon_use_ivar=True) doc = str(GoogleDocstring(doc, cfg)) doc = str(NumpyDocstring(doc, cfg)) with _sphinx_common_roles(): tree = docutils.core.publish_doctree( # Disable syntax highlighting, as 1) pygments is not a dependency # 2) we don't render with colors and 3) SH breaks the assumption # that literal blocks contain a single text element. doc, settings_overrides={'syntax_highlight': 'none'}) class Visitor(NodeVisitor): optional = [ 'document', 'docinfo', 'field_list', 'field_body', 'literal', 'problematic', # Introduced by our custom passthrough handlers, but the Visitor # will recurse into the inner text node by itself. 'TextElement', ] def __init__(self, document): super().__init__(document) self.paragraphs = [] self.start_lines = [] self.params = defaultdict(dict) self.raises = [] self._current_paragraph = None self._indent_iterator_stack = [] self._indent_stack = [] def _do_nothing(self, node): pass def visit_paragraph(self, node): self.start_lines.append(node.line) self._current_paragraph = [] def depart_paragraph(self, node): text = ''.join(self._current_paragraph) text = ''.join(self._indent_stack) + text self._indent_stack = [ ' ' * len(item) for item in self._indent_stack] text = text.replace('\n', '\n' + ''.join(self._indent_stack)) self.paragraphs.append(text) self._current_paragraph = None visit_block_quote = visit_doctest_block = visit_paragraph depart_block_quote = depart_doctest_block = depart_paragraph def visit_Text(self, node): self._current_paragraph.append(node) depart_Text = _do_nothing def visit_emphasis(self, node): self._current_paragraph.append('\033[3m') # *foo*: italic def visit_strong(self, node): self._current_paragraph.append('\033[1m') # **foo**: bold def visit_title_reference(self, node): self._current_paragraph.append('\033[4m') # `foo`: underlined def _depart_markup(self, node): self._current_paragraph.append('\033[0m') depart_emphasis = depart_strong = depart_title_reference = \ _depart_markup def visit_rubric(self, node): self.visit_paragraph(node) def depart_rubric(self, node): # Style consistent with "usage:", "positional arguments:", etc. 
self._current_paragraph[:] = [ (t.lower() if t == t.title() else t) + ':' for t in self._current_paragraph] self.depart_paragraph(node) def visit_literal_block(self, node): text, = node self.start_lines.append(node.line) self.paragraphs.append( re.sub('^|\n', r'\g<0> ', text)) # indent raise SkipNode def visit_bullet_list(self, node): self._indent_iterator_stack.append( (node['bullet'] + ' ' for _ in range(len(node)))) def depart_bullet_list(self, node): self._indent_iterator_stack.pop() def visit_enumerated_list(self, node): enumtype = node['enumtype'] fmt = {('(', ')'): 'parens', ('', ')'): 'rparen', ('', '.'): 'period'}[node['prefix'], node['suffix']] start = node.get('start', 1) enumerators = [Body(None).make_enumerator(i, enumtype, fmt)[0] for i in range(start, start + len(node))] width = max(map(len, enumerators)) enumerators = [enum.ljust(width) for enum in enumerators] self._indent_iterator_stack.append(iter(enumerators)) def depart_enumerated_list(self, node): self._indent_iterator_stack.pop() def visit_list_item(self, node): self._indent_stack.append(next(self._indent_iterator_stack[-1])) def depart_list_item(self, node): self._indent_stack.pop() def visit_field(self, node): field_name_node, field_body_node = node field_name, = field_name_node parts = field_name.split() if len(parts) == 2: doctype, name = parts # docutils>=0.16 represents \* as \0* in the doctree. name = name.lstrip('*\0') elif len(parts) == 3: doctype, type_, name = parts name = name.lstrip('*\0') if doctype not in _PARAM_TYPES: raise SkipNode if 'type' in self.params[name]: raise ValueError('type defined twice for {}'.format(name)) self.params[name]['type'] = type_ else: raise SkipNode if doctype in _PARAM_TYPES: doctype = 'param' if doctype in _TYPE_NAMES: doctype = 'type' if doctype in ['param', 'type'] and doctype in self.params[name]: raise ValueError( '{} defined twice for {}'.format(doctype, name)) visitor = Visitor(self.document) field_body_node.walkabout(visitor) if doctype in ['param', 'type']: self.params[name][doctype] = ''.join(visitor.paragraphs) elif doctype in ['raises']: self.raises.append(name) raise SkipNode def visit_comment(self, node): self.paragraphs.append(comment_token) # Comments report their line as the *end* line of the comment. self.start_lines.append( node.line - node.children[0].count('\n') - 1) raise SkipNode def visit_system_message(self, node): raise SkipNode comment_token = object() visitor = Visitor(tree) tree.walkabout(visitor) tuples = {name: _Param(values.get('param'), values.get('type')) for name, values in visitor.params.items()} if visitor.paragraphs: text = [] for start, paragraph, next_start in zip( visitor.start_lines, visitor.paragraphs, visitor.start_lines[1:] + [0]): if paragraph is comment_token: continue text.append(paragraph) # Insert two newlines to separate paragraphs by a blank line. # Actually, paragraphs may or may not already have a trailing # newline (e.g. text paragraphs do but literal blocks don't) but # argparse will strip extra newlines anyways. This means that # extra blank lines in the original docstring will be stripped, but # this is less ugly than having a large number of extra blank lines # arising e.g. from skipped info fields (which are not rendered). # This means that list items are always separated by blank lines, # which is an acceptable tradeoff for now. text.append('\n\n') parsed = _Doc(text[0], ''.join(text), tuples, visitor.raises) else: parsed = _Doc('', '', tuples, visitor.raises) return parsed
ff4e3ce300748c32c2e65129c381f1e74912f4a1
3,656,193
def extract_remove_outward_edges_filter(exceptions_from_removal):
    """
    This creates a closure that goes through the list of tuples to explicitly
    state which edges are leaving from the first argument of each tuple.

    Each tuple that is passed in has two members. The first member is a string
    representing a single node from which the children will be explicitly stated.
    The second member is the list of nodes that are in its child set. If that
    list is empty, every outward edge of the node is removed. This covers both
    barren_nodes and explicit_parent_offspring.
    """
    def remove_outward_edges_filter(G):
        graph = G.copy()
        list_of_parents = [x[0] for x in exceptions_from_removal if len(x[1]) > 0]
        list_of_barrens = [x[0] for x in exceptions_from_removal if len(x[1]) == 0]
        for barren in list_of_barrens:
            graph.remove_edges_from([edge for edge in graph.edges() if edge[0] == barren])
        for parent in list_of_parents:
            current_edges = graph.out_edges(parent)
            valid_edges = [(x[0], y) for x in exceptions_from_removal if x[0] == parent for y in x[1]]
            graph.remove_edges_from([edge for edge in current_edges if edge not in valid_edges])
        return graph
    return remove_outward_edges_filter
543e5823b8375cbdec200988ea5dd0c4f2d23d05
3,656,194
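A worked example with networkx: keep only a->b among a's out-edges and strip every out-edge of the barren node d:

import networkx as nx

G = nx.DiGraph([("a", "b"), ("a", "c"), ("d", "e")])
filt = extract_remove_outward_edges_filter([("a", ["b"]), ("d", [])])
H = filt(G)
print(sorted(H.edges()))  # [('a', 'b')]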
import torch def ln_addTH(x : torch.Tensor, beta : torch.Tensor) -> torch.Tensor: """ out = x + beta[None, :, None] """ return x + beta[None, :, None]
77e556c41a33a8c941826604b4b595ea7d456f9a
3,656,195
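A shape-level sketch of `ln_addTH`, which broadcasts a per-channel shift over a (batch, channels, length) tensor:

import torch

x = torch.randn(2, 4, 8)    # (batch, channels, length)
beta = torch.zeros(4)       # one shift per channel
out = ln_addTH(x, beta)
print(out.shape)            # torch.Size([2, 4, 8])
print(torch.equal(out, x))  # True -- zero beta is the identity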
def drude2(tags, e, p): """dielectric function according to Drude theory for fitting""" return drude(e, p[0], p[1], p[2], p[3])
8032c61df099f6c1ac671f2b81c3bb93d1f81317
3,656,196
def ParseFile(path): """Parse function names and comments from a .h path. Returns mapping from function name to comment. """ result = {} with open(path, 'r') as fp: lines = fp.readlines() i = 0 n = len(lines) while i < n: line = lines[i] m = MCRE.match(line) if m and not m.group('rest') and not m.group('params'): # Looks like a function definition. Consume all adjacent following # comment lines. name = m.group('name') tmpl = m.group('tmpl') params = m.group('params') if tmpl is not None: name += '<%s>' % tmpl if params is not None: name += '(%s)' % params # print '%3d: %s' % (i+1, m.groupdict()) comments = [] i += 1 while i < n: m = CRE.match(lines[i]) if not m: break comments.append(m.group('line')) i += 1 result[name] = comments else: i += 1 return result
6319137de084aaf366b28e76af52cc1911298d8b
3,656,197
from typing import Dict def get_records(data: Dict[_Expr, Dict], column_order): """Output data as a list of records""" def cell_callback(expr, i, val, spreadsheet_data): spreadsheet_data[-1].append(val) return spreadsheet_data def row_callback(spreadsheet_data): spreadsheet_data[-1] = tuple(spreadsheet_data[-1]) spreadsheet_data.append([]) return spreadsheet_data out = [[]] out = print_analyses_v2(data, column_order, cell_callback, row_callback, out) return out[:-1]
8a8eb0e69c9dabe6dfc59c9b5637fdf4ee2d2dd1
3,656,198
import torch def support_mask_to_label(support_masks, n_way, k_shot, num_points): """ Args: support_masks: binary (foreground/background) masks with shape (n_way, k_shot, num_points) """ support_masks = support_masks.view(n_way, k_shot*num_points) support_labels = [] for n in range(support_masks.shape[0]): support_mask = support_masks[n, :] #(k_shot*num_points) support_label = torch.zeros_like(support_mask) mask_index = torch.nonzero(support_mask).squeeze(1) support_label= support_label.scatter_(0, mask_index, n+1) support_labels.append(support_label) support_labels = torch.stack(support_labels, dim=0) support_labels = support_labels.view(n_way, k_shot, num_points) return support_labels.long()
e6d73dc93e1e0b54d805d9c8b69785168dd2621e
3,656,199
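A 1-way, 1-shot example for `support_mask_to_label`: foreground points receive label way_index + 1 while background stays 0:

import torch

masks = torch.tensor([[[0, 1, 1, 0]]])  # (n_way=1, k_shot=1, num_points=4)
print(support_mask_to_label(masks, n_way=1, k_shot=1, num_points=4))
# tensor([[[0, 1, 1, 0]]])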