content: string (35 to 762k chars) · sha1: string (40 chars) · id: int64 (0 to 3.66M)
import tensorflow as tf  # TensorFlow 1.x API (tf.variable_scope, tf.nn.dropout)
from tensor2tensor.layers import common_layers  # assumed source of subseparable_conv_block


def conv_res_step(x, hparams, padding, mask):
  """One step of convolutions and mid-residual."""
  k = (hparams.kernel_height, hparams.kernel_width)
  k2 = (hparams.large_kernel_size, 1)
  dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)]
  dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)]
  with tf.variable_scope("conv_res_step"):
    y = common_layers.subseparable_conv_block(
        x, hparams.filter_size, dilations_and_kernels1,
        padding=padding, mask=mask, separabilities=0, name="residual1")
    y = tf.nn.dropout(y, 1.0 - hparams.dropout)
    return common_layers.subseparable_conv_block(
        y, hparams.hidden_size, dilations_and_kernels2,
        padding=padding, mask=mask, separabilities=0, name="residual2")
e0d2728f4991112a0dbd504121048f8670a4406b
3,652,889
import six
from google.protobuf import any_pb2  # the protobuf Any message; typing.Any (as originally imported) cannot be used with isinstance()


def _get_kind_name(item):
  """Returns the kind name in CollectionDef.

  Args:
    item: A data item.

  Returns:
    The string representation of the kind in CollectionDef.
  """
  if isinstance(item, (six.string_types, six.binary_type)):
    kind = "bytes_list"
  elif isinstance(item, six.integer_types):
    kind = "int64_list"
  elif isinstance(item, float):
    kind = "float_list"
  elif isinstance(item, any_pb2.Any):
    kind = "any_list"
  else:
    kind = "node_list"
  return kind
094298763f9bf1e3e7a421c19e08016f2138b7d7
3,652,890
import numpy as np


def Froude_number(v, h, g=9.80665):
    """
    Calculate the Froude Number of the river, channel or duct flow,
    to check the subcritical flow assumption (if Fr < 1).

    Parameters
    ------------
    v : int/float
        Average velocity [m/s].
    h : int/float
        Mean hydraulic depth [m].
    g : int/float
        Gravitational acceleration [m/s2].

    Returns
    ---------
    Fr : float
        Froude Number of the river [unitless].
    """
    assert isinstance(v, (int, float)), 'v must be of type int or float'
    assert isinstance(h, (int, float)), 'h must be of type int or float'
    assert isinstance(g, (int, float)), 'g must be of type int or float'

    Fr = v / np.sqrt(g * h)

    return Fr
754225397baa6a27ae58adc63f09bba5287f18e9
3,652,891
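A quick usage sketch for the `Froude_number` entry above (assumes the function and NumPy are importable; the velocity and depth values are made up for illustration):

# 2 m/s average velocity over a 1.5 m mean hydraulic depth
Fr = Froude_number(2.0, 1.5)
print(round(Fr, 3))  # 0.521 -> Fr < 1, so the subcritical-flow assumption holds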
from __future__ import annotations

from functools import wraps
from typing import Any, Callable

# `Command`, `RequestError` and `_LOGGER` are expected to exist in the enclosing
# module (presumably the pytradfri Command/RequestError classes and the module logger).


def handle_error(
    func: Callable[[Command | list[Command]], Any]
) -> Callable[[str], Any]:
    """Handle tradfri api call error."""

    @wraps(func)
    async def wrapper(command: Command | list[Command]) -> None:
        """Decorate api call."""
        try:
            await func(command)
        except RequestError as err:
            _LOGGER.error("Unable to execute command %s: %s", command, err)

    return wrapper
1604f8ae224a9fb565f81ae70d74c24e68e60b9e
3,652,892
def write(ser, command, log):
    """Write command to serial port, append what you write to log."""
    ser.write(command)
    summary = " I wrote: " + repr(command)
    log += summary + "\n"
    print(summary)
    return log
769e345d90121d4bf2d8cc23c128c2a588cba37c
3,652,893
import numpy as np


def anscombe(x):
    """Compute Anscombe transform."""
    return 2 * np.sqrt(x + 3 / 8)
9a47318733568892c4695db2cf153e59e78bb8d7
3,652,894
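A minimal usage sketch for the `anscombe` entry above, applying the variance-stabilizing transform to a few Poisson-like counts (assumes NumPy and the function are available):

import numpy as np

counts = np.array([0, 1, 4, 9])
print(anscombe(counts))  # approximately [1.2247 2.3452 4.1833 6.1237]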
def max_accuracy(c1, c2): """ Relabel the predicted labels *in order* to achieve the best accuracy, and return that score and the best labelling Parameters ---------- c1 : np.array numpy array with label of predicted cluster c2 : np.array numpy array with label of true cluster """ c1 = c1.astype(str) c2 = c2.astype(str) match_satimage = pd.DataFrame({"Guess": c1, "True": c2}) match_satimage['match'] = match_satimage['Guess'] + '_t' + match_satimage['True'] comparison = pd.DataFrame(match_satimage['match']) A = comparison.value_counts() sum = 0 clusters = [] c1new = np.copy(c1).astype(int) j = 0 for i in range(len(A)): C_str = A[[i]].index.values[0][0] #print(C_str) CTL = C_str.split('_') if CTL[0] in clusters or CTL[1] in clusters or CTL[0] == '-1': pass else: c1new[c1 == CTL[0]] = CTL[1][1:] clusters.append(CTL[0]) clusters.append(CTL[1]) sum = sum + int(A[[i]]) #print(clusters) #print(sum) j = j + 1 accuracy = sum/len(c1) return accuracy, c1new.astype(int)
7ec438b500463859c27ea94d315312b88f5954f1
3,652,895
def create_sphere():
    """Create and return a single sphere of radius 5."""
    sphere = rt.sphere()
    sphere.radius = 5
    return sphere
a8d5e2e8c0ec7d00f75c4007214d21aa0d2b64ad
3,652,896
import time def get_input(prompt=None): """Sets the prompt and waits for input. :type prompt: None | list[Text] | str """ if not isinstance(prompt, type(None)): if type(prompt) == str: text_list = [Text(prompt, color=prompt_color, new_line=True)] elif type(prompt) == list: text_list = prompt else: raise Exception("Must be None, str, or list[Text]") update_textbox("events", text_list) _user_input = check_input() while isinstance(_user_input, type(None)): time.sleep(.1) if not is_running(): return None _user_input = check_input() return _user_input
bbcd5bbd7f97bff8d213d13afe22ae9111849e10
3,652,898
def alpha_liq(Nu, lyambda_feed, d_inner):
    """
    Calculates the coefficient of heat transfer (alpha) from liquid to the pipe wall.

    Parameters
    ----------
    Nu : float
        The Nusselt criterion, [dimensionless]
    lyambda_feed : float
        The thermal conductivity of feed, [W / (m * degrees Celsius)]
    d_inner : float
        The diameter of the inner pipe, [m]

    Returns
    -------
    alpha_liq : float
        The coefficient of heat transfer (alpha), [W / (m**2 * degrees Celsius)]

    References
    ----------
    Romankov, formula 4.11, p. 150
    """
    return Nu * lyambda_feed / d_inner
13d0371248c106fb0f12d26335381675d7484000
3,652,899
def get_dataset(opts): """ Dataset And Augmentation """ if opts.dataset == 'camvids': mean, std = camvids.get_norm() train_transform = train_et.ExtCompose([ # et.ExtResize(size=opts.crop_size), train_et.ExtRandomScale((0.5, 2.0)), train_et.ExtRandomHorizontalFlip(), train_et.New_ExtRandomCrop( size=(481, 481), pad_if_needed=True), train_et.ExtToTensor(), train_et.ExtNormalize(mean=mean, std=std), ]) if opts.crop_val: val_transform = et.ExtCompose([ et.ExtResize(opts.crop_size), et.ExtCenterCrop(opts.crop_size), et.ExtToTensor(), et.ExtNormalize(mean=mean, std=std), ]) else: val_transform = et.ExtCompose([ et.ExtToTensor(), et.ExtNormalize(mean=mean, std=std), ]) train_dst = camvids.CamvidSegmentation(opts.data_root, image_set='trainval', transform=train_transform, num_copys=opts.num_copys) val_dst = camvids.CamvidSegmentation(opts.data_root, image_set='test', transform=val_transform) if opts.dataset == 'voc': # train_transform = et.ExtCompose([ # #et.ExtResize(size=opts.crop_size), # et.ExtRandomScale((0.5, 2.0)), # et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True), # et.ExtRandomHorizontalFlip(), # et.ExtToTensor(), # et.ExtNormalize(mean=[0.485, 0.456, 0.406], # std=[0.229, 0.224, 0.225]), # ]) train_transform = train_et.ExtCompose([ # et.ExtResize(size=opts.crop_size), train_et.ExtRandomScale((0.5, 2.0)), train_et.ExtRandomHorizontalFlip(), train_et.New_ExtRandomCrop( size=(opts.crop_size, opts.crop_size), pad_if_needed=True), train_et.ExtToTensor(), train_et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) if opts.crop_val: val_transform = et.ExtCompose([ et.ExtResize(opts.crop_size), et.ExtCenterCrop(opts.crop_size), et.ExtToTensor(), et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) else: val_transform = et.ExtCompose([ et.ExtToTensor(), et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) train_dst = VOCSegmentation(root=opts.data_root, year=opts.year, image_set='train', download=opts.download, transform=train_transform, num_copys=opts.num_copys) val_dst = VOCSegmentation(root=opts.data_root, year=opts.year, image_set='val', download=False, transform=val_transform) if opts.dataset == 'cityscapes': train_transform = et.ExtCompose([ # et.ExtResize( 512 ), et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)), et.ExtColorJitter(brightness=0.5, contrast=0.5, saturation=0.5), et.ExtRandomHorizontalFlip(), et.ExtToTensor(), et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) val_transform = et.ExtCompose([ # et.ExtResize( 512 ), et.ExtToTensor(), et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) train_dst = Cityscapes(root=opts.data_root, split='train', transform=train_transform, num_copys=opts.num_copys) print("------------------------now copy: {:}----------------------------------".format(opts.num_copys)) val_dst = Cityscapes(root=opts.data_root, split='val', transform=val_transform) return train_dst, val_dst
046d2ebdf9a0b1be37fea052fbf07e14a623ab1e
3,652,900
def gen_sankey_diagram_distribute_query(query_statement, params, final_entites_name):
    """
    Sankey diagram data-distribution query
    :param query_statement:
    :param params:
    :param final_entites_name:
    :return:
    """
    query_statement = dgraph_get_project_count(query_statement)
    # first-level nodes
    first_level_sql = """select a.code as tag_code, a.alias as tag_alias, a.id as tag_id, b.code as parent_code from tag as a, tag as b where a.parent_id in (select id from tag where code in ('techops', 'bissness')) and a.parent_id = b.id and a.kpath = 1;"""
    first_level_list = tagaction.query_direct_sql_to_map_list(connections['bkdata_basic_slave'], first_level_sql)
    # second-level nodes
    first_level_id_tuple = tuple([each_tag['tag_id'] for each_tag in first_level_list])
    second_level_sql = """select a.code as tag_code, a.alias as tag_alias, a.id as tag_id, b.code as parent_code from tag as a, tag as b where a.parent_id in {} and a.parent_id = b.id and a.kpath = 1;""".format(
        first_level_id_tuple
    )
    second_level_list = tagaction.query_direct_sql_to_map_list(connections['bkdata_basic_slave'], second_level_sql)

    # graphQL
    for each_tag in first_level_list:
        query_statement += get_single_tag_query(
            each_tag.get('tag_code'), '$final_filter_uids_name', need_me_count=False
        )
    # mapping between the other nodes and processing_type
    query_statement += get_other_tag_query(first_level_list, '$final_filter_uids_name')
    for each_tag in second_level_list:
        query_statement += get_single_tag_query(
            each_tag.get('tag_code'), '$final_filter_uids_name', need_me_count=False, need_processing_type=True
        )
    query_statement = query_statement.replace('$final_filter_uids_name', final_entites_name)
    query_statement += '\n}'
    dgraph_result = meta_dgraph_complex_search(query_statement, return_original=True)

    return {
        'first_level_tag_list': first_level_list,
        'second_level_tag_list': second_level_list,
        'dgraph_result': dgraph_result,
    }
7d87157ca289928bfe8f8bcdfb7cbc6cbee6e521
3,652,901
def declare(objective: str, displayname: str = None, criteria: str = "dummy"):
    """
    objective:str   -> The id/name given to a scoreboard
    displayname:str -> The name that will be displayed on screen
    criteria:str    -> The criteria of the scoreboard
    """
    global SCOREBOARDS
    SCOREBOARDS.append(objective)

    if displayname is None:
        return f"scoreboard objectives add {objective} {criteria}\n"
    return f"scoreboard objectives add {objective} {criteria} \"{displayname}\"\n"
0a574741a51afa27799b917e735657e3cb34b072
3,652,902
def complement_angle(angle):
    """ 90 minus angle, in degrees"""
    return 90 - angle
bca1dfa3158df61e87cbadc47307f68298a237b7
3,652,903
def parse_custom_commands(command, separator=";"):
    """Parse run custom command string into the commands list

    :param str command: run custom [config] command(s)
    :param str separator: commands separator in the string
    :rtype: list[str]
    """
    if not command:
        return []
    return command.strip(separator).split(separator)
4d55ef149aa16e224f5894fb0ef506a1bd8285f3
3,652,904
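A usage sketch for the `parse_custom_commands` entry above (the command strings are illustrative):

print(parse_custom_commands("configure terminal;logging host 10.0.0.1;end"))
# ['configure terminal', 'logging host 10.0.0.1', 'end']

print(parse_custom_commands(""))  # []  (empty input yields an empty list)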
def lower_volatility_band(c, dev_target, band_target, center_target): """ | Calculates the lower volatility band | Name: lower\_volatility\_band\_\ **c**\ \_times\_\ **band_target.name**\ &\ **dev_target.name**\ \_over\_\ **center_target.name** :param c: Multiplier constant :type c: float :param dev_target: Used for band displacement. Can be a constant or a function :type dev_target: function or float :param band_target: Used for band displacement. Can be a constant or a function :type band_target: function or float :param center_target: Data column for the band center :type center_target: str """ def return_function(data): if hasattr(band_target, "name") & hasattr(dev_target, "name"): column_name = f"lower_volatility_band_{c}_times_{band_target.name}&{dev_target.name}_under_{center_target.name}" elif hasattr(band_target, "name"): column_name = f"lower_volatility_band_{c}_times_{band_target.name}&{dev_target}_under_{center_target.name}" else: column_name = f"lower_volatility_band_{c}_times_{band_target}&{dev_target}_under_{center_target.name}" if column_name not in data.columns: data[column_name] = center_target - c * dev_target * band_target return data[column_name].copy() return return_function
d910c1f9e14fa28b171dd16e937fa65c220839d7
3,652,905
def find_by_attr(node, value, name="name", maxlevel=None):
    """Identical to :any:`search.find_by_attr` but cached."""
    return search.find_by_attr(node, value, name=name, maxlevel=maxlevel)
3d4d5084762fe25572e06eeb56fd4374d91dc4c8
3,652,906
import time def remind(phenny, input): """Set a reminder""" m = r_command.match(input.bytes) if not m: return phenny.reply("Sorry, didn't understand the input.") length, scale, message = m.groups() length = float(length) factor = scaling.get(scale, 60) duration = length * factor if duration % 1: duration = int(duration) + 1 else: duration = int(duration) t = int(time.time()) + duration reminder = (input.sender, input.nick, message) try: phenny.remind_data[t].append(reminder) except KeyError: phenny.remind_data[t] = [reminder] dump_database(phenny) if duration >= 60: w = '' if duration >= 3600 * 12: w += time.strftime(' on %d %b %Y', time.gmtime(t)) w += time.strftime(' at %H:%MZ', time.gmtime(t)) phenny.reply('Okay, will remind%s' % w) else: phenny.reply('Okay, will remind in %s secs' % duration)
21edf68ccb4f914325d4bb44177efb5fa44c14ac
3,652,908
import glob
from pathlib import Path

import numpy as np

# NDArrayInt is assumed to be the project's alias for an integer NumPy array type.


def get_timestamps_from_sensor_folder(sensor_folder_wildcard: str) -> NDArrayInt:
    """Timestamp always lies at end of filename.

    Args:
        sensor_folder_wildcard: string to glob to find all filepaths for a particular
            sensor files within a single log run

    Returns:
        Numpy array of integers, representing timestamps
    """
    path_generator = glob.glob(sensor_folder_wildcard)
    path_generator.sort()

    timestamps: NDArrayInt = np.array([int(Path(jpg_fpath).stem.split("_")[-1]) for jpg_fpath in path_generator])
    return timestamps
2e1bd2e7b568c83aac0ef1c1f81ce4bcb8f3fe1e
3,652,909
def vehicles_missing(request):
    """
    Displays to users their theft reports
    """
    reports = TheftReport.objects.all()
    return render(request, "vms/theft_reports.html", {
        'reports': reports,
    })
254eb4ead3f058f10de8401263f354ef8690451c
3,652,911
def get_limits(data):
    """ Get the x, y ranges of the ST data. """
    y_min = 1e6
    y_max = -1e6
    x_min = 1e6
    x_max = -1e6
    for doc in data:
        x = doc["x"]
        y = doc["y"]
        y_min = y if y < y_min else y_min
        y_max = y if y > y_max else y_max
        x_min = x if x < x_min else x_min
        x_max = x if x > x_max else x_max
    return x_min, x_max, y_min, y_max
9e2894626b9de59e94d65affa0a1d1c6f30e6399
3,652,913
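A small usage sketch for the `get_limits` entry above, using an in-memory list of dicts in place of the ST data documents:

docs = [{"x": 3, "y": 7}, {"x": 10, "y": 2}, {"x": 5, "y": 9}]
print(get_limits(docs))  # (3, 10, 2, 9) -> x_min, x_max, y_min, y_max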
def get_bool(prompt: str | None = None, default: bool = False) -> bool:
    """Gets a boolean response from the command line.

    :param prompt: Input prompt.
    :param default: Default value used if no characters are typed.
    :return: Input boolean.
    """
    input_str = input(_prompt_from_message(prompt, default='y' if default else 'n'))
    return input_str.lower().startswith('y')
c9504afe8500a99dcf80f5f95b8a1754dc881cd2
3,652,914
def proj_helsinki(x, y):
    """Project Helsinki coordinates into ETRS-GK25 (EPSG:3879).

    https://www.hel.fi/helsinki/fi/kartat-ja-liikenne/kartat-ja-paikkatieto/paikkatiedot+ja+-aineistot/koordinaatistot_ja+_korkeudet/koordinaatti_ja_korkeusjarjestelmat  # pylint: disable=line-too-long
    """
    # pylint: disable=invalid-name
    output_epsg = "EPSG:3879"

    a = 6654650.14636
    b = 25447166.49457
    c = 0.99998725362
    d = -0.00120230340
    e = 0.00120230340
    f = 0.99998725362

    x, y = a + c * x + d * y, b + e * x + f * y

    return x, y, output_epsg
d1dc6cc314e767cc971c6b8695d2a4c4043b608a
3,652,915
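A usage sketch for the `proj_helsinki` entry above; projecting the local origin (0, 0) simply returns the affine offsets, which makes the transform easy to sanity-check:

x, y, epsg = proj_helsinki(0.0, 0.0)
print(x, y, epsg)  # 6654650.14636 25447166.49457 EPSG:3879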
def _check_start_stop(raw, start, stop):
    """Aux function."""
    out = list()
    for st in (start, stop):
        if st is None:
            out.append(st)
        else:
            try:
                out.append(_ensure_int(st))
            except TypeError:  # not int-like
                out.append(raw.time_as_index(st)[0])
    return out
2d7c59fff70c7b43942060b353dcd1c7ae917443
3,652,916
import json


def main(function, js):
    """Console script for es_reindex."""
    args = json.loads(js)
    config = args['config']
    # e.g. --json='{"config": "./es_index_tool/data/example_config.json"}'
    tool = ESIndexTool(config_path=config)
    if 'id' not in args:
        tool.reindex()
    else:
        # e.g., --json='{"id": "2kS98AsytSXb8prbH"}'
        id_ = args['id']
        tool.index_document_by_id(id_)
    return 0
42cf4b0c33c7a58fb521357089a29195c0f04a91
3,652,917
def read( datapath, qt_app=None, dataplus_format=True, gui=False, start=0, stop=None, step=1, convert_to_gray=True, series_number=None, use_economic_dtype=True, dicom_expected=None, orientation_axcodes="original", **kwargs ): """Returns 3D data and its metadata. # NOTE(:param qt_app:) If it is set to None (as default) all dialogs for series selection are performed in terminal. If qt_app is set to QtGui.QApplication() dialogs are in Qt. :param datapath: directory with input data, if url is give, the file is downloaded into `~/data/downloads/` :param qt_app: Dialog destination. If None (default) -> terminal, if 'QtGui.QApplication()' -> Qt :param dataplus_format: New data format. Metadata and data are returned in one structure. :param gui: True if 'QtGui.QApplication()' instead of terminal should be used :param int start: used for DicomReader, defines where 3D data reading should start :param int stop: used for DicomReader, defines where 3D data reading should stop :param int step: used for DicomReader, defines step for 3D data reading :param bool convert_to_gray: if True -> RGB is converted to gray :param int series_number: used in DicomReader, essential in metadata :param use_economic_dtype: if True, casts 3D data array to less space consuming dtype :param dicom_expected: set true if it is known that data is in dicom format. Set False to suppress dicom warnings. :param orientation_axcodes: 'SPL' inferior to Superior, anterior to Posetrior, right to Left. Standard is for nifty is RAS. :return: tuple (data3d, metadata) """ # Simple read function. Internally calls DataReader.Get3DData() dr = DataReader() return dr.Get3DData( datapath=datapath, qt_app=qt_app, dataplus_format=dataplus_format, gui=gui, start=start, stop=stop, step=step, convert_to_gray=convert_to_gray, series_number=series_number, use_economic_dtype=use_economic_dtype, dicom_expected=dicom_expected, orientation_axcodes=orientation_axcodes, **kwargs )
8ed8c33a1e7bb61aa9f06b573f2f85fc6a96481b
3,652,918
def sum_squares2(n):
    """
    Returns: sum of squares from 1 to n-1

    Example: sum_squares(5) is 1+4+9+16 = 30

    Parameter n: The number of steps
    Precondition: n is an int > 0
    """
    # Accumulator
    total = 0

    print('Before while')
    x = 0
    while x < n:
        print('Start loop ' + str(x))
        total = total + x * x
        x = x + 1
        print('End loop ')
    print('After while')

    return total
1d5dfe160568f032184eea723138b8d6dd3929fc
3,652,919
import cv2

# COLS and ROWS are expected to be module-level constants giving the target size.


def read_image(src):
    """Read and resize individual images"""
    im = cv2.imread(src, cv2.IMREAD_COLOR)
    im = cv2.resize(im, (COLS, ROWS), interpolation=cv2.INTER_CUBIC)
    return im
cf3d31691ad0814c15fe635d03f2febee0150723
3,652,921
def pattern_classifier(data, pattern_threshold):
    """Return an array mask passing our selection."""
    return data["key_pattern"] > pattern_threshold
116a7f84a18b57188fb2ce24fa7ecacd1b61c3da
3,652,923
def is_scalar(a) -> bool:
    """
    Tests if a python object is a scalar (instead of an array)

    Parameters
    ----------
    a : object
        Any object to be checked

    Returns
    -------
    bool
        Whether the input object is a scalar
    """
    if isinstance(a, (list, tuple)):
        return False
    if hasattr(a, "__array__") and hasattr(a, "__len__"):  # np.array(1) is scalar
        return False
    return True
29206a7921da74257e6af66311c0bbfc4b576ac0
3,652,924
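A usage sketch for the `is_scalar` entry above:

print(is_scalar(3.14))       # True
print(is_scalar([1, 2, 3]))  # False
print(is_scalar("text"))     # True  (plain strings are not treated as arrays here)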
def median(ts: TimeSeries, /, window_length: int = 3) -> TimeSeries: """ Calculate a moving median. On n-dimensional data, filtering occurs on the first axis (time). Parameters ---------- ts Input TimeSeries window_length Optional. Kernel size, must be odd. The default is 3. Example ------- >>> ts = ktk.TimeSeries(time=np.arange(0, 0.5, 0.1)) >>> ts.data['data1'] = np.array([10., 11., 11., 20., 14., 15.]) >>> ts = ktk.filters.median(ts) >>> ts.data['data1'] array([10., 11., 11., 14., 15., 15.]) """ out_ts = ts.copy() for key in ts.data: window_shape = [1 for i in range(len(ts.data[key].shape))] window_shape[0] = window_length out_ts.data[key] = ndi.median_filter( ts.data[key], size=window_shape) return out_ts
93017c2da815687faf386beabd55a6ff4eaa674a
3,652,925
import math import copy def convert_polynomial_coefficients(A_in, B_in, C_in, D_in, oss=False, inverse=False, parent_aperture=None): """Emulate some transformation made in nircam_get_polynomial_both. Written by Johannes Sahlmann 2018-02-18, structure largely based on nircamtrans.py code by Colin Cox. Parameters ---------- A_in : numpy array polynomial coefficients B_in : numpy array polynomial coefficients C_in : numpy array polynomial coefficients D_in : numpy array polynomial coefficients oss : bool Whether this is an OSS aperture or not inverse : bool Whether this is forward or backward/inverse transformation parent_aperture : str Name of parent aperture Returns ------- AR, BR, CR, DR, V3SciXAngle, V3SciYAngle, V2Ref, V3Ref : tuple of arrays and floats Converted polynomial coefficients """ if inverse is False: # forward direction V2Ref = A_in[0] V3Ref = B_in[0] A_in[0] = 0.0 B_in[0] = 0.0 V3SciXAngle = np.rad2deg(np.arctan2(A_in[1], B_in[1])) # V3SciXAngle V3SciYAngle = np.rad2deg(np.arctan2(A_in[2], B_in[2])) V3Angle = V3SciYAngle # V3SciYAngle if abs(V3Angle) > 90.0: V3Angle = V3Angle - math.copysign(180.0, V3Angle) # AR, BR = rotate_coefficients(A_in, B_in, V3Angle) AR, BR = add_rotation(A_in, B_in, -1*V3Angle) CS = shift_coefficients(C_in, V2Ref, V3Ref) DS = shift_coefficients(D_in, V2Ref, V3Ref) CR = prepend_rotation_to_polynomial(CS, V3Angle) DR = prepend_rotation_to_polynomial(DS, V3Angle) if oss: # OSS apertures V3Angle = copy.deepcopy(V3SciYAngle) else: # non-OSS apertures if abs(V3SciYAngle) > 90.0: # e.g. NRCA2_FULL # print 'Reverse Y axis direction' AR = -flip_y(AR) BR = flip_y(BR) CR = flip_x(CR) DR = -flip_x(DR) else: # e.g NRCA1_FULL # print 'Reverse X axis direction' AR = -flip_x(AR) BR = flip_x(BR) CR = -flip_x(CR) DR = flip_x(DR) V3SciXAngle = V3SciXAngle - math.copysign(180.0, V3SciXAngle) # V3Angle = betaY # Cox: Changed 4/29 - might affect rotated polynomials V3SciYAngle = V3Angle return AR, BR, CR, DR, V3SciXAngle, V3SciYAngle, V2Ref, V3Ref else: siaf_detector_layout = read.read_siaf_detector_layout() master_aperture_names = siaf_detector_layout['AperName'].data if parent_aperture.AperName not in master_aperture_names: raise RuntimeError polynomial_degree = parent_aperture.Sci2IdlDeg V3SciYAngle = copy.deepcopy(parent_aperture.V3SciYAngle) # betaY V3SciXAngle = parent_aperture.V3SciXAngle # betaX betaY = V3SciYAngle + parent_aperture.DetSciYAngle # master aperture is never OSS if abs(betaY) > 90.0: # e.g. 
NRCA2_FULL # print 'Reverse Y axis direction' AR = -flip_y(A_in) BR = flip_y(B_in) CR = flip_x(C_in) DR = -flip_x(D_in) else: # e.g NRCA1_FULL # print 'Reverse X axis direction' AR = -flip_x(A_in) BR = flip_x(B_in) CR = -flip_x(C_in) DR = flip_x(D_in) V3SciXAngle = revert_correct_V3SciXAngle(V3SciXAngle) # rotate the other way # A, B = rotate_coefficients(AR, BR, -V3SciYAngle) A, B = add_rotation(AR, BR, +1*V3SciYAngle) A[0] = parent_aperture.V2Ref B[0] = parent_aperture.V3Ref # now invert the last part of nircam_get_polynomial_forward AFS = A BFS = B # shift by parent aperture reference point AF = shift_coefficients(AFS, -parent_aperture.XDetRef, -parent_aperture.YDetRef) BF = shift_coefficients(BFS, -parent_aperture.XDetRef, -parent_aperture.YDetRef) CS = prepend_rotation_to_polynomial(CR, -V3SciYAngle) DS = prepend_rotation_to_polynomial(DR, -V3SciYAngle) C = shift_coefficients(CS, -parent_aperture.V2Ref, -parent_aperture.V3Ref) D = shift_coefficients(DS, -parent_aperture.V2Ref, -parent_aperture.V3Ref) C[0] += parent_aperture.XDetRef D[0] += parent_aperture.YDetRef return AF, BF, C, D
3b1e85c0416a9a16c5c648a34704c13570ea9ee3
3,652,928
def spinner_runner_factory(spec, t_compile, extra_commands): """Optimized spinner runner, which receives the spec of an animation, and controls the flow of cycles and frames already compiled to a certain screen length and with wide chars fixed, thus avoiding any overhead in runtime within complex spinners, while allowing their factories to be garbage collected. Args: spec (SimpleNamespace): the spec of an animation t_compile (about_time.Handler): the compile time information extra_commands (tuple[tuple[cmd, list[Any], dict[Any]]]): requested extra commands Returns: a spinner runner """ def spinner_runner(): """Wow, you are really deep! This is the runner of a compiled spinner. Every time you call this function, a different generator will kick in, which yields the frames of the current animation cycle. Enjoy!""" yield from next(cycle_gen) # I love generators! def runner_check(*args, **kwargs): # pragma: no cover return check(spec, *args, **kwargs) spinner_runner.__dict__.update(spec.__dict__, check=fix_signature(runner_check, check, 1)) spec.__dict__.update(t_compile=t_compile, runner=spinner_runner) # set after the update above. sequential(spec) apply_extra_commands(spec, extra_commands) cycle_gen = spec.strategy(spec.data) return spinner_runner
887af0abc7f11dcf56edb5cda7de136bb95cf6b8
3,652,929
def _project_im_rois(im_rois, im_scale_factor, im_crop): """Project image RoIs into the rescaled training image.""" im_rois[:, 0] = np.minimum( np.maximum(im_rois[:, 0], im_crop[0]), im_crop[2]) im_rois[:, 1] = np.minimum( np.maximum(im_rois[:, 1], im_crop[1]), im_crop[3]) im_rois[:, 2] = np.maximum( np.minimum(im_rois[:, 2], im_crop[2]), im_crop[0]) im_rois[:, 3] = np.maximum( np.minimum(im_rois[:, 3], im_crop[3]), im_crop[1]) crop = np.tile(im_crop[:2], [im_rois.shape[0], 2]) rois = (im_rois - crop) * im_scale_factor # For YAROIPooling Layer # rois = (im_rois - crop) # width = im_crop[2] - im_crop[0] # height = im_crop[3] - im_crop[1] # rois[:, 0] = rois[:, 0] / width # rois[:, 1] = rois[:, 1] / height # rois[:, 2] = rois[:, 2] / width # rois[:, 3] = rois[:, 3] / height return rois
596d9baa12708e1adcc9a034c34d4b751ef7e73a
3,652,930
from typing import List, Tuple


def evaluate_error_absolute(poses_to_test: List[Tuple[str, kapture.PoseTransform]],
                            poses_ground_truth: List[Tuple[str, kapture.PoseTransform]]
                            ) -> List[Tuple[str, float, float]]:
    """
    Evaluate the absolute error for poses to a ground truth.

    :param poses_to_test: poses to test
    :param poses_ground_truth: reference poses
    :return: list of error evaluation
    """
    poses_ground_truth_as_dict = {name: pose for name, pose in poses_ground_truth}
    result = [(name,) + world_pose_transform_distance(pose, poses_ground_truth_as_dict[name])
              for (name, pose) in poses_to_test]
    return result
45ef1335074514837a72be159f8e55f229676779
3,652,931
def restricted_offset(parent_dimensions, size, offset):
    """
    Get offset restricted by various factors
    """
    limit_x = (parent_dimensions[0] - size[0]) / 2
    limit_y = (parent_dimensions[1] - size[1]) / 2
    x = clamp(offset[0], -limit_x, limit_x)
    y = clamp(offset[1], -limit_y, limit_y)
    return x, y
8e8f16f2267c2ddefda896db9e4905836030f24e
3,652,932
def wt_sgrna(target='none'): """ Return the wildtype sgRNA sequence. The construct is composed of 3 domains: stem, nexus, and hairpins. The stem domain encompasses the lower stem, the bulge, and the upper stem. Attachments are allowed pretty much anywhere, although it would be prudent to restrict this based on the structural biology of Cas9 if you're planning to make random attachments. """ sgrna = Construct('wt') sgrna += spacer(target) sgrna += Domain('stem', 'GUUUUAGAGCUAGAAAUAGCAAGUUAAAAU') sgrna += Domain('nexus', 'AAGGCUAGUCCGU') sgrna += Domain('hairpins', 'UAUCAACUUGAAAAAGUGGCACCGAGUCGGUGC') sgrna += Domain('tail', 'UUUUUU') sgrna['stem'].expected_fold = '((((((..((((....))))....))))))' sgrna['hairpins'].expected_fold = '.....((((....)))).((((((...))))))' sgrna['stem'].style = 'green' sgrna['nexus'].style = 'red' sgrna['hairpins'].style = 'blue' sgrna['stem'].attachment_sites = 'anywhere' sgrna['nexus'].attachment_sites = 'anywhere' sgrna['hairpins'].attachment_sites = 'anywhere' return sgrna
72d07c10defa52529818217c495d44f8fb66062e
3,652,933
import csv


def export_nodes(nodes, csvfilepath):
    """
    Writes the standard nodes data in `nodes` to the CSV file at `csvfilepath`.
    """
    with open(csvfilepath, "w") as csv_file:
        csvwriter = csv.DictWriter(csv_file, STANDARD_NODE_HEADER_V0)
        csvwriter.writeheader()
        for node in nodes:
            noderow = node_to_rowdict(node)
            csvwriter.writerow(noderow)
    return csvfilepath
d84658904a848993237e8571412ca3a3860be999
3,652,934
def _var_network(graph, add_noise=True, inno_cov=None, invert_inno=False, T=100, initial_values=None): """Returns a vector-autoregressive process with correlated innovations. Useful for testing. Example: graph=numpy.array([[[0.2,0.,0.],[0.5,0.,0.]], [[0.,0.1,0. ],[0.3,0.,0.]]]) represents a process X_1(t) = 0.2 X_1(t-1) + 0.5 X_2(t-1) + eps_1(t) X_2(t) = 0.3 X_2(t-1) + 0.1 X_1(t-2) + eps_2(t) with inv_inno_cov being the negative (except for diagonal) inverse covariance matrix of (eps_1(t), eps_2(t)) OR inno_cov being the covariance. Initial values can also be provided. Parameters ---------- graph : array Lagged connectivity matrices. Shape is (n_nodes, n_nodes, max_delay+1) add_noise : bool, optional (default: True) Flag to add random noise or not inno_cov : array, optional (default: None) Covariance matrix of innovations. invert_inno : bool, optional (defualt : False) Flag to negate off-diagonal elements of inno_cov and invert it before using it as the covariance matrix of innovations T : int, optional (default: 100) Sample size. initial_values : array, optional (defult: None) Initial values for each node. Shape is (n_nodes, max_delay+1), i.e. must be of shape (graph.shape[1], graph.shape[2]). Returns ------- X : array Array of realization. """ n_nodes, _, period = graph.shape time = T # Test stability _check_stability(graph) # Generate the returned data data = np.random.randn(n_nodes, time) # Load the initial values if initial_values is not None: # Check the shape of the initial values _check_initial_values(initial_values, data[:, :period].shape) # Input the initial values data[:, :period] = initial_values # Check if we are adding noise noise = None if add_noise: # Use inno_cov if it was provided if inno_cov is not None: noise = _generate_noise(inno_cov, time=time, use_inverse=invert_inno) # Otherwise just use uncorrelated random noise else: noise = np.random.randn(time, n_nodes) for a_time in range(period, time): data_past = np.repeat( data[:, a_time-period:a_time][:, ::-1].reshape(1, n_nodes, period), n_nodes, axis=0) data[:, a_time] = (data_past*graph).sum(axis=2).sum(axis=1) if add_noise: data[:, a_time] += noise[a_time] return data.transpose()
ca7c327f4052f44cdcfd60e628f8f53f4e411162
3,652,935
def build_features(component, borders, initial_group): """ Integrate peaks within similarity components and build features :param component: a groupedROI object :param borders: dict - key is a sample name, value is a (n_borders x 2) matrix; predicted, corrected and transformed to normal values borders :param initial_group: a number of mzrt group :return: None (in-place correction) """ rtdiff = (component.rois[0].rt[1] - component.rois[0].rt[0]) scandiff = (component.rois[0].scan[1] - component.rois[0].scan[0]) frequency = scandiff / rtdiff features = [] labels = np.unique(component.grouping) for label in labels: # compute number of peaks peak_number = None for i, sample in enumerate(component.samples): # to do: it would be better to have mapping from group to samples and numbers if component.grouping[i] == label: peak_number = len(borders[sample]) for p in range(peak_number): # build feature intensities = [] samples = [] rois = [] feature_borders = [] shifts = [] rtmin, rtmax, mz = None, None, None for i, sample in enumerate(component.samples): # to do: it would be better to have mapping from group to samples and numbers if component.grouping[i] == label: assert len(borders[sample]) == peak_number begin, end = borders[sample][p] intensity = np.sum(component.rois[i].i[begin:end]) intensities.append(intensity) samples.append(sample) rois.append(component.rois[i]) feature_borders.append(borders[sample][p]) shifts.append(component.shifts[i]) if mz is None: mz = component.rois[i].mzmean rtmin = component.rois[i].rt[0] + begin / frequency rtmax = component.rois[i].rt[0] + end / frequency else: mz = (mz * i + component.rois[i].mzmean) / (i + 1) rtmin = min((rtmin, component.rois[i].rt[0] + begin / frequency)) rtmax = max((rtmax, component.rois[i].rt[0] + end / frequency)) features.append(Feature(samples, rois, feature_borders, shifts, intensities, mz, rtmin, rtmax, initial_group, label)) # to do: there are a case, when borders are empty # assert len(features) != 0 return features
2070741b58c7c04b3a929747407c9dcd9caa025b
3,652,936
def get_cred_fh(library: str) -> str:
    """
    Determines correct SimplyE credential file
    """
    if library == "BPL":
        return ".simplyE/bpl_simply_e.yaml"
    elif library == "NYPL":
        return ".simplyE/nyp_simply_e.yaml"
    else:
        raise ValueError("Invalid library code passed")
aefea283c171963778bdc34ddf2f2aeb18fd126d
3,652,937
def create_app(environment):
    """Construct the core application."""
    app = Flask(__name__, static_url_path="")
    app.config.from_object(Config)

    if environment == 'test':
        app.config['TESTING'] = True
        return app

    db.init_app(app)
    with app.app_context():
        # Imports
        # Create tables for our models
        # db.create_all()
        return app
7b4169adfbcca18a8373ca74cc95691438318837
3,652,938
def weighted_percentiles(a, percentiles, weights=None): """Compute weighted percentiles by using interpolation of the weighted ECDF. Parameters ---------- a : np.ndarray Vector of data for computing quantiles percentiles : np.ndarray Vector of percentiles in [0, 100] weights : np.ndarray Vector of non-negative weights. Not required to sum to one. Returns ------- percentiles : np.ndarray""" a = np.array(a) percentiles = np.array(percentiles) quantiles = percentiles / 100. if weights is None: weights = np.ones(len(a)) else: weights = np.array(weights) assert np.all(weights > 0), 'Weights must be > 0' assert np.all(quantiles >= 0) and np.all(quantiles <= 1), 'Percentiles must be in [0, 100]' sorti = np.argsort(a) a = a[sorti] weights = weights[sorti] """Two definitions for the weighted eCDF. See _plotSolutions() below for a comparison. Note that there are also several options in R for computing a weighted quantile, but I did not fully understand the motivation for each. The chosen option here was intuitive to me and agreed well with the empirical solution below. https://github.com/harrelfe/Hmisc/R/wtd.stats.s""" # ecdf = np.cumsum(weights) / weights.sum() ecdf = (np.cumsum(weights) - 0.5 * weights) / np.sum(weights) return np.interp(quantiles, ecdf, a)
1ffda97f48e4223e3c54167e99af1952b357573a
3,652,939
def default_summary_collector():
    """
    Get the :class:`SummaryCollector` object at the top of context stack.

    Returns:
        SummaryCollector: The summary collector.
    """
    return _summary_collect_stack.top()
98a547f73a6c96bc3e33331ad64430da7f19c1e2
3,652,940
import tensorflow


def norm(*args, **kwargs):
    """
    See https://www.tensorflow.org/versions/master/api_docs/python/tf/norm .
    """
    return tensorflow.norm(*args, **kwargs)
95a03e8267453db6e8c2ece0a2d45131c4fcb9a9
3,652,941
from typing import Optional from typing import Sequence def get_steering_policies(compartment_id: Optional[str] = None, display_name: Optional[str] = None, display_name_contains: Optional[str] = None, filters: Optional[Sequence[pulumi.InputType['GetSteeringPoliciesFilterArgs']]] = None, health_check_monitor_id: Optional[str] = None, id: Optional[str] = None, state: Optional[str] = None, template: Optional[str] = None, time_created_greater_than_or_equal_to: Optional[str] = None, time_created_less_than: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSteeringPoliciesResult: """ This data source provides the list of Steering Policies in Oracle Cloud Infrastructure DNS service. Gets a list of all steering policies in the specified compartment. ## Example Usage ```python import pulumi import pulumi_oci as oci test_steering_policies = oci.dns.get_steering_policies(compartment_id=var["compartment_id"], display_name=var["steering_policy_display_name"], display_name_contains=var["steering_policy_display_name_contains"], health_check_monitor_id=oci_health_checks_http_monitor["test_http_monitor"]["id"], id=var["steering_policy_id"], state=var["steering_policy_state"], template=var["steering_policy_template"], time_created_greater_than_or_equal_to=var["steering_policy_time_created_greater_than_or_equal_to"], time_created_less_than=var["steering_policy_time_created_less_than"]) ``` :param str compartment_id: The OCID of the compartment the resource belongs to. :param str display_name: The displayName of a resource. :param str display_name_contains: The partial displayName of a resource. Will match any resource whose name (case-insensitive) contains the provided value. :param str health_check_monitor_id: Search by health check monitor OCID. Will match any resource whose health check monitor ID matches the provided value. :param str id: The OCID of a resource. :param str state: The state of a resource. :param str template: Search by steering template type. Will match any resource whose template type matches the provided value. :param str time_created_greater_than_or_equal_to: An [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) timestamp that states all returned resources were created on or after the indicated time. :param str time_created_less_than: An [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) timestamp that states all returned resources were created before the indicated time. 
""" __args__ = dict() __args__['compartmentId'] = compartment_id __args__['displayName'] = display_name __args__['displayNameContains'] = display_name_contains __args__['filters'] = filters __args__['healthCheckMonitorId'] = health_check_monitor_id __args__['id'] = id __args__['state'] = state __args__['template'] = template __args__['timeCreatedGreaterThanOrEqualTo'] = time_created_greater_than_or_equal_to __args__['timeCreatedLessThan'] = time_created_less_than if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('oci:dns/getSteeringPolicies:getSteeringPolicies', __args__, opts=opts, typ=GetSteeringPoliciesResult).value return AwaitableGetSteeringPoliciesResult( compartment_id=__ret__.compartment_id, display_name=__ret__.display_name, display_name_contains=__ret__.display_name_contains, filters=__ret__.filters, health_check_monitor_id=__ret__.health_check_monitor_id, id=__ret__.id, state=__ret__.state, steering_policies=__ret__.steering_policies, template=__ret__.template, time_created_greater_than_or_equal_to=__ret__.time_created_greater_than_or_equal_to, time_created_less_than=__ret__.time_created_less_than)
6919dc3e155854db5ac0838635cb20f691a423d3
3,652,943
def res_input_matrix_random_sparse(idim = 1, odim = 1, density=0.1, dist = 'normal'): """reservoirs.res_input_matrix_random_sparse Create a sparse reservoir input matrix. Wrapper for create_matrix_sparse_random. Arguments: idim: input dimension odim: hidden dimension density: density dist: distribution Returns: wi: input matrix """ # p_wi = density # wi_ = spa.rand(odim, idim, p_wi) # # print "sparse wi", wi_ # wi = wi_.todense() # tmp_idx = wi != 0 # tmp = wi[tmp_idx] # # tmp_r = np.random.normal(0, 1, size=(tmp.shape[1],)) # tmp_r = np.random.uniform(-1, 1, size=(tmp.shape[1],)) # wi[tmp_idx] = tmp_r # # return dense repr # return np.asarray(wi) return create_matrix_sparse_random(odim, idim, density, dist = dist)
d7d1f986228ea982d01010080dfb7749444c31c2
3,652,944
import six def _MakeApiMap(root_package, api_config): """Converts a map of api_config into ApiDef. Args: root_package: str, root path of where generate api will reside. api_config: {api_name->api_version->{discovery,default,version,...}}, description of each api. Returns: {api_name->api_version->ApiDef()}. Raises: NoDefaultApiError: if for some api with multiple versions default was not specified. """ apis_map = {} apis_with_default = set() for api_name, api_version_config in six.iteritems(api_config): api_versions_map = apis_map.setdefault(api_name, {}) has_default = False for api_version, api_config in six.iteritems(api_version_config): if api_config.get('gcloud_gapic_library'): gapic_client = _MakeGapicClientDef(root_package, api_name, api_version) else: gapic_client = None default = api_config.get('default', len(api_version_config) == 1) if has_default and default: raise NoDefaultApiError( 'Multiple default client versions found for [{}]!' .format(api_name)) has_default = has_default or default enable_mtls = api_config.get('enable_mtls', True) mtls_endpoint_override = api_config.get('mtls_endpoint_override', '') api_versions_map[api_version] = api_def.APIDef( _MakeApitoolsClientDef(root_package, api_name, api_version), gapic_client, default, enable_mtls, mtls_endpoint_override) if has_default: apis_with_default.add(api_name) apis_without_default = set(apis_map.keys()).difference(apis_with_default) if apis_without_default: raise NoDefaultApiError('No default client versions found for [{0}]!' .format(', '.join(sorted(apis_without_default)))) return apis_map
1e44188d1cced2255ca4e30efc36631bda305b57
3,652,945
def make_election_frame(votes, shares=None, party_names=None, margin_idx=None): """ Constructs an election frame from at most two arrays. If provided, """ if votes.ndim == 1: votes = votes.reshape(-1,1) if votes.shape[-1] == 1 and shares is not None: votes, shares = votes, shares elif votes.shape[-1] > 1 and shares is None: if margin_idx is None: totals = votes.sum(axis=1).reshape(-1,1) else: totals = votes[:,margin_idx].reshape(-1,1) votes = np.delete(votes, margin_idx, axis=1) shares = votes / totals votes = totals data = np.hstack((votes, shares)) if party_names is None: party_names = ['Party_{}'.format(i) for i in range(data.shape[-1] - 1)] return pd.DataFrame(data, columns=['Votes'] + list(party_names))
d165e32b61c23522dfadab4ac04e438b2d7710dd
3,652,946
import json


def payload_from_api_post_event(event):
    """Maps an API event to the expected payload"""
    # event = {
    #     'timeserie1': [(1, 100), (2, 100)],
    #     'timeserie2': [(3, 100), (4, 100)],
    # }
    body = json.loads(event['body'])
    return body
897a3d2e846e7bbf96d0acd288924d96b07acc78
3,652,947
def format_link_header(link_header_data):
    """Return a string ready to be used in a Link: header."""
    links = ['<{0}>; rel="{1}"'.format(data['link'], data['rel'])
             for data in link_header_data]
    return ', '.join(links)
9a68ff381d51e6e10fe257d2d2d6766295ffc050
3,652,948
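A usage sketch for the `format_link_header` entry above (the URLs are illustrative):

links = [
    {"link": "https://api.example.com/items?page=2", "rel": "next"},
    {"link": "https://api.example.com/items?page=10", "rel": "last"},
]
print(format_link_header(links))
# <https://api.example.com/items?page=2>; rel="next", <https://api.example.com/items?page=10>; rel="last"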
def parse_collection_members(object_: dict) -> dict: """Parse the members of a collection to make it easier to insert in database. :param object_: The body of the request having object members :type object_: dict :return: Object with parsed members :rtype: dict """ members = list() for member in object_['members']: # example member # { # "@id": "/serverapi/LogEntry/aab38f9d-516a-4bb2-ae16-068c0c5345bd", # "@type": "LogEntry" # } member_id = member['@id'].split('/')[-1] member_type = member['@type'] if crud.item_exists(member_type, member_id, get_session()): members.append({ "id_": member_id, "@type": member_type, }) else: error = HydraError(code=400, title="Data is not valid") return error_response(error) object_['members'] = members return object_
6d5a99c9346cd2f8bf3a00c13ab291c7b972abdc
3,652,949
def PoissonWasserstein_S2(tau, rho, function1, function2, numerical=False): """ Computes the Poisson bracket of two linear functionals on the space P^{OO}(S^2), of measures with a smooth positive density function on the 2-sphere S^2, at a measure in P^{OO}(S^2). The Poisson bracket on P^{OO}(S^2) is induced by a Poisson bivector field pi_{tau} on S^2. Let (theta, phi) be spherical coordinates on S^2 such that (theta, phi) |-> (sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta)). Then pi_{tau} has the following representation, pi_{tau} = (tau / sin(theta)) * d/d{theta} ^ d/d{phi}, ----------> (1) for some conformal factor tau on S^2. Hence, the Poisson bracket on P^{OO}(S^2) is given by {F_{f}, F_{h}}(mu_{rho}) = 1/(4*pi) * int_{0}^{2*pi} int_{0}^{pi} (df/d{theta} * dh/d{phi} - dh/d{theta} * df/d{phi}) * tau * rho * sin(theta) d{theta1}d{theta2}, ----------> (2) where F_{f} and F_{h} are linear functionals on P^{OO}(S^2) induced by scalar functions f and h on S^2, and mu_{rho} = rho * sin(theta) * |d{theta}^d{phi}| ----------> (3) is a measure in P^{OO}(S^2). Parameters ========== tau: string literal expression Represents the conformal factor tau in (1) rho: string literal expression Represents the density function rho in (1) function1: string literal expression Represents the function f in (2) function2: string literal expression Represents the function h in (2) numerical: Boolean expression, optional Indicates numerical computation. By default, numerical == False. Returns: a symbolic expression or a tuple ======= * A symbolic expression of the double integral in (2) * A tuple (numerical approximation of the double integral in (2), estimated error) """ # Define the symbolic variables theta and phi theta, phi = sym.symbols('theta phi') # Convert the string literal expressions tau, rho, function1 and function2 into symbolic variables, in that order tau = sym.sympify(tau) rho = sym.sympify(rho) ff = sym.sympify(function1) hh = sym.sympify(function2) # Compute the Poisson bracket of function1 and function2 induced by pi_{tau} in (1): # (df/d{theta} * dh/d{phi} - dh/d{theta} * df/d{phi}) * tau bracket_ff_hh = (sym.diff(ff, theta) * sym.diff(hh, phi) - sym.diff(hh, theta) * sym.diff(ff, phi)) * tau # Compute the integrand of the double integral in (2) integrand = bracket_ff_hh * rho * sym.sin(theta) if numerical == True: # Indicate numerical computation # Transform the symbolic variable 'integrand' into a NumPy function that allows a numerical evaluation integrand = sym.lambdify([theta, phi], 1/(4*sym.pi) * integrand, 'numpy') # Return a tuple: (numerical approximation of the double integral in (2), estimated error) return dblquad(integrand, 0, 2*np.pi, lambda phi: 0, lambda phi: np.pi) # Compute the the double integral in (2) integrand = sym.integrate(integrand, (theta, 0, sym.pi)) integral = sym.integrate(integrand, (phi, 0, 2*sym.pi)) # Return a symbolic expression of the double integral in (2) return 1/(4*sym.pi) * integral
7cb3884ecc665ced43c30dae1dca3c4f4f00af4d
3,652,950
def poly_in_gdf():
    """ Fixture for a bounding box polygon. """
    return make_poly_in_gdf()
bf57298bc002aababa8df30b90dbbe7b91858afc
3,652,951
from typing import List

import pandas as pd


def apply_inclusion_exclusion_criteria(
    df: pd.DataFrame, col: str, criteria: List[List[str]]
) -> pd.Series:
    """Filter out files based on `criteria`, a nested list of row values to
    include or exclude, respectively

    :param df: dataframe to filter
    :type df: pd.DataFrame
    :param col: column to filter
    :type col: str
    :param criteria: nested list containing row values to include or exclude.
        May be `None` or `['all']` to indicate that all values are included.
    :type criteria: List[List[str]]
    :return: filtered column of `df`
    :rtype: pd.Series
    """
    if criteria is None:
        return df.loc[:, col]

    # copy dataframe to be filtered
    out = df.copy()

    # join criteria
    masks = ["|".join(c) for c in criteria]

    # inclusion
    if masks[0] != "all":
        out = out.loc[out[col].str.contains(masks[0], na=False)]

    # exclusion
    out = out.loc[~out[col].str.contains(masks[1], na=False)]

    return out
eb8060d8d9d06a798a8617935ff8506698e63a33
3,652,952
import re


def prune_string(string):
    """Prune a string.

    - Replace multiple consecutive spaces with a single space.
    - Remove spaces after open brackets.
    - Remove spaces before close brackets.
    """
    return re.sub(
        r" +(?=[\)\]\}])",
        "",
        re.sub(r"(?<=[\(\[\{]) +", "", re.sub(r" +", " ", string)),
    )
53a2c00f50c16b568a75e59bc32a124a5f152b4a
3,652,953
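A usage sketch for the `prune_string` entry above:

print(prune_string("call(  a,   b )"))   # 'call(a, b)'
print(prune_string("[ 1,  2 ]  { x }"))  # '[1, 2] {x}'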
def detect_face(MaybeImage): """ Take an image and return positional information for the largest face in it. Args: MaybeImage: An image grabbed from the local camera. Returns: Maybe tuple((bool, [int]) or (bool, str)): True and list of positional coordinates of the largest face found. False and an error string if no faces are found. """ if MaybeImage.success: image = MaybeImage.result else: return MaybeImage faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') # Load face classifier major_ver = (cv2.__version__).split('.')[0] if int(major_ver) < 3: flag_for_detect = cv2.cv.CV_HAAR_SCALE_IMAGE else: flag_for_detect = cv2.CASCADE_SCALE_IMAGE # Detect faces in the image # faces will be an iterable object faces = faceCascade.detectMultiScale( image=image, scaleFactor=1.1, minNeighbors=5, minSize=(40, 40), flags = flag_for_detect ) try: # Assume largest face is the subject face = faces[0] # [0] index is largest face. return Maybe(True, face) except IndexError: return Maybe(False, "No faces detected. This may be due to low or uneven \ lighting.")
3b530f7de671fe3d26f3f50adcead639c45be78c
3,652,954
import cv2
import numpy as np


def compute_TVL1(prev, curr, bound=15):
    """Compute the TV-L1 optical flow."""
    TVL1 = cv2.DualTVL1OpticalFlow_create()
    flow = TVL1.calc(prev, curr, None)
    assert flow.dtype == np.float32

    flow = (flow + bound) * (255.0 / (2 * bound))
    flow = np.round(flow).astype(int)
    flow[flow >= 255] = 255
    flow[flow <= 0] = 0

    return flow
9020667c141c9be8034c330c9a4943a32b3f3195
3,652,955
from functools import partial, partialmethod, wraps


def _un_partial_ize(func):
    """
    Alter functions working on 1st arg being a callable, to descend it if it's a partial.
    """

    @wraps(func)
    def wrapper(fn, *args, **kw):
        if isinstance(fn, (partial, partialmethod)):
            fn = fn.func
        return func(fn, *args, **kw)

    return wrapper
acb9409ed8fa08e8a1f915b504b073b390fa4520
3,652,956
import json

import requests


def search_full_text(text, ipstreet_api_key):
    """sends input text to /full_text semantic search endpoint. returns json results"""
    endpoint = 'https://api.ipstreet.com/v2/full_text'
    headers = {'x-api-key': ipstreet_api_key}
    payload = json.dumps({'raw_text': str(text),
                          'q': {
                              'start_date': '1976-01-01',
                              'start_date_type': 'application_date',
                              'end_date': '2017-03-10',
                              'end_date_type': 'application_date',
                              'applied': True,
                              'granted': True,
                              'expired': True,
                              'max_expected_results': 500,
                              'page_size': 500,
                          }})
    r = requests.post(endpoint, headers=headers, data=payload)
    return r.json()
7112e04698dcfaf3072b30d0085fa2dc18043f76
3,652,957
def main(filename, plotDir='Plots/'): """ """ # Which pixels and sidebands? pixelOffsets = Pointing.GetPixelOffsets('COMAP_FEEDS.dat') # READ IN THE DATA d = h5py.File(filename) tod = d['spectrometer/tod'] mjd = d['spectrometer/MJD'][:] if len(d['pointing/az'].shape) > 1: az = d['pointing/az'][0,:] el = d['pointing/el'][0,:] else: az = d['pointing/az'][:] el = d['pointing/el'][:] mjdpoint = d['pointing/MJD'][:] slewDist = SlewDistance(az) ra, dec, pa, az, el, mjd = Pointing.GetPointing(az, el, mjd, mjdpoint, pixelOffsets, lon=Pointing.comap_lon, lat=Pointing.comap_lat) # Calculate data sizes: nHorns = tod.shape[0] nSBs = tod.shape[1] nFreqs = tod.shape[2] nSamps = tod.shape[3] # Calculate the position of Jupiter clon, clat, diam = EphemNew.rdplan(mjd[0:1], 5, Pointing.comap_lon*np.pi/180., Pointing.comap_lat*np.pi/180.) EphemNew.precess(clon, clat, mjd[0:1]) # Loop over horns/SBs P1out = None prefix = filename.split('/')[-1].split('.')[0] for iHorn in range(nHorns): print('Processing Horn {:d}'.format(iHorn+1)) _tod = np.nanmean(np.nanmean(tod[iHorn,:,5:-5,:],axis=0),axis=0) #Tim: Pass this function whatever chunk of time-ordered data you have in memory P1, P1e, cross, mweight, weight, model = FitSource.FitTOD(_tod, ra[0,:], # horn 0 because we want the relative offset from Focal Plane dec[0,:], clon*180./np.pi, clat*180./np.pi, pa[0,:], prefix='{}_Horn{}'.format(prefix, iHorn+1), plotDir=plotDir) if isinstance(P1out, type(None)): P1out = np.zeros((nHorns, len(P1))) Peout = np.zeros((nHorns, len(P1e))) mout = np.zeros(mweight.shape) hout = np.zeros(weight.shape) if not isinstance(P1, type(None)): P1out[iHorn, :] = P1 Peout[iHorn, :] = P1e mout += mweight*(model+1)**2 hout += weight*(model+1)**2 pyplot.imshow(mout/hout, extent=[-100/2. * 1.5, 100/2.*1.5,-100/2. * 1.5, 100/2.*1.5] ) pyplot.xlabel('Az offset (arcmin)') pyplot.ylabel('EL offset (arcmin)') pyplot.title('{}'.format(prefix)) pyplot.grid(True) pyplot.savefig('{}/FeedPositions_{}.png'.format(plotDir, prefix), bbox_inches='tight') pyplot.clf() meanMJD = np.mean(mjd) meanEl = np.median(el) meanAz = np.median(az) d.close() print('SLEW DISTANCE', slewDist) return P1out, Peout, mout/hout, meanMJD, meanEl, meanAz
9f059f49222ed8983008f32e5e7edbc303dc1328
3,652,958
import pandas as pd


def add_rows(df, row_list=[], column_list=[], append=False):
    """ add a list of rows by index number for a wide form dataframe """
    df = df.filter(items=row_list, axis=0)
    df = pd.DataFrame(df.sum()).T
    return df
3620e625716e7570e095efd219b2505f8fb89413
3,652,959
from typing import OrderedDict def coerce_data_type_value(context, presentation, data_type, entry_schema, constraints, value, # pylint: disable=unused-argument aspect): """ Handles the ``_coerce_data()`` hook for complex data types. There are two kinds of handling: 1. If we have a primitive type as our great ancestor, then we do primitive type coersion, and just check for constraints. 2. Otherwise, for normal complex data types we return the assigned property values while making sure they are defined in our type. The property definition's default value, if available, will be used if we did not assign it. We also make sure that required definitions indeed end up with a value. """ primitive_type = data_type._get_primitive_ancestor(context) if primitive_type is not None: # Must be coercible to primitive ancestor value = coerce_to_primitive(context, presentation, primitive_type, constraints, value, aspect) else: definitions = data_type._get_properties(context) if isinstance(value, dict): temp = OrderedDict() # Fill in our values, but make sure they are defined for name, v in value.iteritems(): if name in definitions: definition = definitions[name] definition_type = definition._get_type(context) definition_entry_schema = definition.entry_schema definition_constraints = definition._get_constraints(context) temp[name] = coerce_value(context, presentation, definition_type, definition_entry_schema, definition_constraints, v, aspect) else: context.validation.report( 'assignment to undefined property "%s" in type "%s" in "%s"' % (name, data_type._fullname, presentation._fullname), locator=get_locator(v, value, presentation), level=Issue.BETWEEN_TYPES) # Fill in defaults from the definitions, and check if required definitions have not been # assigned for name, definition in definitions.iteritems(): if (temp.get(name) is None) and hasattr(definition, 'default') \ and (definition.default is not None): definition_type = definition._get_type(context) definition_entry_schema = definition.entry_schema definition_constraints = definition._get_constraints(context) temp[name] = coerce_value(context, presentation, definition_type, definition_entry_schema, definition_constraints, definition.default, 'default') if getattr(definition, 'required', False) and (temp.get(name) is None): context.validation.report( 'required property "%s" in type "%s" is not assigned a value in "%s"' % (name, data_type._fullname, presentation._fullname), locator=presentation._get_child_locator('definitions'), level=Issue.BETWEEN_TYPES) value = temp elif value is not None: context.validation.report('value of type "%s" is not a dict in "%s"' % (data_type._fullname, presentation._fullname), locator=get_locator(value, presentation), level=Issue.BETWEEN_TYPES) value = None return value
521d022c5aead7f4066b637bbe6a84a07d4e728e
3,652,960
def Inst2Vec(
    bytecode: str, vocab: vocabulary.VocabularyZipFile, embedding
) -> np.ndarray:
  """Transform an LLVM bytecode to an array of embeddings.

  Args:
    bytecode: The input bytecode.
    vocab: The vocabulary.
    embedding: The embedding.

  Returns:
    An array of embeddings.
  """
  embed = lambda x: EmbedEncoded(x, embedding)
  encode = lambda x: EncodeLlvmBytecode(x, vocab)
  return embed(encode(PreprocessLlvmBytecode(bytecode)))
4c56c4b35b51ce4b41f203579ca0c8d552c18b0c
3,652,961
import _ast def extract_from_code(code, gettext_functions): """Extract strings from Python bytecode. >>> from genshi.template.eval import Expression >>> expr = Expression('_("Hello")') >>> list(extract_from_code(expr, GETTEXT_FUNCTIONS)) [('_', u'Hello')] >>> expr = Expression('ngettext("You have %(num)s item", ' ... '"You have %(num)s items", num)') >>> list(extract_from_code(expr, GETTEXT_FUNCTIONS)) [('ngettext', (u'You have %(num)s item', u'You have %(num)s items', None))] :param code: the `Code` object :type code: `genshi.template.eval.Code` :param gettext_functions: a sequence of function names :since: version 0.5 """ def _walk(node): if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \ and node.func.id in gettext_functions: strings = [] def _add(arg): if isinstance(arg, _ast_Str) \ and isinstance(_ast_Str_value(arg), unicode): strings.append(_ast_Str_value(arg)) elif isinstance(arg, _ast_Str): strings.append(unicode(_ast_Str_value(arg), 'utf-8')) elif arg: strings.append(None) [_add(arg) for arg in node.args] if hasattr(node, 'starargs'): _add(node.starargs) if hasattr(node, 'kwargs'): _add(node.kwargs) if len(strings) == 1: strings = strings[0] else: strings = tuple(strings) yield node.func.id, strings elif node._fields: children = [] for field in node._fields: child = getattr(node, field, None) if isinstance(child, list): for elem in child: children.append(elem) elif isinstance(child, _ast.AST): children.append(child) for child in children: for funcname, strings in _walk(child): yield funcname, strings return _walk(code.ast)
eb766056ab08d31d20728717570ffb6deb240e03
3,652,962
import torch def sample_raw_locations(stacking_program, address_suffix=""): """ Samples the (raw) horizontal location of blocks in the stacking program. p(raw_locations | stacking_program) Args stacking_program [num_blocks] Returns [num_blocks] """ device = stacking_program[0].device dist = pyro.distributions.Independent( pyro.distributions.Normal(torch.zeros((len(stacking_program),), device=device), 1), reinterpreted_batch_ndims=1, ) return pyro.sample(f"raw_locations{address_suffix}", dist)
8c31f916a36fadf22c86dfe671fa2f7000609f14
3,652,963
def get_raw_data(params, data_type=1): """Method to filter which report user wants.""" # class="table table-bordered" data = None raw_data = [] td_zeros = '<td>0</td>' * 12 tblanks = ['M', 'F'] * 6 blanks = ['0'] * 13 csvh = ['0 - 5 yrs', '', '6 - 10 yrs', '', '11 - 15 yrs', '', '16 - 17 yrs', '', '18+ yrs', '', 'Sub-Total', '', ''] try: report_type = int(params['report_id']) if report_type == 6: data, raw_data = get_ovc_values(params) elif report_type == 5: data, raw_data = get_raw_values(params) elif report_type == 4: # Other values otherd, vls_ids = {}, {} idata, idatas = {}, {} otherd[1] = 'ALL OTHER DISEASES' otherd[2] = 'FIRST ATTENDANCES' otherd[3] = 'RE-ATTENDANCES' otherd[4] = 'REFERRALS IN' otherd[5] = 'REFERRALS OUT' vals = get_dict(field_name=['new_condition_id']) for vls in vals: vls_ids[vls] = vls if 'NCOD' in vals: del vals['NCOD'] r_title = "{period_name}ly Health Report {unit_type}" dt = '<table class="table table-bordered"><thead>' dt += "<tr><th colspan='16'>%s" % (r_title) dt += '</th></tr>' dt += "<tr><th colspan='16'>{cci_si_name}</th></tr>" dt += case_load_header(report_type=3) dt += "</thead>" # Fetch raw data rdatas = get_institution_data(params, report_type) rdata = get_totals(rdatas['data'], vls_ids) if rdata: idata = write_row(rdata) idatas = write_row(rdata, is_raw=True) # Get totals itotals = col_totals(rdata) itotal = write_row([itotals]) # Show the values total_h = 0 diss_vals = {} hel_head = ['', 'Health Report'] + [''] * 13 hel_title = ['', 'List of Diseases'] + tblanks + ['Total'] if rdata: raw_data.append(hel_head) raw_data.append(['', ''] + csvh) raw_data.append(hel_title) cnt = 1 other_items = {1: 'NCOD'} for val in vals: val_name = vals[val] val_data = diss_vals[val] if val in diss_vals else 0 total_h += val_data dt += '<tr><td>%s</td><td>%s</td>' % (str(cnt) + '.', val_name) if val in idata: dt += '%s' % (idata[val]) else: dt += '<td></td>%s<td>0</td></tr>' % (td_zeros) if val in idatas: rd = idatas[val] del rd[0:2] else: rd = blanks if rdata: raw_data.append([str(cnt) + '.', val_name] + rd) cnt += 1 for oval in otherd: oval_name = otherd[oval] sval = other_items[oval] if oval in other_items else oval dt += '<tr><td>%s</td><td>%s</td>' % (str(cnt) + '.', oval_name) if sval in idata: dt += '%s' % (idata[sval]) else: dt += '<td></td>%s<td>0</td></tr>' % (td_zeros) if sval in idatas: rd = idatas[sval] del rd[0:2] else: rd = blanks if rdata: raw_data.append([str(cnt) + '.', oval_name] + rd) cnt += 1 if rdata: del itotals[1] raw_data.append([''] + itotals) dt += '<tr><td></td><td>Total</td>' dt += '%s' % (itotal['TOTAL']) dt += '<table>' data = dt elif report_type == 1: # KNBS List ids = {} ids['CSAB'] = 'Child offender' ids['CCIP'] = 'Children on the streets' ids['CSIC'] = 'Neglect' ids['CIDC'] = 'Orphaned Child' ids['CCIP'] = 'Children on the streets' ids['CDIS'] = 'Abandoned' ids['CHCP'] = 'Lost and found children' ids['CSDS'] = 'Drug and Substance Abuse' ids['CSNG'] = 'Physical abuse/violence' ids['CDSA'] = 'Abduction' ids['CCDF'] = 'Defilement' ids['CTRF'] = 'Child Labour' # Query just like case load all_datas = get_data(params) all_data = all_datas['data'] knb_ids, rdata, rdatas = {}, {}, {} # Just send ids as ids for easier rendering later # Have to get all ids else errors case_categories = get_categories() for knb_id in ids: knb_ids[knb_id] = knb_id data = get_totals(all_data, case_categories) if data: rdata = write_row(data) rdatas = write_row(data, is_raw=True) rtotals = col_totals(data) rtotal = write_row([rtotals]) rtitle = 'KNBS REPORT %s %s' 
% ( params['month'], params['year']) # Just add title whether there is data or not knb_head = ['', rtitle.upper()] + [''] * 13 knb_title = ['', 'Case Category'] + tblanks + ['Total'] if data: raw_data.append(knb_head) raw_data.append(['', ''] + csvh) raw_data.append(knb_title) dt = '<table class="table table-bordered"><thead>' dt += '<tr><th colspan="16">%s</th></tr>' % (rtitle.upper()) dt += case_load_header(report_type=4) dt += "</thead>" knbcnt = 1 if data: for val in ids: val_name = ids[val] dt += '<tr><td>%s</td><td>%s</td>' % (knbcnt, val_name) if val in rdata: dt += '%s' % (rdata[val]) else: dt += '<td></td>%s<td>0</td></tr>' % (td_zeros) if val in rdatas: rd = rdatas[val] del rd[0:2] else: rd = blanks raw_data.append([knbcnt, val_name] + rd) knbcnt += 1 raw_data.append(rtotals) dt += '<tr><td colspan="2"><b>Total</b></td>' dt += '%s' % (rtotal['TOTAL']) dt += '<table>' data = dt elif report_type == 3: discs = {'AEES': 'AEES'} dvals = {2: 'TANA', 4: 'TARR', 5: 'TRIN', 6: 'TARE'} rvals = {4: 'TARR', 5: 'TRIN', 6: 'TARE'} evals = {8: 'AEES', 9: 'AEAB', 10: 'TDER', 11: 'TDTR', 12: 'TDEX', 13: 'DTSI', 15: 'AEDE'} svals = {8: 'AEES', 9: 'AEAB', 10: 'TDER', 11: 'TDTR', 12: 'TDEX', 13: 'DTSI', 14: '14'} death_vals = {15: 'AEDE'} # Get all types of discharges discharges = get_dict(field_name=['discharge_type_id']) for disc in discharges: discs[disc] = disc # This is it pdatas = get_institution_data(params, report_type) devals = {} for dval in dvals: deq = dvals[dval] devals[deq] = deq pdata = get_totals(pdatas['data'], devals) odata = get_totals(pdatas['odata'], devals) ddata = get_totals(pdatas['ddata'], discs) edata = get_totals(pdatas['death'], death_vals) ids = {2: 'New Admissions', 3: 'Returnees', 4: ' - Relapse', 5: ' - Transfer in', 6: ' - Return after escape', 7: 'Exits', 8: ' - Escapee', 9: ' - Abducted', 10: ' - Early Release', 11: ' - Released on License', 12: ' - Released on Expiry of Order', 13: ' - Transfer to another Institution', 14: ' - Other exits', 15: 'Death'} r_title = "{cci_si_title} {period_name}ly Returns {unit_type}" dt = '<table class="table table-bordered"><thead>' dt += "<tr><th colspan='16'>%s" % (r_title) dt += "</th></tr><tr><th colspan='16'>{cci_si_name}</th></tr>" dt += case_load_header(report_type=2) dt += "</thead>" # This is it popdata, popdatas = {}, {} opdata, opdatas = {}, {} dopdata, dopdatas = {}, {} depdata, depdatas = {}, {} osdata = [] if pdata: all_returnees = get_others(pdata, rvals, 3, True) pdata.append(all_returnees) popdata = write_row(pdata) popdatas = write_row(pdata, is_raw=True) # Old data if odata: p1 = col_totals(odata) osdata.append(p1) opdata = write_row(osdata) opdatas = write_row(osdata, is_raw=True) # Discharge data if ddata: all_other = get_others(ddata, evals, 14) ddata.append(all_other) all_exits = get_others(ddata, svals, 7, True) ddata.append(all_exits) dopdata = write_row(ddata) dopdatas = write_row(ddata, is_raw=True) # Deaths as a type of discharge if edata: depdata = write_row(edata) depdatas = write_row(edata, is_raw=True) # Just add title whether there is data or not pop_head = ['Institution Population'] + ['-'] * 14 pop_title = ['Category', 'Sub-category'] + tblanks + ['Total'] all_var = merge_two_dicts(popdata, dopdata) all_rvar = merge_two_dicts(popdatas, dopdatas) all_vars = merge_two_dicts(all_var, depdata) all_rvars = merge_two_dicts(all_rvar, depdatas) if pdata: raw_data.append(pop_head) raw_data.append(['', ''] + csvh) raw_data.append(pop_title) si_total = 0 # All totals final_totals = get_final_totals(osdata, 
pdata, ddata) ptotal = write_row([final_totals]) ptotal_raw = write_row([final_totals], is_raw=True) td_pad = '</td><td>' s_text = '<b>Total Children by End of Previous {period_name}</b>' dt += '<tr><td colspan="3">%s' % (s_text) if opdata: o_data = opdata['TOTAL'].replace('<td></td>', '') dt += o_data raw_ol = opdatas['TOTAL'] del raw_ol[0:2] raw_data.append(['From previous period', ''] + raw_ol) else: dt += '</td>%s<td>0</td></tr>' % (td_zeros) ftotal = ptotal['TOTAL'].replace('<td></td>', '') for val in ids: vname = ids[val] v_name = vname.replace(' - ', td_pad) r_name = vname.replace(' - ', '') val_name = v_name + td_pad if '<td>' not in v_name else v_name vraw = [r_name, ''] if '<td>' not in v_name else ['', r_name] val_data = 0 if val in dvals: vd = dvals[val] elif val in evals: vd = evals[val] else: vd, val_data = str(val), 0 dt += '<tr><td width="1px"></td><td>%s</td>' % (val_name) if vd in all_rvars: rd = all_rvars[vd] del rd[0:2] else: rd = blanks if all_vars: raw_data.append(vraw + rd) if vd in all_vars: my_val = all_vars[vd].replace('<td></td>', '') dt += '%s' % (my_val) else: dt += '%s<td>0</td></tr>' % (td_zeros) si_total += val_data t_text = '<b>Total Children by End of Reporting {period_name}</b>' dt += '<tr><td colspan="3">%s</td>' % (t_text) dt += '%s' % (ftotal) dt += '</table>' if all_vars: raw_data.append(ptotal_raw['TOTAL']) data = dt except Exception as e: print('Error with raw data - %s' % (str(e))) raise e else: return data, raw_data
235a9cfd73e89bc93626fc3ccc49a02976038187
3,652,964
import re
import base64
import urllib2

def get_existing_cert(server, req_id, username, password, encoding='b64'):
    """
    Gets a certificate that has already been created.

    Args:
        server: The FQDN to a server running the Certification Authority
            Web Enrollment role (must be listening on https)
        req_id: The request ID to retrieve
        username: The username for authentication
        password: The password for authentication
        encoding: The desired encoding for the returned certificate.
            Possible values are "bin" for binary and "b64" for Base64 (PEM)
    Returns:
        The issued certificate
    Raises:
        CouldNotRetrieveCertificateException: If something went wrong while fetching the cert
    """
    headers = {
        # We need certsrv to think we are a browser, or otherwise the Content-Type will be wrong
        'User-Agent': 'Mozilla/5.0 certsrv (https://github.com/magnuswatn/certsrv)',
        'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (username, password))
    }
    cert_url = 'https://%s/certsrv/certnew.cer?ReqID=%s&Enc=%s' % (server, req_id, encoding)
    cert_req = urllib2.Request(cert_url, headers=headers)
    response = urllib2.urlopen(cert_req)
    response_content = response.read()
    if response.headers.type != 'application/pkix-cert':
        # The response was not a cert. Something must have gone wrong
        try:
            error = re.search('Disposition message:[^\t]+\t\t([^\r\n]+)', response_content).group(1)
        except AttributeError:
            error = 'An unknown error occurred'
        raise CouldNotRetrieveCertificateException(error, response_content)
    else:
        return response_content
b9f4d04a0e30190870880961c100fc71964bf61d
3,652,965
def _get_go2parents(go2parents, goid, goterm): """Add the parent GO IDs for one GO term and their parents.""" if goid in go2parents: return go2parents[goid] parent_goids = set() for parent_goterm in goterm.parents: parent_goid = parent_goterm.id parent_goids.add(parent_goid) parent_goids |= _get_go2parents(go2parents, parent_goid, parent_goterm) go2parents[goid] = parent_goids return parent_goids
e4585bb84a4ac9532468451036a609a1d561c928
3,652,968
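A small self-contained sketch of the memoized recursion above; the SimpleNamespace objects stand in for real GODag terms and are purely illustrative.

from types import SimpleNamespace

# Toy hierarchy: child -> mid -> root
root = SimpleNamespace(id="GO:0008150", parents=[])
mid = SimpleNamespace(id="GO:0009987", parents=[root])
child = SimpleNamespace(id="GO:0007049", parents=[mid])

go2parents = {}
parents = _get_go2parents(go2parents, child.id, child)
print(parents)                   # {'GO:0009987', 'GO:0008150'} (set order may vary)
print(go2parents["GO:0009987"])  # {'GO:0008150'}, cached for later lookups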
def compute_conditional_statistics(x_test, x, kernel, ind): """ This version uses cho_factor and cho_solve - much more efficient when using JAX Predicts marginal states at new time points. (new time points should be sorted) Calculates the conditional density: p(xₙ|u₋, u₊) = 𝓝(Pₙ @ [u₋, u₊], Tₙ) :param x_test: time points to generate observations for [N] :param x: inducing state input locations [M] :param kernel: prior object providing access to state transition functions :param ind: an array containing the index of the inducing state to the left of every input [N] :return: parameters for the conditional mean and covariance P: [N, D, 2*D] T: [N, D, D] """ dt_fwd = x_test[..., 0] - x[ind, 0] dt_back = x[ind + 1, 0] - x_test[..., 0] A_fwd = kernel.state_transition(dt_fwd) A_back = kernel.state_transition(dt_back) Pinf = kernel.stationary_covariance() Q_fwd = Pinf - A_fwd @ Pinf @ A_fwd.T Q_back = Pinf - A_back @ Pinf @ A_back.T A_back_Q_fwd = A_back @ Q_fwd Q_mp = Q_back + A_back @ A_back_Q_fwd.T jitter = 1e-8 * np.eye(Q_mp.shape[0]) chol_Q_mp = cho_factor(Q_mp + jitter, lower=True) Q_mp_inv_A_back = cho_solve(chol_Q_mp, A_back) # V = Q₋₊⁻¹ Aₜ₊ # The conditional_covariance T = Q₋ₜ - Q₋ₜAₜ₊ᵀQ₋₊⁻¹Aₜ₊Q₋ₜ == Q₋ₜ - Q₋ₜᵀAₜ₊ᵀL⁻ᵀL⁻¹Aₜ₊Q₋ₜ T = Q_fwd - A_back_Q_fwd.T @ Q_mp_inv_A_back @ Q_fwd # W = Q₋ₜAₜ₊ᵀQ₋₊⁻¹ W = Q_fwd @ Q_mp_inv_A_back.T P = np.concatenate([A_fwd - W @ A_back @ A_fwd, W], axis=-1) return P, T
458e1d99a739825de8b72fe125bdb00e4a1d7b9f
3,652,969
def int_to_bytes(value: int) -> bytes: """ Encode an integer to an array of bytes. :param value: any integer :return: integer value representation as bytes """ return value.to_bytes(length=BYTES_LENGTH, byteorder=BYTES_ORDER)
4f7fc878d8632c1ab250e8821f55e280a6be9b9b
3,652,970
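A quick round-trip sketch for the encoder above; BYTES_LENGTH and BYTES_ORDER are module constants not shown in the snippet, so the values here are assumptions.

BYTES_LENGTH = 8     # assumed value of the module constant
BYTES_ORDER = "big"  # assumed value of the module constant

encoded = int_to_bytes(1234)
print(encoded)                                         # b'\x00\x00\x00\x00\x00\x00\x04\xd2'
print(int.from_bytes(encoded, byteorder=BYTES_ORDER))  # 1234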
def generate_1d_trajectory_distribution( n_demos, n_steps, initial_offset_range=3.0, final_offset_range=0.1, noise_per_step_range=20.0, random_state=np.random.RandomState(0)): """Generates toy data for testing and demonstration. Parameters ---------- n_demos : int Number of demonstrations n_steps : int Number of steps initial_offset_range : float, optional (default: 3) Range of initial offset from cosine final_offset_range : float, optional (default: 0.1) Range of final offset from cosine noise_per_step_range : float, optional (default: 20) Factor for noise in each step random_state : RandomState, optional (default: seed 0) Random state Returns ------- T : array, shape (n_steps,) Times Y : array, shape (n_demos, n_steps, 1) Demonstrations (positions) """ T = np.linspace(0, 1, n_steps) Y = np.empty((n_demos, n_steps, 1)) A = create_finite_differences_matrix_1d(n_steps, dt=1.0 / (n_steps - 1)) cov = np.linalg.inv(A.T.dot(A)) L = np.linalg.cholesky(cov) for demo_idx in range(n_demos): Y[demo_idx, :, 0] = np.cos(2 * np.pi * T) if initial_offset_range or final_offset_range: initial_offset = initial_offset_range * (random_state.rand() - 0.5) final_offset = final_offset_range * (random_state.rand() - 0.5) Y[demo_idx, :, 0] += np.linspace( initial_offset, final_offset, n_steps) if noise_per_step_range: noise_per_step = (noise_per_step_range * L.dot(random_state.randn(n_steps))) Y[demo_idx, :, 0] += noise_per_step return T, Y
cadc92671b3b89285db9427738dcf2856c97a045
3,652,971
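A short usage sketch for the toy-data generator above, assuming its helper create_finite_differences_matrix_1d is available from the same module.

import matplotlib.pyplot as plt

T, Y = generate_1d_trajectory_distribution(n_demos=10, n_steps=101)
for demo in Y:
    plt.plot(T, demo[:, 0], alpha=0.5)  # each noisy cosine demonstration
plt.xlabel("time")
plt.ylabel("position")
plt.show()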
def encrypt(binary_plaintext, binary_key): """Generate binary ciphertext from binary plaintext with AES.""" padded_plaintext = pad_plaintext(binary_plaintext, 128) subkeys = key_schedule(binary_key) final_blocks = [] for block in block_split(padded_plaintext, 128): block_matrix = binary_to_matrix(block) block_matrix = add_round_key(block_matrix, subkeys[0]) for round in xrange(1, 10): block_matrix = byte_sub(block_matrix) block_matrix = shift_rows(block_matrix) block_matrix = mix_columns(block_matrix, COLUMN_MIX) block_matrix = add_round_key(block_matrix, subkeys[round]) block_matrix = byte_sub(block_matrix) block_matrix = shift_rows(block_matrix) block_matrix = add_round_key(block_matrix, subkeys[-1]) final_blocks.append(matrix_to_binary(block_matrix)) return ''.join(final_blocks)
9bce8326b7b8ba223edbbac0c7eaa79cfdfb7098
3,652,972
import json
from uuid import uuid4

def emit_event(project_slug, action_slug, payload, sender_name, sender_secret,
               event_uuid=None):
    """Emit Event.

    :param project_slug: the slug of the project
    :param action_slug: the slug of the action
    :param payload: the payload that is emitted with the action
    :param sender_name: name that identifies the sender
    :param sender_secret: secret string
    :return: dict with task_id and event_uuid

    raise MissingSender if sender does not exist
    raise WrongSenderSecret if sender_secret is wrong
    raise NotAllowed if sender is not allowed to emit action to project
    """
    project_graph = graph.get_project_graph(project_slug)
    project_graph.verify_sender(sender_name, sender_secret)
    action = project_graph.get_action(action_slug)
    project = project_graph.project

    # execute event
    event_uuid = event_uuid or uuid4()
    event = {'uuid': event_uuid, 'project': project['slug'],
             'action': action['slug']}
    res = exec_event(event, action['webhooks'], payload)

    logger.info('EMIT %s "%s" "%s" %s', event_uuid, project_slug, action_slug,
                json.dumps(payload))
    return dict(
        task=dict(
            id=res.id,
        ),
        event=dict(
            uuid=event_uuid,
        ),
    )
53df17f6194f2e89c4cf9c4a0face05ecf49a588
3,652,973
def add_item(cart_id: str, item: CartItem): """ Endpoint. Add item to cart. :param str cart_id: cart id :param CartItem item: pair of name and price :return: dict with cart, item and price :rtype: dict """ logger.info(f'Request@/add_item/{cart_id}') return cart.add_item(cart_id=cart_id, item=item)
623be20b0bd06ff78b9d88295516b56604b276b2
3,652,974
import warnings def _inst2earth(advo, reverse=False, rotate_vars=None, force=False): """ Rotate data in an ADV object to the earth from the instrument frame (or vice-versa). Parameters ---------- advo : The adv object containing the data. reverse : bool (default: False) If True, this function performs the inverse rotation (earth->inst). rotate_vars : iterable The list of variables to rotate. By default this is taken from advo.props['rotate_vars']. force : Do not check which frame the data is in prior to performing this rotation. """ if reverse: # earth->inst # The transpose of the rotation matrix gives the inverse # rotation, so we simply reverse the order of the einsum: sumstr = 'jik,j...k->i...k' cs_now = 'earth' cs_new = 'inst' else: # inst->earth sumstr = 'ijk,j...k->i...k' cs_now = 'inst' cs_new = 'earth' if rotate_vars is None: if 'rotate_vars' in advo.attrs: rotate_vars = advo.rotate_vars else: rotate_vars = ['vel'] cs = advo.coord_sys.lower() if not force: if cs == cs_new: print("Data is already in the '%s' coordinate system" % cs_new) return elif cs != cs_now: raise ValueError( "Data must be in the '%s' frame when using this function" % cs_now) if hasattr(advo, 'orientmat'): omat = advo['orientmat'].values else: if 'vector' in advo.inst_model.lower(): orientation_down = advo['orientation_down'] omat = _calc_omat(advo['heading'].values, advo['pitch'].values, advo['roll'].values, orientation_down) # Take the transpose of the orientation to get the inst->earth rotation # matrix. rmat = np.rollaxis(omat, 1) _dcheck = rotb._check_rotmat_det(rmat) if not _dcheck.all(): warnings.warn("Invalid orientation matrix (determinant != 1) at indices: {}." .format(np.nonzero(~_dcheck)[0]), UserWarning) for nm in rotate_vars: n = advo[nm].shape[0] if n != 3: raise Exception("The entry {} is not a vector, it cannot " "be rotated.".format(nm)) advo[nm].values = np.einsum(sumstr, rmat, advo[nm]) advo = rotb._set_coords(advo, cs_new) return advo
51949445daf34bb09033136e53ea705abfb8ec50
3,652,975
from typing import Dict from typing import List def get_latency_of_one_partition( partition: Partition, node_to_latency_mapping: Dict[Node, NodeLatency] ) -> PartitionLatency: """Given a partiton and its nodes' latency, return a PartitionLatency for this partition""" def get_top_nodes(partition: Partition) -> List[Node]: """Given a partition, return a list of nodes on the top bfs level""" top_nodes: List[Node] = [] for node in partition.nodes: # Skip placeholder and get_attr nodes if node.op in {"placeholder", "get_attr"}: continue input_nodes: Dict[Node, None] = {} map_arg(node.args, lambda n: input_nodes.setdefault(n)) map_arg(node.kwargs, lambda n: input_nodes.setdefault(n)) # If a node has no input nodes in this partition, # or its input nodes in this partition are placeholders and get_attrs # this node is on the top bfs level in this partition if not any( [ n in partition.nodes and n.op not in {"placeholder", "get_attr"} for n in input_nodes ] ): top_nodes.append(node) return top_nodes def dfs_helper(node: Node, partition_latency) -> PartitionLatency: """Given a top node of a partition, this function returns the latency of the critical path in the partition """ node_latency = node_to_latency_mapping[node] # Calculate the current overall latency of the partition overall_latency_sec = partition_latency.overall_latency_sec + max( node_latency.computer_latency_sec, node_latency.mem_latency_sec ) # Update the mem latency of this path mem_latency_sec = ( partition_latency.mem_latency_sec + node_latency.mem_latency_sec ) # Update the compute latency of this path computer_latency_sec = ( partition_latency.computer_latency_sec + node_latency.computer_latency_sec ) # Get all users of this node that are in this partition users = set(node.users).intersection(partition.nodes) if users: max_latency = PartitionLatency( mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0 ) for n in users: # Get new partition latency recursively new_partition_latency = dfs_helper( n, PartitionLatency( mem_latency_sec, computer_latency_sec, overall_latency_sec ), ) if ( new_partition_latency.overall_latency_sec > max_latency.overall_latency_sec ): max_latency = new_partition_latency return max_latency # If there is no user, the node is at bottom of the partition return PartitionLatency( mem_latency_sec, computer_latency_sec, overall_latency_sec ) # Main part starts # Get all top level nodes of this partition top_nodes = get_top_nodes(partition) critical_path_latency = PartitionLatency( mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0 ) # Go through all top nodes and find the largest latency (critical pass latency) for node in top_nodes: partition_latency = dfs_helper( node, PartitionLatency( mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0 ), ) if ( partition_latency.overall_latency_sec > critical_path_latency.overall_latency_sec ): critical_path_latency = partition_latency return critical_path_latency
c4d61b5bd49800f7daae54df1226d9798007c4c5
3,652,979
def nearest_dy(lon, lat, t, gs, dy, tr=[0, 0], box=[0, 0], time_vec=False, space_array=False):
    """
    Given a dy (data) object and a gs (grid) object, return the data at the grid point
    nearest to the supplied lon/lat.

    tr is a time range option [time points before, after];
    with a non-zero tr, time_vec=True returns a vector over those time points, False returns the nanmean.
    box is a spatial range option [range in x, range in y];
    if there is a box, space_array=True returns the whole box, False returns the nanmean.
    """
    y, x = nearest_xy(lon, lat, gs)
    out_array = dy[t - tr[0]:t + tr[1] + 1, x - box[0]:x + box[0] + 1, y - box[1]:y + box[1] + 1]
    if time_vec and space_array:
        return out_array
    elif time_vec:
        return np.nanmean(out_array, axis=(1, 2))
    elif space_array:
        return np.nanmean(out_array, axis=0)
    else:
        return np.nanmean(out_array)
553547a958706cc30fa35248450cf499dc875051
3,652,980
def get_return_type() -> None: """Prompt the user for the return datatype of the function. :return return_type: {str} """ return_type = None # function or method while return_type is None or return_type == "": return_type = prompt( "return type? [bool|dict|float|int|list|str|tuple]: ", completer=datatype_completer, ) return_type = return_type.strip() if return_type is None or return_type == "": return_type = "None" break else: if return_type.lower().startswith("b"): return_type = "bool" elif return_type.lower().startswith("d"): return_type = "dict" elif return_type.lower().startswith("f"): return_type = "float" elif return_type.lower().startswith("i"): return_type = "int" elif return_type.lower().startswith("l"): return_type = "list" elif return_type.lower().startswith("s"): return_type = "str" elif return_type.lower().startswith("t"): return_type = "tuple" else: continue break return return_type
2f90b037bf1344cb11d8b00e1e3ee210728bbd03
3,652,981
def solve_capcha(capcha_str): """Function which calculates the solution to part 1 Arguments --------- capcha_str : str, a string of numbers Returns ------- total : int, the sum of adjacent matches """ capcha = [int(cc) for cc in list(capcha_str)] total = 0 for ii in range(len(capcha)): if capcha[ii] == capcha[ii - 1]: total += capcha[ii] return total
85a74f9b708f8134500d9c7add6e2df8617ec305
3,652,982
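A few worked checks of the circular-match sum above, using the well-known puzzle examples this helper mirrors.

print(solve_capcha("1122"))      # 3: the two matching pairs contribute 1 + 2
print(solve_capcha("1111"))      # 4: every digit matches its neighbour
print(solve_capcha("1234"))      # 0: no digit matches the one before it
print(solve_capcha("91212129"))  # 9: only the wrap-around comparison of the first digit against the last matches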
from typing import Union from typing import Sequence from typing import Callable from functools import reduce def compose(fs: Union[ModuleList, Sequence[Callable]]) -> F: """ Compose functions as a pipeline function. Args: fs (``Sequence[Callable]`` | :class:`~torch.nn.ModuleList`): The functions input for composition. Returns: :class:`~fn.func.F`: The composed output function. Examples:: >>> f = lambda x: x + 1 >>> g = lambda x: x * 2 >>> h = lambda x: x ** 2 >>> x = 1 >>> h(g(f(x))) == compose([f, g, h])(x) True """ return reduce(_ >> _, fs, F())
f4d566db95107fdba89a334c2022e68ba0b42f82
3,652,983
def nanargmin(a, axis=None): """ Return the indices of the minimum values in the specified axis ignoring NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results cannot be trusted if a slice contains only NaNs and Infs. Parameters ---------- a : array_like Input data. axis : int, optional Axis along which to operate. By default flattened input is used. Returns ------- index_array : ndarray An array of indices or a single index value. See Also -------- argmin, nanargmax Examples -------- >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmin(a) 0 >>> np.nanargmin(a) 2 >>> np.nanargmin(a, axis=0) array([1, 1]) >>> np.nanargmin(a, axis=1) array([1, 0]) """ a, mask = _replace_nan(a, np.inf) res = np.argmin(a, axis=axis) if mask is not None: mask = np.all(mask, axis=axis) if np.any(mask): raise ValueError("All-NaN slice encountered") return res
6d2320bd38a96b364b752e2b50a204daf1f673e8
3,652,984
import re def check_right_flank(seq_right, list_rep, verbose=False): """ Check if start of right flank sequence contains last repetitive sequence. :param seq_right: str - right flank sequence :param list_rep: list(Repetition) - list of Repetitions(seq, num) :param verbose: bool - be verbose :return: seq_right - str - updated right flank sequence rep_list - list(Repetition) - updated list of Repetitions(seq, num) """ rep_list = deepcopy(list_rep) last = rep_list[-1] last_seq = re.sub('N', '.', last.seq) if re.match(last_seq, seq_right): if verbose: print('Repetitive sequence find in right flank region. Adding this sequence into repetitions.') while re.match(last_seq, seq_right): # cut repetitive sequence from flank sequence and add it to the list of repetitive sequences seq_right = seq_right[len(last_seq):] last.num += 1 return seq_right, rep_list
c4f5675756e5acd88e71ed713bcdcb5a6c2763a2
3,652,985
def readout_gg(_X, X, O): """ Graph Gathering implementation. (The none shown in the equation) _X: final node embeddings. X: initial node features. O: desired output dimension. """ val1 = dense(tf.concat([_X, X], axis=2), O, use_bias=True) val1 = tf.nn.sigmoid(val1) val2 = dense(_X, O, use_bias=True) out = tf.multiply(val1, val2) out = tf.reduce_sum(out, axis=1) out = tf.nn.relu(out) return out
f8f8bc2ed52bf72ba14325d6d55c3bc46e100f82
3,652,987
def plot_taylor_axes(axes, cax, option): """ Plot axes for Taylor diagram. Plots the x & y axes for a Taylor diagram using the information provided in the AXES dictionary returned by the GET_TAYLOR_DIAGRAM_AXES function. INPUTS: axes : data structure containing axes information for target diagram cax : handle for plot axes option : data structure containing option values. (Refer to GET_TAYLOR_DIAGRAM_OPTIONS function for more information.) option['colcor'] : CORs grid and tick labels color (Default: blue) option['colrms'] : RMS grid and tick labels color (Default: green) option['colstd'] : STD grid and tick labels color (Default: black) option['numberpanels'] : number of panels (quadrants) to use for Taylor diagram option['tickrms'] : RMS values to plot gridding circles from observation point option['titlecor'] : title for CORRELATION axis option['titlerms'] : title for RMS axis option['titlestd'] : title fot STD axis OUTPUTS: ax: returns a list of handles of axis labels Author: Peter A. Rochford Acorn Science & Innovation [email protected] Created on Dec 3, 2016 Author: Peter A. Rochford Symplectic, LLC www.thesymplectic.com [email protected] """ ax = [] axlabweight = "bold" if option["numberpanels"] == 1: # Single panel if option["titlestd"] == "on": ttt = plt.ylabel("test", fontsize=14) x = -0.15 * axes["rmax"] y = 0.8 * axes["rmax"] handle = plt.text( x, y, "Standard Deviation", rotation=90, color=option["colstd"], fontweight=axlabweight, fontsize=plt.get(ttt, "fontsize"), horizontalalignment="center", ) ax.append(handle) if option["titlecor"] == "on": pos1 = 45 DA = 15 lab = "Correlation Coefficient" c = np.fliplr([np.linspace(pos1 - DA, pos1 + DA, len(lab))])[0] dd = 1.1 * axes["rmax"] for ii, ith in enumerate(c): handle = plt.text( dd * np.cos(ith * np.pi / 180), dd * np.sin(ith * np.pi / 180), lab[ii], ) handle.set( rotation=ith - 90, color=option["colcor"], horizontalalignment="center", verticalalignment="bottom", fontsize=plt.get(ax[0], "fontsize"), fontweight=axlabweight, ) ax.append(handle) if option["titlerms"] == "on": pos1 = option["tickrmsangle"] + (180 - option["tickrmsangle"]) / 2 DA = 15 pos1 = 160 lab = "RMSD" c = np.fliplr([np.linspace(pos1 - DA, pos1 + DA, len(lab))])[0] # Find optimal placement of label itick = -1 ratio = 1.0 while ratio > 0.7: itick += 1 ratio = (option["axismax"] - option["tickrms"][itick]) / option[ "axismax" ] dd = 0.7 * option["tickrms"][itick] + 0.3 * option["tickrms"][itick + 1] # Write label in a circular arc for ii, ith in enumerate(c): xtextpos = axes["dx"] + dd * np.cos(ith * np.pi / 180) ytextpos = dd * np.sin(ith * np.pi / 180) handle = plt.text(xtextpos, ytextpos, lab[ii]) handle.set( rotation=ith - 90, color=option["colrms"], horizontalalignment="center", verticalalignment="top", fontsize=plt.get(ax[0], "fontsize"), fontweight=axlabweight, ) ax.append(handle) else: # Double panel if option["titlestd"] == "on": ttt = plt.ylabel("test", fontsize=14) x = 0 y = -0.15 * axes["rmax"] handle = plt.text( x, y, "Standard Deviation", rotation=0, color=option["colstd"], fontweight=axlabweight, fontsize=plt.get(ttt, "fontsize"), horizontalalignment="center", ) ax.append(handle) if option["titlecor"] == "on": pos1 = 90 DA = 25 lab = "Correlation Coefficient" c = np.fliplr([np.linspace(pos1 - DA, pos1 + DA, len(lab))])[0] dd = 1.1 * axes["rmax"] for ii, ith in enumerate(c): handle = plt.text( dd * np.cos(ith * np.pi / 180), dd * np.sin(ith * np.pi / 180), lab[ii], ) handle.set( rotation=ith - 90, color=option["colcor"], horizontalalignment="center", 
verticalalignment="bottom", fontsize=plt.get(ax[0], "fontsize"), fontweight=axlabweight, ) ax.append(handle) if option["titlerms"] == "on": pos1 = 160 DA = 10 lab = "RMSD" c = np.fliplr([np.linspace(pos1 - DA, pos1 + DA, len(lab))])[0] dd = 1.05 * option["tickrms"][0] for ii, ith in enumerate(c): xtextpos = axes["dx"] + dd * np.cos(ith * np.pi / 180) ytextpos = dd * np.sin(ith * np.pi / 180) handle = plt.text(xtextpos, ytextpos, lab[ii]) handle.set( rotation=ith - 90, color=option["colrms"], horizontalalignment="center", verticalalignment="bottom", fontsize=plt.get(ax[0], "fontsize"), fontweight=axlabweight, ) ax.append(handle) # VARIOUS ADJUSTMENTS TO THE PLOT: cax.set_aspect("equal") plt.axis("off") plt.gcf().patch.set_facecolor("w") # set axis limits if option["numberpanels"] == 2: axislim = [axes["rmax"] * x for x in [-1.15, 1.15, 0, 1.15]] plt.axis(axislim) plt.plot([-axes["rmax"], axes["rmax"]], [0, 0], color=axes["tc"], linewidth=2) plt.plot([0, 0], [0, axes["rmax"]], color=axes["tc"]) else: axislim = [axes["rmax"] * x for x in [0, 1.15, 0, 1.15]] plt.axis(axislim) plt.plot([0, axes["rmax"]], [0, 0], color=axes["tc"], linewidth=2) plt.plot([0, 0], [0, axes["rmax"]], color=axes["tc"], linewidth=2) return ax
69193482a954f1afbcac7b7934f4ce507050ce55
3,652,988
def list_timezones(): """Return a list of all time zones known to the system.""" l=[] for i in xrange(parentsize): l.append(_winreg.EnumKey(tzparent, i)) return l
c9fea053f6f86043065e0e3efc04a30dc5585b5d
3,652,989
import functools def keyword_decorator(deco): """Wrap a decorator to optionally takes keyword arguments.""" @functools.wraps(deco) def new_deco(fn=None, **kwargs): if fn is None: @functools.wraps(deco) def newer_deco(fn): return deco(fn, **kwargs) return newer_deco else: return deco(fn, **kwargs) return new_deco
5ffc100c4fbbf7657c974685ab70dfc903a4abe1
3,652,990
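A short sketch of both call styles the wrapper above enables, bare and with keyword arguments; the tag decorator is a made-up example.

@keyword_decorator
def tag(fn, label="untagged"):
    fn.label = label
    return fn

@tag                 # bare use: the function is passed straight through
def foo():
    pass

@tag(label="math")   # keyword use: arguments are captured, then applied
def bar():
    pass

print(foo.label, bar.label)  # untagged math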
import numpy import math def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): """Return spherical linear interpolation between two quaternions. >>> q0 = random_quaternion() >>> q1 = random_quaternion() >>> q = quaternion_slerp(q0, q1, 0.0) >>> numpy.allclose(q, q0) True >>> q = quaternion_slerp(q0, q1, 1.0, 1) >>> numpy.allclose(q, q1) True >>> q = quaternion_slerp(q0, q1, 0.5) >>> angle = math.acos(numpy.dot(q0, q)) >>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \ numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle) True """ q0 = unit_vector(quat0[:4]) q1 = unit_vector(quat1[:4]) if fraction == 0.0: return q0 elif fraction == 1.0: return q1 d = numpy.dot(q0, q1) if abs(abs(d) - 1.0) < _EPS: return q0 if shortestpath and d < 0.0: # invert rotation d = -d q1 *= -1.0 angle = math.acos(d) + spin * math.pi if abs(angle) < _EPS: return q0 isin = 1.0 / math.sin(angle) q0 *= math.sin((1.0 - fraction) * angle) * isin q1 *= math.sin(fraction * angle) * isin q0 += q1 return q0
1895bdb60e6bce11a0cd3f659ceca2a83a0c4810
3,652,992
def doctest2md(lines): """ Convert the given doctest to a syntax highlighted markdown segment. """ is_only_code = True lines = unindent(lines) for line in lines: if not line.startswith('>>> ') and not line.startswith('... ') and line not in ['>>>', '...']: is_only_code = False break if is_only_code: orig = lines lines = [] for line in orig: lines.append(line[4:]) return lines
33ffaa31e7d7b578e1664707476b214bdc705346
3,652,993
def get_return_value(total, cash_given):
    """Return the outstanding balance (total minus cash given), quantized to cents.

    A negative result means change is owed back to the customer.
    """
    return Decimal(Decimal(total) - Decimal(cash_given)).quantize(Decimal('.01'))
3c4bc4819bc7c133b56881ea59cbd3d7a108b3dd
3,652,994
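Two illustrative calls for the balance helper above; the explicit Decimal import is assumed to sit at module level in the original source.

from decimal import Decimal  # the helper above relies on Decimal being imported

print(get_return_value('20.00', '15.00'))  # Decimal('5.00'): the customer still owes 5.00
print(get_return_value('19.99', '20.00'))  # Decimal('-0.01'): 0.01 in change is due back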
def show_predictions(scores, target='y', threshold=0.5, path_out=False, verbose=True, figsize=(7, 200)): """This function will plot which have been correctly classified. The input is single dict containing labels as keys and information on each model as values in the order [auc_score, ids_test, y_true, y_pred]. all_ids: List, IDs of all samples as strings. model_dict: Dict, containing model name as key and [auc_score, ids_test, y_true, y_pred] as value. path_out: String, path where to save plot. show: If True, show plot. """ all_ids = scores.index.tolist() N, M = scores.shape y_true = scores[target] # Set up figure to hold IDs vs model type f, id_fig = plt.subplots(figsize=figsize) id_fig.margins(0.01, 0.01) plt.ylabel('Samples (IDs)', fontsize=14) plt.xlabel('Models', fontsize=14) plt.title('Correctly classified samples', fontsize=20) plt.yticks(range(len(all_ids)), all_ids, fontsize=12) plt.grid(which='major', linestyle='dashed', linewidth=0.1) plt.rc('axes', axisbelow=True) cmap = plt.get_cmap('tab20', M) # Coordinates and legend counts = [0 for item in all_ids] how_many_correct = dict(zip(all_ids, counts)) all_ids = dict(zip(all_ids, list(range(len(all_ids))))) xticks = [] height = 0 legend = [] # Run through each model missing_counts = {} for col in scores.columns: if col != target: y_pred = scores[col].dropna(how='any') # Find correct IDs ids_test = [] pred_labels = [1 if v >= threshold else 0 for v in y_pred] for ID, true, pred in zip(y_pred.index, y_true, pred_labels): if true == round(pred): ids_test.append(ID) # Count item how_many_correct[ID] += 1 # Get correct classifications xticks.append(col) y = [all_ids[i] for i in ids_test] x = [height]*len(y) # Plot correct IDs plot_ids = id_fig.scatter(x=x, y=y, s=15, label=col) # Plot x for missing IDs missing = [] for ID in all_ids: if ID not in missing_counts.keys(): missing_counts[ID] = 0 if ID not in y_pred.index: missing.append(ID) missing_counts[ID] += 1 if len(missing) > 0: y = [all_ids[i] for i in missing] x = [height]*len(y) id_fig.scatter(x=x, y=y, marker='x', color='black') legend.append(height) height += 1 plt.xticks(legend, xticks, fontsize=12, rotation=90) plt.tight_layout() plt.show() if path_out: plt.savefig(path_out, dpi=1000, transparent=True) return how_many_correct, missing_counts
f113b485832623048c01c798c6ca059403b3fb75
3,652,996
def det(a, **kwargs): """ Compute the determinant of arrays, with broadcasting. Parameters ---------- a : (NDIMS, M, M) array Input array. Its inner dimensions must be those of a square 2-D array. Returns ------- det : (NDIMS) array Determinants of `a` See Also -------- slogdet : Another representation for the determinant, more suitable for large matrices where underflow/overflow may occur Notes ----- Numpy broadcasting rules apply. The determinants are computed via LU factorization using the LAPACK routine _getrf. Implemented for single, double, csingle and cdouble. Numpy conversion rules apply. Examples -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: >>> a = np.array([[1, 2], [3, 4]]) >>> np.allclose(-2.0, det(a)) True >>> a = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]] ]) >>> np.allclose(-2.0, det(a)) True """ return _impl.det(a, **kwargs)
8d7ccb0756375db749c5a21c8f54c301b37bfc28
3,652,997
def SegStart(ea): """ Get start address of a segment @param ea: any address in the segment @return: start of segment BADADDR - the specified address doesn't belong to any segment """ seg = idaapi.getseg(ea) if not seg: return BADADDR else: return seg.startEA
0ced19a5b868a605e61c28b97cf1de8b8a6c0d54
3,652,998
import json
import inspect

def update_school_term(request):
    """Update the start and end dates of a school term (cycle)."""
    operation_object = None
    try:
        if request.method == 'POST':
            object_form = SchoolTermUpdateForm(request.POST)
            if object_form.is_valid():
                update_id = object_form.cleaned_data.get('update_id', None)
                if update_id:
                    school_terms = SchoolTerm.objects.filter(id=update_id)
                    if len(school_terms) == 1:
                        form_object_school_term = form_to_obj(object_form.cleaned_data, school_terms[0])
                        form_object_school_term.save()
                        operation_object = form_object_school_term
                        result['status'] = True
                        result['message'] = '修改成功!'
                        result['data'] = json.dumps({}, ensure_ascii=False)
                    else:
                        result['status'] = False
                        result['message'] = '系统异常,请稍后尝试或联系管理员!'
                        result['data'] = ''
            else:
                # object_form.errors is an ErrorDict whose values are rendered as ul/li markup
                print(type(object_form.errors), object_form.errors)
                result['status'] = False
                result['message'] = ('系统异常,请稍后尝试或联系管理员!错误提示:'
                                     + str(type(object_form.errors)) + "," + str(object_form.errors))
                result['data'] = ''
        else:
            result['status'] = False
            result['message'] = '系统异常,请稍后尝试或联系管理员!'
            result['data'] = ''
    except Exception as e:
        result['status'] = False
        result['message'] = "系统异常:" + str(e)
        result['data'] = ''
    result["level"] = log_level_edit
    # operation_object may still be None on the error paths above
    save_operation_log(request, inspect.stack()[0][3],
                       operation_object.__str__(True) if operation_object else '', result)
    return JsonResponse(result, safe=False)
850bcaa39a1a0668f017dc90224e766b2b336a37
3,652,999
import typing as t

def chown(
    path: Pathable, owner: str, flags: t.Optional[str] = None, sudo: bool = False
) -> ChangeList:
    """Change a path's owner (user or user:group), returning the list of changes made."""
    path = _to_path(path)
    needs_sudo_w = need_sudo_to_write(path)
    needs_sudo_r = need_sudo_to_read(path)
    if needs_sudo_r and not sudo:
        raise NeedsSudoException(f"chown {path}")

    curr_owner = _run(
        f"stat -c '%U:%G' {path}", check=True, sudo=needs_sudo_r
    ).stdout.decode().strip()
    if ":" not in owner:
        # Only a user was requested, so compare against the user part alone.
        curr_owner = curr_owner.split(":", 1)[0]

    if curr_owner != owner:
        if needs_sudo_w and not sudo:
            raise NeedsSudoException(f"chown {owner} {path}")
        flag_str = f"{flags} " if flags else ""
        _run(f"chown {flag_str}{owner} {path}", sudo=needs_sudo_w, check=True)
        return [cl(ChownModify, path, owner, curr_owner, flags)]
    return []
124ba2877dc2ff4396d84c3b8825846c9d057cf5
3,653,000
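A hypothetical call for the helper above; the path and ownership are illustrative, and the command only runs when the current owner differs from the requested one.

# No-op if /etc/myapp.conf is already owned by root:root; otherwise runs
# `chown -v root:root /etc/myapp.conf` (with sudo, since /etc is root-owned).
changes = chown("/etc/myapp.conf", "root:root", flags="-v", sudo=True)
print(changes)  # [] when nothing needed changing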
def metric_dist(endclasses, metrics='all', cols=2, comp_groups={}, bins=10, metric_bins={}, legend_loc=-1, xlabels={}, ylabel='count', title='', indiv_kwargs={}, figsize='default', v_padding=0.4, h_padding=0.05, title_padding=0.1, **kwargs): """ Plots the histogram of given metric(s) separated by comparison groups over a set of scenarios Parameters ---------- endclasses : dict Dictionary of metrics with structure {'scen':{'metric':value}} metrics : list, optional list of metrics in the dictionary to plot cols : int, optional columns to use in the figure. The default is 2. comp_groups : dict, optional Dictionary for comparison groups (if more than one) with structure: {'group1':('scen1', 'scen2'), 'group2':('scen3', 'scen4')} Default is {} If a legend is shown, group names are used as labels. bins : int Number of bins to use (for all plots). Default is None metric_bins : dict, Dictionary of number of bins to use for each metric with structure {'metric':num} Default is {} legend_loc : int, optional Specifies the plot to place the legend on, if runs are being compared. Default is -1 (the last plot) To remove the legend, give a value of False xlabels : dict, optional Label for the x-axes with structure {'metric':'label'} ylabel : str, optional Label for the y-axes. Default is 'time' title : str, optional overall title for the plot. Default is '' indiv_kwargs : dict, optional dict of kwargs with structure {comp1:kwargs1, comp2:kwargs2}, where where kwargs is an individual dict of keyword arguments for the comparison group comp (or scenario, if not aggregated) which overrides the global kwargs (or default behavior). figsize : tuple (float,float) x-y size for the figure. The default is 'default', which dymanically gives 3 for each column and 2 for each row v_padding : float vertical padding between subplots as a fraction of axis height h_padding : float horizontal padding between subplots as a fraction of axis width title_padding : float padding for title as a fraction of figure height **kwargs : kwargs keyword arguments to mpl.hist e.g. bins, etc """ #Sort into comparison groups if not comp_groups: groupmetrics = {'default':endclasses} else: groupmetrics = {group:{ec:cl for ec,cl in endclasses.items() if ec in groupscens} for group, groupscens in comp_groups.items()} template = [*endclasses.values()][0] if metrics=='all': plot_values = [i for i in template.keys()] else: plot_values = [i for i in template.keys() if i in metrics] num_plots = len(plot_values) if num_plots==1: cols=1 rows = int(np.ceil(num_plots/cols)) if figsize=='default': figsize=(cols*3, 2*rows) fig, axs = plt.subplots(rows,cols, sharey=True, sharex=False, figsize=figsize) if type(axs)==np.ndarray: axs = axs.flatten() else: axs=[axs] num_bins = bins for i, plot_value in enumerate(plot_values): ax = axs[i] xlabel = xlabels.get(plot_value, plot_value) if type(xlabel)==str: ax.set_xlabel(xlabel) else: ax.set_xlabel(' '.join(xlabel)) ax.grid(axis='y') fulldata = [ec[plot_value] for endc in groupmetrics.values() for ec in endc.values()] bins = np.histogram(fulldata, metric_bins.get(plot_value, num_bins))[1] if not i%cols: ax.set_ylabel(ylabel) for group, endclasses in groupmetrics.items(): local_kwargs = {**kwargs, **indiv_kwargs.get(group,{})} x = [ec[plot_value] for ec in endclasses.values()] ax.hist(x, bins, label=group, **local_kwargs) multiplot_legend_title(groupmetrics, axs, ax, legend_loc, title,v_padding, h_padding, title_padding) return fig, axs
95bbc645abad812585de58d4724787e310424f4a
3,653,001
def get_colors(df, colormap=None, vmin=None, vmax=None, axis=1):
    """
    Automatically map a colormap onto the values along one axis of a DataFrame,
    with the option to normalise the colormap.

    :params:
        df: DataFrame whose index (axis=0) or columns (axis=1, default) supply the values
            that need a colour mapped to them. For string labels, the try/except falls back
            to range(len(values)) to assign colours.

        colormap cm(): colormap to use (defaults to plt.cm.RdBu). All options are listed at:
            https://matplotlib.org/examples/color/colormaps_reference.html

        vmin, vmax int(): bounds used to normalise the colormap, when a normalised
            colormap is needed.

    :return:
        colormap cm.colormap(): An array of RGBA values

    Original version found on Stack Overflow (without the try/except), but the link has been lost.
    """
    if colormap is None:
        colormap = plt.cm.RdBu

    if axis == 0:
        values = df.index
    elif axis == 1:
        values = df.columns

    norm = plt.Normalize(vmin, vmax)
    try:
        return colormap(norm(values))
    except (AttributeError, TypeError):  # May happen when given a list of categorical values
        return colormap(norm(range(len(values))))
7da0c0a8f8542c9a8137121c4664da91485d8cca
3,653,002
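A small usage sketch for the colour helper above, pairing one colour per DataFrame column; the random demo data is illustrative.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])
colors = get_colors(df, colormap=plt.cm.viridis)  # one RGBA row per column
for col, color in zip(df.columns, colors):
    plt.plot(df.index, df[col], color=color, label=col)
plt.legend()
plt.show()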
def proxy_channels(subreddits): """ Helper function to proxy submissions and posts. Args: subreddits (list of praw.models.Subreddit): A list of subreddits Returns: list of ChannelProxy: list of proxied channels """ channels = { channel.name: channel for channel in Channel.objects.filter( name__in=[subreddit.display_name for subreddit in subreddits] ) } return [ ChannelProxy(subreddit, channels[subreddit.display_name]) for subreddit in subreddits if subreddit.display_name in channels ]
caab3ecfa5a85b06d94192fe77308724f67b0e96
3,653,003
def anno2map(anno): """ anno: { 'file' ==> file index 'instances': [ { 'class_name': 'class_idx': 'silhouette': 'part': [(name, mask), ...] }, ... ] } """ height, width = anno.instances[0].silhouette.shape cls_mask = np.zeros((height, width), dtype=np.uint8) inst_mask = np.zeros((height, width), dtype=np.uint8) part_mask = np.zeros((height, width), dtype=np.uint8) for i, inst in enumerate(anno.instances): assert height == inst.silhouette.shape[0] and width == inst.silhouette.shape[1] cls_mask[inst.silhouette.astype(np.bool)] = inst.class_idx inst_mask[inst.silhouette.astype(np.bool)] = i for pname, pmask in inst.part: assert pname in PASCAL_PART2ID_[inst.class_idx-1], f'The part {pname} is undefined in {inst.class_name}' assert inst.silhouette[pmask.astype(np.bool)].all(), 'The part region is not a complete subregion of the object' # if not inst.silhouette[pmask].all(): # print(f'Warning: [{anno.file}: {pname}] The part region is not a complete subregion of the object') pid = PASCAL_PART2ID_[inst.class_idx-1][pname] part_mask[pmask.astype(np.bool)] = pid return cls_mask, inst_mask, part_mask
18841b323d4368c5f1681dd34586e82aa8a9d97c
3,653,004
def string_to_bool(val: str): """Convert a homie string bool to a python bool""" return val == STATE_ON
f7fc9768762256fc5c2cf818949793f72948db98
3,653,005