content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def remove_rule(rule_id):
    """Remove a single rule"""
    ruleset = packetfilter.get_ruleset()
    ruleset.remove(rule_id)
    packetfilter.load_ruleset(ruleset)
    save_pfconf(packetfilter)
    return redirect(url_for('rules', message=PFWEB_ALERT_SUCCESS_DEL), code=302)
fe45a3d5af532ff67e8aef21ab093438818c6dbc
6,749
def HexToMPDecimal(hex_chars):
    """ Convert bytes to an MPDecimal string. Example \x00 -> "aa"
        This gives us the AppID for a chrome extension.
    """
    result = ''
    base = ord('a')
    for i in xrange(len(hex_chars)):
        value = ord(hex_chars[i])
        dig1 = value / 16
        dig2 = value % 16
        result += chr(dig1 + base)
        result += chr(dig2 + base)
    return result
5d81c0e1ee3f4f94e615578e132377b803beb47b
6,750
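A minimal usage sketch for HexToMPDecimal above (Python 2, given its use of xrange and byte-string indexing). The public-key bytes are a hypothetical placeholder; Chrome derives extension IDs from the first 16 bytes of the SHA-256 digest of the packed key.

import hashlib

der_public_key = b'...'                    # hypothetical DER-encoded public key
digest = hashlib.sha256(der_public_key).digest()
app_id = HexToMPDecimal(digest[:16])       # 32-character a-p string; zero bytes map to 'aa'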
def fit_growth_curves(input_file, file_data_frame, file_data_units, condition_unit, time_unit, cell_density_unit): """ :Authors: Chuankai Cheng <[email protected]> and J. Cameron Thrash <[email protected]> :License: MIT :Version: 1.0 :Date: 2021-03-17 :Repository: https://github.com/thrash-lab/sparse-growth-curve """ output_data_indices=file_data_frame.groupby( ['Strain','Replicate','Condition'] ).size().reset_index().rename(columns={0:'count'} )[['Strain','Replicate','Condition']] strains_conditions=output_data_indices.groupby(['Strain','Condition'] ).size().reset_index()[['Strain','Condition']] output_data_indices['Growth: Doubling rate']=0 output_data_indices['Death: Doubling rate']=0 output_data_indices=output_data_indices.astype(object) output_data_indices=output_data_indices.sort_values(by=['Strain','Condition']) strains=np.unique(strains_conditions['Strain']) row_num=len(strains) col_num=np.int(np.ceil(len(strains_conditions)/len(strains))) plt.figure(figsize=(col_num*2+1, row_num*2+1)) plot_j=1 previous_condition=output_data_indices['Condition'].values[0] plt.subplot(row_num, col_num, plot_j) color_i=0 plt.title(str(output_data_indices['Strain'].values[0])+'\n' +str(output_data_indices['Condition'].values[0])+' ' +condition_unit) plt.ylabel(cell_density_unit) plt.xlabel(time_unit) for i in output_data_indices.index: target_gr_index=output_data_indices.loc[i] target_growth_curve_df = file_data_frame[ (file_data_frame['Strain']==target_gr_index['Strain'])& (file_data_frame['Condition']==target_gr_index['Condition']) & (file_data_frame['Replicate']==target_gr_index['Replicate'])] #print('\n\nStrain:', target_gr_index['Strain'], # '\t Condition:',str(target_gr_index['Condition'])+' '+condition_unit, # '\t Replicate:',str(target_gr_index['Replicate'])) time=target_growth_curve_df.loc[:,'Time'].values cell_density=target_growth_curve_df.loc[:,'Cell density'].values #print('time=', time) #print('cell density=', 'cell_density') if target_gr_index['Condition']!=previous_condition: plt.yscale('log') plt.ylim(10**np.floor(np.log10(np.min(file_data_frame['Cell density']))-1), 10**np.ceil(np.log10(np.max(file_data_frame['Cell density']))+1)) plt.legend() #plt.xlim(np.floor(np.min(file_data_frame['Time'])), # np.ceil(np.max(file_data_frame['Time']))) color_i=0 plot_j+=1 plt.subplot(row_num, col_num, plot_j) plt.title(str(target_gr_index['Strain'])+'\n' +str(target_gr_index['Condition'])+' ' +condition_unit) plt.ylabel(cell_density_unit) plt.xlabel(time_unit) if len(cell_density)>4: (all_fit_time, all_fit_cell_density, all_fit_conf_band, selected_doubling_rate, selected_fit_time, selected_fit_cell_density, selected_doubling_rate_d, selected_fit_time_d, selected_fit_cell_density_d)=fit_growth_curve( time, cell_density, one_order=10, decision_tree_depth=1) output_data_indices.loc[i,'Growth: Doubling rate']=selected_doubling_rate output_data_indices.loc[i,'Death: Doubling rate']=selected_doubling_rate_d for k in range(len(all_fit_time)): #plt.plot(all_fit_time[i], all_fit_cell_density[i], 'k--') #plt.fill_between(all_fit_time[k], # all_fit_cell_density[k]*(all_fit_conf_band[k]), # all_fit_cell_density[k]/(all_fit_conf_band[k]), # color=colormap(color_i), alpha=0.1) plt.plot(selected_fit_time, selected_fit_cell_density, '-', color=colormap(color_i), linewidth=2) plt.plot(selected_fit_time_d, selected_fit_cell_density_d, '--', color=colormap(color_i), linewidth=1) elif len(cell_density)>2: x=time y=np.log2(cell_density) x_fit = np.arange(0.0, x[-1], 0.01)[:, np.newaxis] (doubling_rate, pre_y, ci) = 
myLinearRegression_CB(x, y, x_fit, one_order=10) #plt.fill_between(x_fit, # pre_y*ci, # pre_y/ci, # color=colormap(color_i), alpha=0.1) if doubling_rate>0: output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate plt.plot(x_fit, pre_y, '-', color=colormap(color_i), linewidth=2) else: output_data_indices.loc[i,'Death: Doubling rate']=doubling_rate plt.plot(x_fit, pre_y, '--', color=colormap(color_i), linewidth=1) elif len(cell_density)==2: x=time y=np.log2(cell_density) doubling_rate=(y[1]-y[0])/(x[1]-x[0]) output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate if doubling_rate>0: output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate plt.plot(x, y, '-', color=colormap(color_i), linewidth=2) else: output_data_indices.loc[i,'Death: Doubling rate']=doubling_rate plt.plot(x, y, '--', color=colormap(color_i), linewidth=1) plt.plot(time, cell_density,'o',alpha=0.3, color=colormap(color_i), label=output_data_indices.loc[i]['Replicate']) color_i+=1 previous_condition=output_data_indices.loc[i]['Condition'] plt.yscale('log') plt.ylim(10**np.floor(np.log10(np.min(file_data_frame['Cell density']))-1), 10**np.ceil(np.log10(np.max(file_data_frame['Cell density']))+1)) #plt.xlim(np.floor(np.min(file_data_frame['Time'])), # np.ceil(np.max(file_data_frame['Time']))) plt.legend() plt.tight_layout() output_file_string=(output_folder+ '/'+input_file+ '/1_Data_fit_visualization_'+ dt_string+'.pdf') plt.savefig(output_file_string) print('output file saved:'+output_file_string) return output_data_indices
d551a094ea398e09dcc88f8f9668922b3e665317
6,751
def stringify(li, delimiter):
    """ Converts list entries to strings and joins with delimiter."""
    string_list = map(str, li)
    return delimiter.join(string_list)
a4c35a19d8ea654a802cd3f92ababcbdfdf0ecfb
6,752
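A quick usage sketch of stringify above; under Python 3 map returns an iterator, which str.join consumes just the same.

print(stringify([1, 2.5, 'a'], ','))   # -> "1,2.5,a"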
def norm_w(x, w):
    """
    Compute sum_i( w[i] * |x[i]| ). See p. 7.
    """
    return (w * abs(x)).sum()
a9825750cb6ee0bbbe87b0c4d1bd132bcfca90db
6,753
def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment):
    """Apply momentum optimizer to the weight parameter using Tensor."""
    success = True
    success = F.depend(success, opt(weight, moment, learning_rate, gradient, momentum))
    return success
89ae490ba0f05455dff03bcd57d4b6f52f7d8327
6,754
from typing import Dict

import yaml


def get_config_settings(env: str = "dev") -> Dict:
    """ Retrieves configuration from YAML file """
    config_fh = construct_config_path(env)
    with open(config_fh, "r") as f:
        data = yaml.safe_load(f)
    return data
190a7f8cb2a297ee4ae6d5734d4d9f521a18bb3f
6,755
def get_all(connection: ApiConnection, config: str, section: str = None) -> dict:
    """Get all sections of a config or all values of a section.

    :param connection:
    :param config: UCI config name
    :param section: [optional] UCI section name
    :return: JSON RPC response result
    """
    return request(connection, 'uci', 'get_all', config, section)
be4a76f87398ce1d4e0765314266647867964e39
6,756
def load_module(module, app):
    """Load an object from a Python module

    In:
      - ``module`` -- name of the module
      - ``app`` -- name of the object to load

    Return:
      - (the object, None)
    """
    r = __import__(module, fromlist=('',))
    if app is not None:
        r = getattr(r, app)
    return r, None
858d9d0bf91ff7d83ad391218b8ff1b37007b43b
6,757
def get_routes(app: web.Application) -> list:
    """
    Get the full list of defined routes
    """
    return get_standard_routes(app) + get_custom_routes(app)
7f5d365c28ee45096e089ee6913d3aec4d8214d8
6,758
def cb_round(series: pd.Series, base: Number = 5, sig_dec: int = 0):
    """ Returns the pandas series (or column) with values rounded per the
        custom base value

    Args:
        series (pd.Series): data to be rounded
        base (float): base value to which data should be rounded (may be decimal)
        sig_dec (int): number of significant decimals for the custom-rounded value
    Returns:
        pd.Series
    """
    valid.validate_array(series, "series", expected_len=None)
    if not base >= 0.01:
        err = f"cannot round with base {base}." + "cb_round designed for base >= 0.01."
        raise ValueError(err)
    result = series.apply(lambda x: round(base * round(float(x) / base), sig_dec))
    return result
29599898fa8686c260e89d2efcdcceec108d5b4c
6,759
def makeGaussian(size, sigma=3, center=None):
    """ Make a square gaussian kernel.

    size is the length of a side of the square
    fwhm is full-width-half-maximum, which
    can be thought of as an effective radius.
    """
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]

    if center is None:
        x0 = y0 = size // 2
    else:
        x0 = center[0]
        y0 = center[1]

    return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2.0 * sigma ** 2))
    # return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)
8efef3cc265375d5412107a465a97380e8c4d101
6,760
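A small usage sketch for makeGaussian above, assuming numpy is imported as np in the surrounding module; it builds a 5x5 kernel and checks that the peak sits at the centre pixel.

import numpy as np

kernel = makeGaussian(5, sigma=1.5)
assert kernel.shape == (5, 5)
assert kernel[2, 2] == kernel.max() == 1.0   # exp(0) at the centre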
import torch


def average_relative_error(y_pred, y_true):
    """Calculate Average Relative Error

    Args:
        y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values
        y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values

    Returns:
        float: Average Relative Mean Squared Error

    Raises:
        ValueError : If Parameters are not both of type np.ndarray or torch.Tensor
    """
    if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray):
        return sum(sum(abs(y_true - y_pred) / y_true) / len(y_true)) / len(y_true[0, :])
    elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor):
        return torch.sum(torch.sum(torch.abs(y_true - y_pred) / y_true, dim=0) / len(y_true)) / len(y_true[0, :])
    else:
        raise ValueError(
            'y_true and y_pred must be both of type numpy.ndarray or torch.Tensor')
2243eb82c78ff03181be3c10d50c3aa000e8476c
6,761
from unittest.mock import Mock


def make_subprocess_hook_mock(exit_code: int, output: str) -> Mock:
    """Mock a SubprocessHook factory object for use in testing.

    This mock allows us to validate that the RenvOperator is executing
    subprocess commands as expected without running them for real.
    """
    result_mock = Mock()
    result_mock.exit_code = exit_code
    result_mock.output = output

    hook_instance_mock = Mock()
    hook_instance_mock.run_command = Mock(return_value=result_mock)

    hook_factory_mock = Mock(return_value=hook_instance_mock)

    return hook_factory_mock
a047608503be8bc7fc4b782139e7d12145efb3cd
6,762
def binstr2int(bin_str: str) -> int:
    """Convert a binary string to a decimal integer; the inverse of int2binstr.

    Args:
        bin_str: binary string, e.g. '0b0011' or '0011'

    Returns:
        The converted decimal integer
    """
    return int(bin_str, 2)
87c6ac16c2215e533cb407407bef926ed8668e3e
6,763
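A short usage sketch for binstr2int above; int(s, 2) accepts the string with or without the '0b' prefix, matching what the docstring promises.

assert binstr2int('0b0011') == 3
assert binstr2int('1010') == 10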
def _nodeset_compare(compare, a, b, relational=False): """ Applies a comparison function to node-sets a and b in order to evaluate equality (=, !=) and relational (<, <=, >=, >) expressions in which both objects to be compared are node-sets. Returns an XPath boolean indicating the result of the comparison. """ if isinstance(a, Types.NodesetType) and isinstance(b, Types.NodesetType): # From XPath 1.0 Section 3.4: # If both objects to be compared are node-sets, then the comparison # will be true if and only if there is a node in the first node-set # and a node in the second node-set such that the result of # performing the comparison on the string-values of the two nodes # is true. if not (a and b): # One of the two node-sets is empty. In this case, according to # section 3.4 of the XPath rec, no node exists in one of the two # sets to compare, so *any* comparison must be false. return boolean.false # If it is a relational comparison, the actual comparison is done on # the string value of each of the nodes. This means that the values # are then converted to numbers for comparison. if relational: # NumberValue internally coerces a node to a string before # converting it to a number, so the "convert to string" clause # is handled. coerce = Conversions.NumberValue else: coerce = Conversions.StringValue # Convert the nodesets into lists of the converted values. a = map(coerce, a) b = map(coerce, b) # Now compare the items; if any compare True, we're done. for left in a: for right in b: if compare(left, right): return boolean.true return boolean.false # From XPath 1.0 Section 3.4: # If one object to be compared is a node-set and the other is a number, # then the comparison will be true if and only if there is a node in the # node-set such that the result of performing the comparison on the # number to be compared and on the result of converting the string-value # of that node to a number using the number function is true. If one # object to be compared is a node-set and the other is a string, then the # comparison will be true if and only if there is a node in the node-set # such that the result of performing the comparison on the string-value # of the node and the other string is true. If one object to be compared # is a node-set and the other is a boolean, then the comparison will be # true if and only if the result of performing the comparison on the # boolean and on the result of converting the node-set to a boolean using # the boolean function is true. # # (In other words, coerce each node to the same type as the other operand, # then compare them. Note, however, that relational comparisons convert # their operands to numbers.) 
if isinstance(a, Types.NodesetType): # a is nodeset if isinstance(b, Types.BooleanType): a = Conversions.BooleanValue(a) return compare(a, b) and boolean.true or boolean.false elif relational: b = Conversions.NumberValue(b) coerce = Conversions.NumberValue elif isinstance(b, Types.NumberType): coerce = Conversions.NumberValue else: b = Conversions.StringValue(b) coerce = Conversions.StringValue for node in a: if compare(coerce(node), b): return boolean.true else: # b is nodeset if isinstance(a, Types.BooleanType): b = Conversions.BooleanValue(b) return compare(a, b) and boolean.true or boolean.false elif relational: a = Conversions.NumberValue(a) coerce = Conversions.NumberValue elif isinstance(a, Types.NumberType): coerce = Conversions.NumberValue else: a = Conversions.StringValue(a) coerce = Conversions.StringValue for node in b: if compare(a, coerce(node)): return boolean.true return boolean.false
5751b793662689a1e0073cfe5fc4b86505952dcd
6,764
def test_cl_shift(options): """ Create tests for centerline shifts 8 out of 8 points are on one side of the mean >= 10 out of 11 points are on one side of the mean >= 12 out of 14 points are on one side of the mean >= 14 out of 17 points are on one side of the mean >= 16 out of 20 points are on one side of the mean """ windows = [ (8, Window(8, init=options.m)), (10, Window(11, init=options.m)), (12, Window(14, init=options.m)), (14, Window(17, init=options.m)), (16, Window(20, init=options.m)), ] cl = options.m def test(x): for n, w in windows: w.append(x) if np.sum(w.data > cl) >= n: err_out("%s is %g/%g points > centerline" % (w.data, n, w.n)) elif np.sum(w.data < cl) >= n: err_out("%s is %g/%g points < centerline" % (w.data, n, w.n)) return test
60d874c8b1484ff23c4791043eae949912a969d0
6,765
def _scale(tensor):
    """Scale a tensor based on min and max of each example and channel

    Resulting tensor has range (-1, 1).

    Parameters
    ----------
    tensor : torch.Tensor or torch.autograd.Variable
        Tensor to scale of shape BxCxHxW

    Returns
    -------
    Tuple (scaled_tensor, min, max), where min and max are tensors
    containing the values used for normalizing the tensor
    """
    b, c, h, w = tensor.shape
    out = tensor.view(b, c, h * w)
    minimum, _ = out.min(dim=2, keepdim=True)
    out = out - minimum
    maximum, _ = out.max(dim=2, keepdim=True)
    out = out / maximum  # out has range (0, 1)
    out = out * 2 - 1    # out has range (-1, 1)
    return out.view(b, c, h, w), minimum, maximum
64eed9bd70c543def6456f3af89fa588ec35bca8
6,766
def get_moscow_oh(opening_hours):
    """
    returns an OpeningHourBlock from a fake json
    corresponding to a POI located in moscow city
    for different opening_hours formats.
    """
    return get_oh_block(opening_hours, lat=55.748, lon=37.588, country_code="RU")
42f795e262753cc82d8689c2a98e6a82e143a2c3
6,767
def get_firebase_credential_errors(credentials: str):
    """ Wrapper to get error strings for test_firebase_credential_errors because
    otherwise the code is gross. Returns None if no errors occurred. """
    try:
        test_firebase_credential_errors(credentials)
        return None
    except Exception as e:
        return str(e)
fbca79e837a3d6dc85ee90bfd426008c6ce25ac2
6,768
def url(endpoint, path):
    """append the provided path to the endpoint to build an url"""
    return f"{endpoint.rstrip('/')}/{path}"
dee733845984bfc4cf5728e9614cce08d19a2936
6,769
def is_collision(line_seg1, line_seg2):
    """
    Checks for a collision between line segments p1(x1, y1) -> q1(x2, y2)
    and p2(x3, y3) -> q2(x4, y4)
    """
    def on_segment(p1, p2, p3):
        if (p2[0] <= max(p1[0], p3[0])) and (p2[0] >= min(p1[0], p3[0])) and \
                (p2[1] <= max(p1[1], p3[1])) and (p2[1] >= min(p1[1], p3[1])):
            return True
        return False

    def orientation(p1, p2, p3):
        val = ((p2[1] - p1[1]) * (p3[0] - p2[0])) - ((p2[0] - p1[0]) * (p3[1] - p2[1]))
        if val == 0:
            return 0
        elif val > 0:
            return 1
        elif val < 0:
            return 2

    p1, q1 = line_seg1[0], line_seg1[1]
    p2, q2 = line_seg2[0], line_seg2[1]

    o1 = orientation(p1, q1, p2)
    o2 = orientation(p1, q1, q2)
    o3 = orientation(p2, q2, p1)
    o4 = orientation(p2, q2, q1)

    if (o1 != o2) and (o3 != o4):
        return True
    # The original used bitwise '&' (e.g. "o1 == 0 & on_segment(...)"), which binds
    # tighter than '==' and silently reduced each test to "o1 == 0"; logical 'and'
    # expresses the intended collinear special cases.
    if o1 == 0 and on_segment(p1, p2, q1):
        return True
    if o2 == 0 and on_segment(p1, q2, q1):
        return True
    if o3 == 0 and on_segment(p2, p1, q2):
        return True
    if o4 == 0 and on_segment(p2, q1, q2):
        return True
    return False
17dba61faebe50336cbc2cd2cc56c49474db5431
6,770
import numpy def plot_bar_graph_one_time( example_table_xarray, time_index, predictor_indices, info_string=None, figure_object=None, axes_object=None): """Plots predictors at one time as bar graph. :param example_table_xarray: xarray table in format returned by `example_io.read_file`. :param time_index: Index of valid time to plot. :param predictor_indices: 1-D numpy array with indices of predictors to plot. :param info_string: Info string (to be appended to title). :param figure_object: Will plot on this figure (instance of `matplotlib.figure.Figure`). If None, will create new figure. :param axes_object: Will plot on these axes (instance of `matplotlib.axes._subplots.AxesSubplot`). If None, will create new axes. :return: figure_object: See input doc. :return: axes_object: See input doc. :return: pathless_output_file_name: Pathless name for output file. """ error_checking.assert_is_integer(time_index) error_checking.assert_is_geq(time_index, 0) error_checking.assert_is_integer_numpy_array(predictor_indices) error_checking.assert_is_geq_numpy_array(predictor_indices, 0) if info_string is not None: error_checking.assert_is_string(info_string) xt = example_table_xarray predictor_values = ( xt[example_utils.SATELLITE_PREDICTORS_UNGRIDDED_KEY].values[ time_index, predictor_indices ] ) num_predictors = len(predictor_values) y_coords = numpy.linspace( 0, num_predictors - 1, num=num_predictors, dtype=float ) if figure_object is None or axes_object is None: figure_object, axes_object = pyplot.subplots( 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES) ) axes_object.barh( y_coords, predictor_values, color=BAR_FACE_COLOUR, edgecolor=BAR_EDGE_COLOUR, linewidth=BAR_EDGE_WIDTH ) pyplot.yticks([], []) axes_object.set_xlim(MIN_NORMALIZED_VALUE, MAX_NORMALIZED_VALUE) predictor_names = xt.coords[ example_utils.SATELLITE_PREDICTOR_UNGRIDDED_DIM ].values[predictor_indices].tolist() for j in range(num_predictors): axes_object.text( 0, y_coords[j], predictor_names[j], color=BAR_FONT_COLOUR, horizontalalignment='center', verticalalignment='center', fontsize=BAR_FONT_SIZE, fontweight='bold' ) valid_time_unix_sec = ( xt.coords[example_utils.SATELLITE_TIME_DIM].values[time_index] ) valid_time_string = time_conversion.unix_sec_to_string( valid_time_unix_sec, TIME_FORMAT_SECONDS ) cyclone_id_string = xt[satellite_utils.CYCLONE_ID_KEY].values[time_index] if not isinstance(cyclone_id_string, str): cyclone_id_string = cyclone_id_string.decode('utf-8') title_string = 'Satellite for {0:s} at {1:s}'.format( cyclone_id_string, valid_time_string ) if info_string is not None: title_string += '; {0:s}'.format(info_string) axes_object.set_title(title_string) pathless_output_file_name = '{0:s}_{1:s}_scalar_satellite.jpg'.format( cyclone_id_string, valid_time_string ) return figure_object, axes_object, pathless_output_file_name
5b1faab11bd6e79bd617ca23a8f49aeb83de2aae
6,771
def reshape_nda_to_2d(arr):
    """Reshape np.array to 2-d
    """
    sh = arr.shape
    if len(sh) < 3:
        return arr
    # integer division so the new shape stays valid under Python 3
    arr.shape = (arr.size // sh[-1], sh[-1])
    return arr
11c721b938e45fd07d2ed1674a569e6836913ff3
6,772
async def async_setup(hass, config):
    """Initialize the DuckDNS component."""
    domain = config[DOMAIN][CONF_DOMAIN]
    token = config[DOMAIN][CONF_ACCESS_TOKEN]
    session = async_get_clientsession(hass)

    result = await _update_duckdns(session, domain, token)

    if not result:
        return False

    async def update_domain_interval(now):
        """Update the DuckDNS entry."""
        await _update_duckdns(session, domain, token)

    async def update_domain_service(call):
        """Update the DuckDNS entry."""
        await _update_duckdns(session, domain, token, txt=call.data[ATTR_TXT])

    async_track_time_interval(hass, update_domain_interval, INTERVAL)
    hass.services.async_register(
        DOMAIN, SERVICE_SET_TXT, update_domain_service, schema=SERVICE_TXT_SCHEMA
    )

    return result
7208d0a25b219b6decbae314618e219705224a5a
6,773
def mock_signal(*args):
    """Mock creation of a binary signal array.

    :return: binary array
    :rtype: np.ndarray
    """
    signal = np.array([1, 0, 1])
    return signal
ebeb1f40a43c2c51d941208da78e0bfc0acb6530
6,774
def matmul(a, b):
    """np.matmul defaults to bfloat16, but this helper function doesn't."""
    return np.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
a4efb25933a25067b0b37ada8271f09b76929cb8
6,775
def predictions(logit_1, logit_2, logit_3, logit_4, logit_5):
    """Converts predictions into understandable format.
    For example correct prediction for 2 will be > [2,10,10,10,10]
    """
    first_digits = np.argmax(logit_1, axis=1)
    second_digits = np.argmax(logit_2, axis=1)
    third_digits = np.argmax(logit_3, axis=1)
    fourth_digits = np.argmax(logit_4, axis=1)
    fifth_digits = np.argmax(logit_5, axis=1)
    stacked_digits = np.vstack((first_digits, second_digits, third_digits, fourth_digits, fifth_digits))
    rotated_digits = np.rot90(stacked_digits)[::-1]
    return rotated_digits
99e22cc4808634e6510196f2e9e79cba9dafd61c
6,777
def execute_parent(parent_path, child_path, input_tensor_npy, return_full_ctx=False):
    """Execute parent model containing a single StreamingDataflowPartition by
    replacing it with the model at child_path and return result."""
    parent_model = load_test_checkpoint_or_skip(parent_path)
    iname = parent_model.graph.input[0].name
    oname = parent_model.graph.output[0].name
    sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0]
    sdp_node = getCustomOp(sdp_node)
    sdp_node.set_nodeattr("model", child_path)
    ret = execute_onnx(parent_model, {iname: input_tensor_npy}, True)
    if return_full_ctx:
        return ret
    else:
        return ret[oname]
2757d22c46ee89f34cc89c702393d4a42d275c28
6,778
def fibonacci(position):
    """
    Based on a position returns the number in the Fibonacci sequence
    on that position
    """
    if position == 0:
        return 0
    elif position == 1:
        return 1
    return fibonacci(position - 1) + fibonacci(position - 2)
cc4fe0860fa97234ead2179e18d208a8567e0cb3
6,780
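A usage sketch for the recursive fibonacci above. The naive recursion is exponential in position, so the memoised rebinding shown here is an assumption added for illustration, not part of the original snippet.

from functools import lru_cache

print([fibonacci(i) for i in range(8)])   # [0, 1, 1, 2, 3, 5, 8, 13]

# Rebinding the module-level name lets the recursive calls hit the cache too.
fibonacci = lru_cache(maxsize=None)(fibonacci)
print(fibonacci(60))                      # 1548008755920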
def visualize_gebco(source, band, min=None, max=None): """ Specialized function to visualize GEBCO data :param source: String, Google Earth Engine image id :param band: String, band of image to visualize :return: Dictionary """ data_params = deepcopy(DATASETS_VIS[source]) # prevent mutation of global state if min is not None: data_params["bathy_vis_params"]["min"] = min if max is not None: data_params["topo_vis_params"]["max"] = max image = ee.Image(source) gebco = image.select(data_params["bandNames"][band]) land_mask = LANDMASK hillshaded = visualize_elevation( image=gebco, land_mask=land_mask, data_params=data_params, bathy_only=False, hillshade_image=True, ) url = _get_gee_url(hillshaded) info = {} info["dataset"] = "gebco" info["band"] = band linear_gradient = [] palette = ( data_params["bathy_vis_params"]["palette"] + data_params["topo_vis_params"]["palette"] ) n_colors = len(palette) offsets = np.linspace(0, 100, num=n_colors) for color, offset in zip(palette, offsets): linear_gradient.append( {"offset": "{:.3f}%".format(offset), "opacity": 100, "color": color} ) info.update( { "url": url, "linearGradient": linear_gradient, "min": data_params["bathy_vis_params"]["min"], "max": data_params["topo_vis_params"]["max"], "imageId": source, } ) return info
ccd382f1e1ede4cbe58bca6fc7eec15aa1b0a85a
6,781
import asyncio
import functools


def bound_concurrency(size):
    """Decorator to limit concurrency on coroutine calls"""
    sem = asyncio.Semaphore(size)

    def decorator(func):
        """Actual decorator"""
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            """Wrapper"""
            async with sem:
                return await func(*args, **kwargs)
        return wrapper
    return decorator
030e4dea0efccf9d5f2cbe4a40f3e6f32dfef846
6,783
import socket def _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True): """ <Purpose> Given the url, hashes and length of the desired file, this function opens a connection to 'url' and downloads the file while ensuring its length and hashes match 'required_hashes' and 'required_length'. tuf.util.TempFile is used instead of regular tempfile object because of additional functionality provided by 'tuf.util.TempFile'. <Arguments> url: A URL string that represents the location of the file. required_length: An integer value representing the length of the file. STRICT_REQUIRED_LENGTH: A Boolean indicator used to signal whether we should perform strict checking of required_length. True by default. We explicitly set this to False when we know that we want to turn this off for downloading the timestamp metadata, which has no signed required_length. <Side Effects> A 'tuf.util.TempFile' object is created on disk to store the contents of 'url'. <Exceptions> tuf.DownloadLengthMismatchError, if there was a mismatch of observed vs expected lengths while downloading the file. tuf.FormatError, if any of the arguments are improperly formatted. Any other unforeseen runtime exception. <Returns> A 'tuf.util.TempFile' file-like object which points to the contents of 'url'. """ # Do all of the arguments have the appropriate format? # Raise 'tuf.FormatError' if there is a mismatch. tuf.formats.URL_SCHEMA.check_match(url) tuf.formats.LENGTH_SCHEMA.check_match(required_length) # 'url.replace()' is for compatibility with Windows-based systems because # they might put back-slashes in place of forward-slashes. This converts it # to the common format. url = url.replace('\\', '/') logger.info('Downloading: '+str(url)) # NOTE: Not thread-safe. # Save current values or functions for restoration later. previous_socket_timeout = socket.getdefaulttimeout() previous_http_response_class = httplib.HTTPConnection.response_class # This is the temporary file that we will return to contain the contents of # the downloaded file. temp_file = tuf.util.TempFile() try: # NOTE: Not thread-safe. # Set timeout to induce non-blocking socket operations. socket.setdefaulttimeout(tuf.conf.SOCKET_TIMEOUT) # Replace the socket file-like object class with our safer version. httplib.HTTPConnection.response_class = SaferHTTPResponse # Open the connection to the remote file. connection = _open_connection(url) # We ask the server about how big it thinks this file should be. reported_length = _get_content_length(connection) # Then, we check whether the required length matches the reported length. _check_content_length(reported_length, required_length) # Download the contents of the URL, up to the required length, to a # temporary file, and get the total number of downloaded bytes. total_downloaded = _download_fixed_amount_of_data(connection, temp_file, required_length) # Does the total number of downloaded bytes match the required length? _check_downloaded_length(total_downloaded, required_length, STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH) except: # Close 'temp_file'; any written data is lost. temp_file.close_temp_file() logger.exception('Could not download URL: '+str(url)) raise else: return temp_file finally: # NOTE: Not thread-safe. # Restore previously saved values or functions. httplib.HTTPConnection.response_class = previous_http_response_class socket.setdefaulttimeout(previous_socket_timeout)
a9df32371f24a85971807354636621224fc8f7bd
6,784
def tune_speed_librosa(src=None, sr=_sr, rate=1., out_type=np.ndarray):
    """
    Change the speech rate (time-stretch).
    :param src:
    :param rate:
    :return:
    """
    wav = anything2wav(src, sr=sr)
    spec = librosa.stft(wav)
    spec = zoom(spec.T, rate=1 / rate, is_same=0).T
    out = librosa.istft(spec)
    # out = librosa.griffinlim(spec, n_iter=10)
    if out_type is np.ndarray:
        return out
    else:
        return anything2bytesio(out, sr=sr)
423b83c6a266e8ee2b259bf3497e53ff2087ca44
6,785
import pathlib


def fqn_from_file(java_filepath: pathlib.Path) -> str:
    """Extract the expected fully qualified class name for the given java file.

    Args:
        java_filepath: Path to a .java file.
    """
    if not java_filepath.suffix == ".java":
        raise ValueError("{} not a path to a .java file".format(java_filepath))
    package = extract_package(java_filepath)
    simple_name = java_filepath.name[:-len(java_filepath.suffix)]
    return fqn(package, simple_name)
cb1d515af968c1653d31f0529ce40fa6241cf1f4
6,786
def assert_raises(*args, **kwargs):
    """Assert an exception is raised as a context manager or by passing in a
    callable and its arguments.

    As a context manager:
    >>> with assert_raises(Exception):
    ...     raise Exception

    Pass in a callable:
    >>> def raise_exception(arg, kwarg=None):
    ...     raise Exception
    >>> assert_raises(Exception, raise_exception, 1, kwarg=234)
    """
    if (len(args) == 1) and not kwargs:
        return _assert_raises_context_manager(args[0])
    else:
        return _assert_raises(*args, **kwargs)
6ef00a131f6ce5192e88fe9bab34f5cd04dd5a8a
6,787
import click


def proxy(ctx, control, host, port, socket, proxy):
    """Settings to configure the connection to a Tor node acting as proxy."""
    if control == 'port':
        if host is None or port is None:
            raise click.BadOptionUsage(
                option_name='control',
                message=f"--control mode '{control}' requires --host and --port to be defined as well.")
    elif control == 'socket':
        if socket is None:
            raise click.BadOptionUsage(
                option_name='control',
                message="--control mode 'socket' requires --socket to be defined as well.")
    return {'proxy': {
        'control': control,
        'host': host,
        'port': port,
        'socket': socket,
        'proxy': proxy
    }}
4fe25cb7dc38116e26fe61b43e3903908e098459
6,788
import gzip


def get_gzip_uncompressed_file_size(file_name):
    """
    this function will return the uncompressed size of a gzip file
    similar as gzip -l file_name
    """
    file_obj = gzip.open(file_name, 'r')
    file_obj.seek(-8, 2)
    # crc32 = gzip.read32(file_obj)
    isize = gzip.read32(file_obj)
    return isize
bf1e40a83098fa32c95959e28069e4a4d4dcc2d7
6,789
def Capitalize(v):
    """Capitalise a string.

    >>> s = Schema(Capitalize)
    >>> s('hello world')
    'Hello world'
    """
    return str(v).capitalize()
9072ea91b946694bbb1410fb10a5b1b1f5cdd7c2
6,790
def pg_index_exists(conn, schema_name: str, table_name: str, index_name: str) -> bool:
    """
    Does a postgres index exist?

    Unlike pg_exists(), we don't need heightened permissions on the table.

    So, for example, Explorer's limited-permission user can check agdc/ODC
    tables that it doesn't own.
    """
    return (
        conn.execute(
            """
            select indexname
            from pg_indexes
            where schemaname=%(schema_name)s
              and tablename=%(table_name)s
              and indexname=%(index_name)s
            """,
            schema_name=schema_name,
            table_name=table_name,
            index_name=index_name,
        ).scalar()
        is not None
    )
98ebdc0db7f3e42050e61205fd17309d015352a0
6,791
def create_mock_data(bundle_name: str, user_params: dict): """ create some mock data and push to S3 bucket :param bundle_name: str, bundle name :param user_params: dict, what parameters to save :return: """ api.context(context_name) api.remote(context_name, remote_context=context_name, remote_url=s3_path) component_signature = {k: str(v) for k, v in user_params.items()} proc_name = api.Bundle.calc_default_processing_name( bundle_name, component_signature, dep_proc_ids={}) with api.Bundle(context_name, name=bundle_name, processing_name=proc_name) as b: b.add_params(component_signature) # local_path will be replaced by S3 by Disdat api.commit(context_name, bundle_name) api.push(context_name, bundle_name) # save the bundle to S3 return b.uuid # return the bundle uuid
0fd377eac24555306aceff26a61d4a2b4666d33d
6,792
def _vertex_arrays_to_list(x_coords_metres, y_coords_metres):
    """Converts set of vertices from two arrays to one list.

    V = number of vertices

    :param x_coords_metres: length-V numpy array of x-coordinates.
    :param y_coords_metres: length-V numpy array of y-coordinates.
    :return: vertex_list_xy_metres: length-V list, where each element is an
        (x, y) tuple.
    """
    _check_polyline(
        x_coords_metres=x_coords_metres, y_coords_metres=y_coords_metres)

    num_vertices = len(x_coords_metres)
    vertex_list_xy_metres = []
    for i in range(num_vertices):
        vertex_list_xy_metres.append((x_coords_metres[i], y_coords_metres[i]))

    return vertex_list_xy_metres
ef5bed973f684670f979f6cdb0fcfc38b45a4557
6,793
from typing import Optional
from typing import Dict
from typing import Any


def info_from_apiKeyAuth(token: str, required_scopes) -> Optional[Dict[str, Any]]:
    """
    Check and retrieve authentication information from an API key.
    Returned value will be passed in 'token_info' parameter of your operation function, if there is one.
    'sub' or 'uid' will be set in 'user' parameter of your operation function, if there is one.
    Should return None if auth is invalid or does not allow access to called API.

    The real work happens in Auth0._set_user().
    """
    return {"token": token, "method": "apikey"}
29fec65450780e14dfc94979ba2fb73c00d2a4bf
6,794
from datetime import datetime


def convert_unix2dt(series):
    """
    Parameters
    ----------
    series : column from pandas dataframe in UNIX microsecond formatting

    Returns
    -------
    timestamp_dt : series in date-time format
    """
    if (len(series) == 1):
        unix_s = series / 1000
    else:
        unix_s = series.squeeze() / 1000

    timestamp_dt = np.zeros(len(unix_s), dtype='datetime64[ms]')
    for i in range(len(timestamp_dt)):
        timestamp_dt[i] = datetime.fromtimestamp(unix_s.iloc[i])

    return timestamp_dt
92b912ba85b123e9f368b3613bff4a374826130a
6,795
import re def sentence_segment(text, delimiters=('?', '?', '!', '!', '。', ';', '……', '…'), include_symbols=True): """ Sentence segmentation :param text: query :param delimiters: set :param include_symbols: bool :return: list(word, idx) """ result = [] delimiters = set([item for item in delimiters]) delimiters_str = '|'.join(delimiters) blocks = re.split(delimiters_str, text) start_idx = 0 for blk in blocks: if not blk: continue result.append((blk, start_idx)) start_idx += len(blk) if include_symbols and start_idx < len(text): result.append((text[start_idx], start_idx)) start_idx += 1 return result
c8860a872e779873330eaded8e9951cabdbba01e
6,797
def time_rep_song_to_16th_note_grid(time_rep_song):
    """
    Transform the time_rep_song into an array of 16th note with pitches in the onsets
    [[60,4],[62,2],[60,2]] -> [60,0,0,0,62,0,60,0]
    """
    grid_16th = []
    for pair_p_t in time_rep_song:
        grid_16th.extend([pair_p_t[0]] + [0 for _ in range(pair_p_t[1] - 1)])
    return grid_16th
8986819bd39ae4830d04bf40ab158d310bb45485
6,798
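A usage sketch reproducing the docstring example of time_rep_song_to_16th_note_grid above: pitch/duration pairs expand into a 16th-note grid with the pitch only on the onset.

song = [[60, 4], [62, 2], [60, 2]]
print(time_rep_song_to_16th_note_grid(song))   # [60, 0, 0, 0, 62, 0, 60, 0]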
def _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True): """_double_threshold Computes a double threshold over the input array :param x: input array, needs to be 1d :param high_thres: High threshold over the array :param low_thres: Low threshold over the array :param n_connect: Postprocessing, maximal distance between clusters to connect :param return_arr: By default this function returns the filtered indiced, but if return_arr = True it returns an array of tsame size as x filled with ones and zeros. """ assert x.ndim == 1, "Input needs to be 1d" high_locations = np.where(x > high_thres)[0] locations = x > low_thres encoded_pairs = find_contiguous_regions(locations) filtered_list = list( filter( lambda pair: ((pair[0] <= high_locations) & (high_locations <= pair[1])).any(), encoded_pairs)) filtered_list = connect_(filtered_list, n_connect) if return_arr: zero_one_arr = np.zeros_like(x, dtype=int) for sl in filtered_list: zero_one_arr[sl[0]:sl[1]] = 1 return zero_one_arr return filtered_list
74a34ed39336c35dfc7eb954af12bb30b3089609
6,799
def upload():
    """
    Implements the upload page form
    """
    return render_template('upload.html')
4dda3418621b3894234b049e22f810304050a398
6,800
def detect_changepoints(points, min_time, data_processor=acc_difference): """ Detects changepoints on points that have at least a specific duration Args: points (:obj:`Point`) min_time (float): Min time that a sub-segmented, bounded by two changepoints, must have data_processor (function): Function to extract data to feed to the changepoint algorithm. Defaults to `speed_difference` Returns: :obj:`list` of int: Indexes of changepoints """ data = data_processor(points) changepoints = pelt(normal_mean(data, np.std(data)), len(data)) changepoints.append(len(points) - 1) result = [] for start, end in pairwise(changepoints): time_diff = points[end].time_difference(points[start]) if time_diff > min_time: result.append(start) # adds the first point result.append(0) # adds the last changepoint detected result.append(len(points) - 1) return sorted(list(set(result)))
872c2e4d5d8cbb33de100495bc8d9ddb050400c8
6,801
def quad1(P):
    """[summary]

    Arguments:
        P (type): [description]

    Returns:
        [type]: [description]
    """
    x1, z1, x2, z2 = P
    return (Fraction(x1, z1) - Fraction(x2, z2))**2
f3e9c34740038242c29f4abbe168df573da12390
6,803
def update_position(position, velocity):
    """
    :param position: position (previous/running) of a particle
    :param velocity: the newest velocity that has been calculated during the specific
        iteration - new velocity is calculated before the new position
    :return: list - new position
    """
    pos = []
    length = len(position)
    for i in range(length):
        pos.append(position[i] + velocity[i])
    return pos
7734e4021d958f42d974401b78331bcd2911ac92
6,804
def respond_batch():
    """
    responses with [{"batch": [{blacklist_1_name: true}, ]}]
    """
    result = get_result(request)
    return jsonify([{"batch": result}])
97b1ceaafa88aacba09fc0ba6c564e87bfb07b66
6,805
from typing import Union import types from typing import Iterable def get_iterable_itemtype(obj): """Attempts to get an iterable's itemtype without iterating over it, not even partly. Note that iterating over an iterable might modify its inner state, e.g. if it is an iterator. Note that obj is expected to be an iterable, not a typing.Iterable. This function leverages various alternative ways to obtain that info, e.g. by looking for type annotations of '__iter__' or '__getitem__'. It is intended for (unknown) iterables, where the type cannot be obtained via sampling without the risk of modifying inner state. """ # support further specific iterables on demand if isinstance(obj, _typechecked_Iterable): return obj.itemtype try: if isinstance(obj, range): tpl = tuple(deep_type(obj.start), deep_type(obj.stop), deep_type(obj.step)) return Union[tpl] except TypeError: # We're running Python 2 pass if type(obj) is tuple: tpl = tuple(deep_type(t) for t in obj) return Union[tpl] elif type(obj) is types.GeneratorType: return get_generator_yield_type(obj) else: tp = deep_type(obj) if is_Generic(tp): if issubclass(tp.__origin__, Iterable): if len(tp.__args__) == 1: return tp.__args__[0] return _select_Generic_superclass_parameters(tp, Iterable)[0] if is_iterable(obj): if type(obj) is str: return str if hasattr(obj, '__iter__'): if has_type_hints(obj.__iter__): itrator = _funcsigtypes(obj.__iter__, True, obj.__class__)[1] if is_Generic(itrator) and itrator.__origin__ is _orig_Iterator: return itrator.__args__[0] if hasattr(obj, '__getitem__'): if has_type_hints(obj.__getitem__): itrator = _funcsigtypes(obj.__getitem__, True, obj.__class__)[1] if is_Generic(itrator) and itrator.__origin__ is _orig_Iterator: return itrator.__args__[0] return None # means that type is unknown else: raise TypeError('Not an iterable: '+str(type(obj)))
103a3928e4a161f119c8664e58ba7e6270a94a14
6,806
import re


def getTimerIPs():
    """
    returns list of ip addr
    """
    client = docker.from_env()
    container_list = client.containers.list()
    timer_ip_list = []
    for container in container_list:
        if re.search("^timer[1-9][0-9]*", container.name):
            out = container.exec_run("awk 'END{print $1}' /etc/hosts", stdout=True)
            timer_ip_list.append(out.output.decode().split("\n")[0])
    client.close()
    return timer_ip_list
e4bc28407fb8b292df9a813809998bf6b323c938
6,807
def BRepBlend_BlendTool_NbSamplesV(*args):
    """
    :param S:
    :type S: Handle_Adaptor3d_HSurface &
    :param v1:
    :type v1: float
    :param v2:
    :type v2: float
    :rtype: int
    """
    return _BRepBlend.BRepBlend_BlendTool_NbSamplesV(*args)
99af0513463ce64d5369fde226e679e48a7e397a
6,809
def get_response(session, viewstate, event_validation, event_target, outro=None, stream=False, hdfExport=''): """ Handles all the responses received from every request made to the website. """ url = "http://www.ssp.sp.gov.br/transparenciassp/" data = [ ('__EVENTTARGET', event_target), ('__EVENTARGUMENT', ''), ('__VIEWSTATE', viewstate), ('__EVENTVALIDATION', event_validation), ('ctl00$cphBody$hdfExport', hdfExport), ] if outro: data.append(('ctl00$cphBody$filtroDepartamento', '0')) data.append(('__LASTFOCUS', '')) response = session.post(url, headers=headers, data=data, stream=stream) return response
0bb31b29fb8fb8a0fe007f87d88b8d131fd4308c
6,811
def matchVuln(vuln, element, criteria): """ ================================================================================ Name: matchVuln Description: Sets the finding details of a given VULN. Parameter(s): vuln: The VULN element to be searched. element: The element to find. criteria: The search criteria against which to match. Returns: True: If a match is found. False: If a match is not found. Notes: N/A ================================================================================ """ if (getVulnElementValue(vuln, element) == criteria): return True return False
7158b263fd70e921b0b131fd8f2537223521570f
6,812
def require(*modules):
    """Check if the given modules are already available; if not add them to the dependency list."""
    deplist = []
    for module in modules:
        try:
            __import__(module)
        except ImportError:
            deplist.append(module)
    return deplist
88df83cd33d8bddea63e4d2fbfb4d8351a3c23b1
6,813
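A usage sketch for require above, as it might appear in a setup script; the module names are only illustrative.

# Hypothetical usage: build an install_requires list from whatever is missing locally.
missing = require('requests', 'yaml', 'some_nonexistent_module')
print(missing)   # e.g. ['some_nonexistent_module'] if the first two are installed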
def fixture_base_context(
    env_name: str,
) -> dict:
    """Return a basic context"""
    ctx = dict(
        current_user="a_user",
        current_host="a_host",
    )
    return ctx
fbfed439f784bdd64e93910bbb581955200af2bb
6,814
from typing import Optional from typing import List import yaml def definition(server: KedroLanguageServer, params: TextDocumentPositionParams) -> Optional[List[Location]]: """Support Goto Definition for a dataset or parameter. Currently only support catalog defined in `conf/base` """ if not server.is_kedro_project(): return None document = server.workspace.get_document(params.text_document.uri) word = _word_at_position(params.position, document) if word.startswith("params:"): param_location = _get_param_location(server.project_metadata, word) if param_location: return [param_location] catalog_paths = get_conf_paths(server.project_metadata) for catalog_path in catalog_paths: catalog_conf = yaml.load(catalog_path.read_text(), Loader=SafeLineLoader) if word in catalog_conf: line = catalog_conf[word]["__line__"] location = Location( uri=f"file://{catalog_path}", range=Range( start=Position(line=line - 1, character=0), end=Position( line=line, character=0, ), ), ) return [location] return None
7713d92fafa2f0acf68ee34b4dc83f1d5100a9b3
6,816
def augmented_neighbors_list(q_id, neighbors, is_training, processor, train_eval=False): """Retrieve and convert the neighbors to a list. Args: q_id: a question id neighbors: a table mapping q_id to a list of top candidates is_training: True for training set examples processor: Helper object train_eval: If this is on, we have a sub-set of the training set for which we don't add the gold answer if it is not in the neighbors list Returns: lists of passage ids, list of corresponding labels, list of scores, and the index of the first random negative """ n_pb = neighbors[q_id] n_list = [] n_labels = [] n_scores = [] # the higher, the better n_positive = 0 answers = processor.get_answers(q_id) for n in range(len(n_pb)): if n >= FLAGS.max_neighbors: break # ignore any later neighbors next_n = n_pb[n] if processor.answer_match(q_id, next_n[0], answers): n_list.append(next_n[0]) n_labels.append(1) n_scores.append(-next_n[1]) n_positive += 1 else: # see if we keep it n_list.append(next_n[0]) n_labels.append(0) n_scores.append(-next_n[1]) if not n_positive: if (is_training or FLAGS.add_gold_to_eval): gold_p_id = processor.get_gold_passage_id(q_id) if gold_p_id is None and is_training: print("Did not find answer matches.") return [], [], [], 0 if gold_p_id is not None: n_list.append(gold_p_id) n_labels.append(1) prior_gold = 0 n_scores.append(prior_gold) n_positive += 1 else: if is_training: print("Did not find answer matches.") return [], [], [], 0 # add the same number of random examples as we have neighbors # we should add about # (FLAGS.num_candidates -1) * FLAGS. train_records_per_query/2 random index_rand_start = len(n_list) num_random = index_rand_start if is_training and not train_eval: # getting fewer random for speed num_random = (int)( (FLAGS.num_candidates - 1) * FLAGS.train_records_per_query / 2) if FLAGS.add_random: random_passages = processor.get_random(num_random) random_labels = [] random_scores = [0] * num_random for r in range(len(random_passages)): n_scores.append(random_scores[r]) if processor.answer_match(q_id, random_passages[r], answers): random_labels.append(1) else: random_labels.append(0) n_list.extend(random_passages) n_labels.extend(random_labels) return n_list, n_labels, n_scores, index_rand_start
3ae8756a60fdfa4ce3fc6de91d364c5edebcc0ff
6,817
def estimate_tau_exp(chains, **kwargs): """ Estimate the exponential auto-correlation time for all parameters in a chain. """ # Calculate the normalised autocorrelation function in each parameter. rho = np.nan * np.ones(chains.shape[1:]) for i in range(chains.shape[2]): try: rho[:, i] = autocorr.function(np.mean(chains[:, :, i], axis=0), **kwargs) except: continue # Take the max rho at any step. rho_max = np.max(rho, axis=1) # Now fit the max rho with an exponential profile. x = np.arange(rho_max.size) func = lambda tau_exp: np.exp(-x/tau_exp) chi = lambda tau_exp: func(tau_exp[0]) - rho_max # tau_exp is a list # Start with 50% of the chain length. probably OK. tau_exp, ier = leastsq(chi, [chains.shape[1]/2.]) return (tau_exp, rho, func(tau_exp))
3de72cec6fa079913489c9c1b9b72ff572cedf60
6,818
def lda_model_onepass(dictionary, corpus, topics):
    """Create a single pass LDA model"""
    start_time = time.time()

    model = LdaMulticore(corpus, id2word=dictionary, num_topics=topics)
    model.save("./data/lda/all_topics_single.lda")

    print(model.print_topics(-1))
    print("\nDone in {}".format(time.time() - start_time))

    return model
3a88250af8c83fb23112b15cacaad39eeaebb27c
6,819
import dataclasses import pydantic def paramclass(cls: type) -> type: """ Parameter-Class Creation Decorator Transforms a class-definition full of Params into a type-validated dataclass, with methods for default value and description-dictionary retrieval. Hdl21's `paramclass`es are immutable, strongly-typed data-storage structures. They are defined through a syntax similar to `@dataclass`, but using the `Param` constructor, and assignment rather than type annotation. @paramclass class C: reqd = Param(dtype=int, desc="A Required Parameter") optn = Param(dtype=int, desc="An Optional Parameter", default=11) `Param`s each have required datatype (`dtype`) and description (`desc`) fields, and optional default values. Each `paramclass` constructor can be called with ordered arguments, in the order defined in the `paramclass`, or with named arguments. Named arguments are highly recommended for more than a single parameter. Note Python's function-argument ordering requirements also dictate that all `paramclass` required-arguments be declared *before* any optional arguments. This also reinforces good practice for communicating which parameters are required. Each `paramclass` comes with class-methods `descriptions` and `defaults`, which return dictionaries of the parameter names to descriptions and names to default values (for those with defaults), respectively. Requirements of the input `cls`: * *All* non-Python-internal fields must be of type `Param` * Inheritance is not supported """ if cls.__bases__ != (object,): raise RuntimeError(f"Invalid @hdl21.paramclass inheriting from {cls.__bases__}") protected_names = ["descriptions", "defaults"] dunders = dict() params = dict() # Take a lap through the class dictionary, type-check everything and grab Params for key, val in cls.__dict__.items(): if key in protected_names: raise RuntimeError(f"Invalid field name {key} in paramclass {cls}") elif key.startswith("__"): dunders[key] = val elif isinstance(val, Param): params[key] = val else: raise RuntimeError( f"Invalid class-attribute {key} in paramclass {cls}. All attributes should be `hdl21.Param`s." ) # Translate the Params into dataclass.field-compatible tuples fields = list() for name, par in params.items(): field = [name, par.dtype] if par.default is not _default: field.append(dataclasses.field(default=par.default)) # Default factories: not supported, yet. See `Param` below. # elif par.default_factory is not _default: # field.append(dataclasses.field(default_factory=par.default_factory)) fields.append(tuple(field)) # Add a few helpers to the class namespace ns = dict( __params__=params, __paramclass__=True, descriptions=classmethod( lambda cls: {k: v.desc for k, v in cls.__params__.items()} ), defaults=classmethod( lambda cls: { k: v.default for k, v in cls.__params__.items() if v.default is not _default } ), ) # Create ourselves a (std-lib) dataclass cls = dataclasses.make_dataclass(cls.__name__, fields, namespace=ns, frozen=True) # Pass this through the pydantic dataclass-decorator-function cls = pydantic.dataclasses.dataclass(cls, frozen=True) # Pydantic seems to want to add this one *after* class-creation def _brick_subclassing_(cls, *_, **__): msg = f"Error: attempt to sub-class `hdl21.paramclass` {cls} is not supported" raise RuntimeError(msg) cls.__init_subclass__ = classmethod(_brick_subclassing_) # And don't forget to return it! return cls
5f5b4b6612d3afc7858a4b26419d9238aaf6ec92
6,820
import string def text_process(mess): """ Takes in a string of text, then performs the following: 1. Remove all punctuation 2. Remove all stopwords 3. Returns a list of the cleaned text """ # Check characters to see if they are in punctuation nopunc = [char for char in mess if char not in string.punctuation] # Join the characters again to form the string. nopunc = ''.join(nopunc) # Now just remove any stopwords words = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')] words = [word for word in words if word.lower() not in pills['BrandName'].values] # words = [word for word in words if word.lower() not in pills['ChemName'].values] words = [word.lower() for word in words if word.isalpha()] words = [word.lower() for word in words if len(word) > 2] return words
9069df05eb4d1b87c2091a64e7dd55754e362334
6,821
def IntCurveSurface_ThePolyhedronToolOfHInter_IsOnBound(*args):
    """
    :param thePolyh:
    :type thePolyh: IntCurveSurface_ThePolyhedronOfHInter &
    :param Index1:
    :type Index1: int
    :param Index2:
    :type Index2: int
    :rtype: bool
    """
    return _IntCurveSurface.IntCurveSurface_ThePolyhedronToolOfHInter_IsOnBound(*args)
bd64cb058793730197a805d7660fe4c8dc4f7af5
6,822
def read_mrc_like_matlab(mrc_file):
    """ Read MRC stack and make sure stack is 'Fortran indexed' before returning it. """
    mrc_stack = mrcfile.open(mrc_file).data
    fortran_indexed_stack = c_to_fortran(mrc_stack)
    return fortran_indexed_stack
245a7371e94ae6c05248d24e231ce56afc937dd1
6,823
from typing import List def freeze_session(session: tf.Session, keep_var_names: List[str] = None, output_names: List[str] = None, clear_devices: bool = True) -> tf.GraphDef: """ Freezes the state of a session into a pruned computation graph. Creates a new computation graph where variable nodes are replaced by constants taking their current value in the session. The new graph will be pruned so subgraphs that are not necessary to compute the requested outputs are removed. :param session: The TensorFlow session to be frozen. :param keep_var_names: A list of variable names that should not be frozen, or None to freeze all the variables in the graph. :param output_names: Names of the relevant graph outputs. :param clear_devices: Remove the device directives from the graph for better portability. :return The frozen graph definition. """ graph = session.graph with graph.as_default(): freeze_var_names = list( set(v.op.name for v in tf.global_variables()).difference( keep_var_names or [])) output_names = output_names or [] output_names += [v.op.name for v in tf.global_variables()] input_graph_def = graph.as_graph_def() if clear_devices: for node in input_graph_def.node: node.device = "" frozen_graph = tf.graph_util.convert_variables_to_constants( session, input_graph_def, output_names, freeze_var_names) return frozen_graph
4754de754217031ac6151bc3360b6969a46a4e66
6,824
def evaluation(evaluators, dataset, runners, execution_results, result_data): """Evaluate the model outputs. Args: evaluators: List of tuples of series and evaluation functions. dataset: Dataset against which the evaluation is done. runners: List of runners (contains series ids and loss names). execution_results: Execution results that include the loss values. result_data: Dictionary from series names to list of outputs. Returns: Dictionary of evaluation names and their values which includes the metrics applied on respective series loss and loss values from the run. """ eval_result = {} # losses for runner, result in zip(runners, execution_results): for name, value in zip(runner.loss_names, result.losses): eval_result["{}/{}".format(runner.output_series, name)] = value # evaluation metrics for generated_id, dataset_id, function in evaluators: if (not dataset.has_series(dataset_id) or generated_id not in result_data): continue desired_output = dataset.get_series(dataset_id) model_output = result_data[generated_id] eval_result["{}/{}".format(generated_id, function.name)] = function( model_output, desired_output) return eval_result
ef3470edb8b2336bdc54507a5df8023f8095b995
6,827
def bestof(reps, func, *args, **kwargs):
    """Quickest func() among reps runs.

    Returns (best time, last result)
    """
    best = 2 ** 32
    for i in range(reps):
        start = timer()
        ret = func(*args, **kwargs)
        elapsed = timer() - start
        if elapsed < best:
            best = elapsed
    return (best, ret)
975d106a79b79cab3bc287d8b658585f45dd648d
6,828
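A usage sketch for bestof above; bestof relies on a module-level timer, for which timeit.default_timer is the usual (assumed) choice.

from timeit import default_timer as timer   # assumed alias used by bestof's module

best_time, result = bestof(5, sorted, range(10_000))
print(f"{best_time:.6f}s -> first element {result[0]}")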
import tempfile def gdal_aspect_analysis(dem, output=None, flat_values_are_zero=False): """Return the aspect of the terrain from the DEM. The aspect is the compass direction of the steepest slope (0: North, 90: East, 180: South, 270: West). Parameters ---------- dem : str Path to file storing DEM. output : str Path to output file. flat_values_are_zero: bool Designate flat values with value zero. Default: -9999. Returns ------- ndarray Aspect array. Notes ----- Ensure that the DEM is in a *projected coordinate*, not a geographic coordinate system, so that the horizontal scale is the same as the vertical scale (m). """ if output is None: output = tempfile.NamedTemporaryFile().name DEMProcessing(destName=output, srcDS=dem, processing='aspect', zeroForFlat=flat_values_are_zero, format='GTiff', band=1, creationOptions=[GDAL_TIFF_COMPRESSION_OPTION, ]) with rasterio.open(output) as src: return np.ma.masked_values(src.read(1), value=-9999)
ec8aa51f799368508f78dcff81ae991087b56132
6,829
import requests import json def _handle_braze_response(response: requests.Response) -> int: """Handles server response from Braze API. The amount of requests made is well below the limits for the given API endpoint therefore Too Many Requests API errors are not expected. In case they do, however, occur - the API calls will be re-tried, up to `MAX_API_RETRIES`, using exponential delay. In case of a server error, the same strategy will be applied. After max retries have been reached, the execution will terminate. In case users were posted but there were minor mistakes, the errors will be logged. In case the API received data in an unexpected format, the data that caused the issue will be logged. In any unexpected client API error (other than 400), the function execution will terminate. :param response: Response from the API :return: Number of users that resulted in an error :raise APIRetryError: On a 429 or 500 server error :raise FatalAPIError: After `MAX_API_RETRIES` unsuccessful retries, or on any non-400 client error """ res_text = json.loads(response.text) if response.status_code == 201 and 'errors' in res_text: print( f"Encountered errors processing some users: {res_text['errors']}") return len(res_text['errors']) if response.status_code == 400: print(f"Encountered error for user chunk. {response.text}") return 0 server_error = response.status_code == 429 or response.status_code >= 500 if server_error: raise APIRetryError("Server error. Retrying..") if response.status_code > 400: raise FatalAPIError(res_text.get('message', response.text)) return 0
da8aca622f7a4812235797501a1afe56cc760ea4
6,830
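A sketch of a caller that pairs `_handle_braze_response` with the retry strategy its docstring describes; `MAX_API_RETRIES`, `APIRetryError` and `FatalAPIError` are assumed to come from the same module, and the session/URL wiring is illustrative only.

import time
import requests

def post_users_with_retries(session: requests.Session, url: str, payload: dict) -> int:
    for attempt in range(MAX_API_RETRIES):
        response = session.post(url, json=payload)
        try:
            return _handle_braze_response(response)
        except APIRetryError:
            time.sleep(2 ** attempt)  # exponential back-off before the next try
    raise FatalAPIError(f"Giving up after {MAX_API_RETRIES} retries")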
def unpack_file(filepath, tmpdir):
    """
    Attempt to unpack file.

    filepath is the path to the file that should be attempted unpacked.
    tmpdir is a path to a temporary directory unique to this thread where
    the thread will attempt to unpack files to.

    Returns a list of unpacked files or an empty list.
    """
    # Other unpacking tools have been removed due to
    # lacking reliability and usefulness of the tools.
    # If multiple unpacking tools are to be used here,
    # subdirectories below tmpdir should be created for each
    # tool to avoid tools overwriting output of each other.

    # Attempt static unpacking with ClamAV. Return unpacked files.
    return clam_unpack(filepath, tmpdir)
79fb80fe61145e865b128587525bc743d19e2ad0
6,831
def xarray_image_as_png(img_data, loop_over=None, animate=False, frame_duration=1000):
    """Render an Xarray image as a PNG.

    :param img_data: An xarray dataset, containing 3 or 4 uint8 variables: red, green, blue, and optionally alpha.
    :param loop_over: Optional name of a dimension on img_data.  If set, xarray_image_as_png is called in a loop
                      over all coordinate values for the named dimension.
    :param animate: Optional generate animated PNG
    :return: A list of bytes representing a PNG image file. (Or a list of lists of bytes, if loop_over was set.)
    """
    if loop_over and not animate:
        return [
            xarray_image_as_png(img_data.sel(**{loop_over: coord}))
            for coord in img_data.coords[loop_over].values
        ]
    xcoord = None
    ycoord = None
    for cc in ("x", "longitude", "Longitude", "long", "lon"):
        if cc in img_data.coords:
            xcoord = cc
            break
    for cc in ("y", "latitude", "Latitude", "lat"):
        if cc in img_data.coords:
            ycoord = cc
            break
    if not xcoord or not ycoord:
        raise Exception("Could not identify spatial coordinates")
    width = len(img_data.coords[xcoord])
    height = len(img_data.coords[ycoord])
    img_io = BytesIO()

    # Render XArray to APNG via Pillow
    # https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#apng-sequences
    if loop_over and animate:
        time_slices_array = [
            xarray_image_as_png(img_data.sel(**{loop_over: coord}), animate=True)
            for coord in img_data.coords[loop_over].values
        ]
        images = []

        for t_slice in time_slices_array:
            im = Image.fromarray(t_slice, "RGBA")
            images.append(im)

        images[0].save(img_io, "PNG", save_all=True, default_image=True,
                       loop=0, duration=frame_duration, append_images=images)
        img_io.seek(0)
        return img_io.read()

    if "time" in img_data.dims:
        img_data = img_data.squeeze(dim="time", drop=True)

    pillow_data = render_frame(img_data.transpose(xcoord, ycoord), width, height)
    if not loop_over and animate:
        return pillow_data

    # Change PNG rendering to Pillow
    im_final = Image.fromarray(pillow_data, "RGBA")
    im_final.save(img_io, "PNG")
    img_io.seek(0)
    return img_io.read()
201cb29144054417d7eb743690601ae37558dfbd
6,832
def x_section_from_latlon(elevation_file,
                          x_section_lat0,
                          x_section_lon0,
                          x_section_lat1,
                          x_section_lon1,
                          as_polygon=False,
                          auto_clean=False):
    """
    This workflow extracts a cross section from a DEM
    based on the input latitude and longitude point pairs.

    Parameters:
    -----------
    elevation_file: str
        Path to the elevation DEM.
    x_section_lat0: float
        The first coordinate latitude.
    x_section_lon0: float
        The first coordinate longitude.
    x_section_lat1: float
        The second coordinate latitude.
    x_section_lon1: float
        The second coordinate longitude.
    as_polygon: bool, optional
        If True, will return cross section as a
        :obj:`shapely.geometry.Polygon`. Default is False.
    auto_clean: bool, optional
        If True, will attempt to clean any issues from the polygon.
        Default is False.

    Returns:
    --------
    list or :obj:`shapely.geometry.Polygon`
        Cross section information.
        The list will be xy coordinate pairs.

    Example::

        from shapely.geometry import Polygon
        from xman.xsect import x_section_from_latlon

        elevation_file = '/path/to/elevation.tif'
        lat1 = 34.105265417341442
        lon1 = 38.993958690587505
        lat2 = 34.107264451129197
        lon2 = 38.99355588515526
        x_sect_list = x_section_from_latlon(elevation_file,
                                            lat1,
                                            lon1,
                                            lat2,
                                            lon2)
    """
    utm_proj = utm_proj_from_latlon(x_section_lat0, x_section_lon0,
                                    as_osr=True)
    sp_ref = osr.SpatialReference()
    sp_ref.ImportFromEPSG(4326)
    geo_to_utm_trans = osr.CoordinateTransformation(sp_ref, utm_proj)

    x_line_m = LineString((
        geo_to_utm_trans.TransformPoint(x_section_lon0, x_section_lat0)[:2],
        geo_to_utm_trans.TransformPoint(x_section_lon1, x_section_lat1)[:2]
    ))

    elevation_utm_ggrid = GDALGrid(elevation_file).to_projection(utm_proj)

    x_sect_list = []
    for x_step in np.linspace(0, x_line_m.length, num=20):
        x_point = x_line_m.interpolate(x_step)
        x_sect_list.append((
            x_step,
            elevation_utm_ggrid.get_val_coord(x_point.x, x_point.y)
        ))

    if as_polygon or auto_clean:
        x_sect_poly = Polygon(x_sect_list)
        if not x_sect_poly.is_valid and auto_clean:
            x_sect_poly = x_sect_poly.buffer(0)
            print("WARNING: Cross section cleaned up.")
            if hasattr(x_sect_poly, 'geoms'):
                if len(x_sect_poly.geoms) > 1:
                    largest_poly = x_sect_poly.geoms[0]
                    for geom_poly in x_sect_poly.geoms[1:]:
                        if geom_poly.area > largest_poly.area:
                            largest_poly = geom_poly
                    x_sect_poly = largest_poly

        if as_polygon:
            return x_sect_poly

        x_coords, y_coords = x_sect_poly.exterior.coords.xy
        return list(zip(x_coords, y_coords))

    return x_sect_list
0773cf535ee18ff91db805d692687c67bf6b2ed4
6,833
import re


def convert_not_inline(line):
    """
    Convert the parts of a line that are not inline code but might impact inline code.
    This part deals with the following markdown syntax:
        - strong
        - scratch
        - italics
        - image
        - link
        - checkbox
        - highlight
    :param line: str, the not inline code part of markdown
    :return: str, the html format
    """
    # deal with strong
    line = strong(line)

    # Scratch
    line = scratch(line)

    # italics
    line = italics(line)

    # highlight
    line = highlight(line)

    # image
    while len(re.match(r'((?P<pre_text>.*)!\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line).group()) \
            != 0:
        match = re.match(r'((?P<pre_text>.*)!\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line)
        pre_text = match.group('pre_text')
        alt_text = match.group('alt_text')
        link = match.group('link')
        after_text = match.group('after_text')

        # scale image
        if len(re.match(r'((?P<pre_link>.*)#scale=(?P<scale>[0-9]*))*', link).group()) != 0:
            match_scale = re.match(r'((?P<pre_link>.*)#scale=(?P<scale>[0-9]*))*', link)
            scale = match_scale.group('scale')
            img_html = '<img style="display: block; margin-left: auto; margin-right: auto; height:' + str(scale) \
                       + '%" src="' + link + '" alt="' + alt_text + '">'
        else:
            img_html = '<img style="display: block; margin-left: auto; margin-right: auto;" src="' + link \
                       + '" alt="' + alt_text + '">'
        line = pre_text + img_html + after_text

    # link
    while len(re.match(r'((?P<pre_text>.*)\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line).group()) \
            != 0:
        match = re.match(r'((?P<pre_text>.*)\[(?P<alt_text>.*)\]\((?P<link>.*)\)(?P<after_text>.*))*', line)
        pre_text = match.group('pre_text')
        alt_text = match.group('alt_text')
        link = match.group('link')
        if len(link) != 0 and link[0] == '#':
            link = link.replace(' ', '-')
        after_text = match.group('after_text')
        img_html = '<a href="' + link + '">' + alt_text + '</a>'
        line = pre_text + img_html + after_text

    return line
871abca2977fc494036c6d5aa19de789cfbfd5b9
6,834
def uniform_square_aperture(side, skypos, frequency, skyunits='altaz',
                            east2ax1=None, pointing_center=None,
                            power=False):
    """
    -----------------------------------------------------------------------------
    Compute the electric field or power pattern at the specified sky positions
    due to a uniformly illuminated square aperture

    Inputs:

    side     [scalar] Sides of the square (in m)

    skypos   [list or numpy vector] Sky positions at which the power pattern is
             to be estimated. Size is M x N where M is the number of locations,
             N = 2 (if skyunits = altaz denoting Alt-Az coordinates), or N = 3
             (if skyunits = dircos denoting direction cosine coordinates). If
             skyunits = altaz, then altitude and azimuth must be in degrees

    frequency
             [list or numpy vector] frequencies (in GHz) at which the power
             pattern is to be estimated. Frequencies differing by too much and
             extending over the usual bands cannot be given.

    Keyword Inputs:

    skyunits [string] string specifying the coordinate system of the sky
             positions. Accepted values are 'altaz', and 'dircos'.
             Default = 'altaz'. If 'dircos', the direction cosines are aligned
             with the local East, North, and Up. If 'altaz', then altitude and
             azimuth must be in degrees.

    east2ax1 [scalar] Angle (in degrees) the primary axis of the array makes
             with the local East (positive anti-clockwise).

    pointing_center
             [list or numpy array] coordinates of pointing center (in the same
             coordinate system as that of sky coordinates specified by
             skyunits). 2-element vector if skyunits='altaz'. 2- or 3-element
             vector if skyunits='dircos'.

    power    [boolean] If set to True, compute power pattern, otherwise compute
             field pattern (default=False).

    Output:

    Electric field pattern or power pattern, number of rows equal to the number
    of sky positions (which is equal to the number of rows in skypos), and
    number of columns equal to the number of wavelengths.
    -----------------------------------------------------------------------------
    """

    try:
        side, skypos, frequency
    except NameError:
        raise NameError('Square antenna side, skypos, frequency must be specified')

    # Note: the original checked the not-yet-defined name "sides" here; the
    # scalar input is "side".
    if not isinstance(side, (int, float)):
        raise TypeError('Antenna side must be a scalar')
    sides = NP.asarray([side] * 2, dtype=NP.float)

    ab = uniform_rectangular_aperture(sides, skypos, frequency,
                                      skyunits=skyunits,
                                      east2ax1=east2ax1,
                                      pointing_center=pointing_center,
                                      power=power)
    return ab
275249164bba5fae8f8652f8af1f2c8dc13c9525
6,835
def sources_table(citator):
    """
    Return the content for an HTML table listing every template that the
    citator can link to.
    """
    rows = []
    for template in citator.templates.values():
        # skip templates that can't make URLs
        if not template.__dict__.get('URL_builder'):
            continue
        URL = urlsplit(''.join(template.URL_builder.parts))
        domain_URL = f'{URL.scheme}://{URL.netloc}'
        domain_name = URL.hostname
        regex = unify_regex(template, simplify_for_regexper=True)
        rows.append(SOURCES_TABLE_ROW.format(
            name=template.name,
            domain_URL=domain_URL,
            domain_name=domain_name,
            escaped_regex=quote_plus(regex).replace('+', '%20')
        ))
    return SOURCES_TABLE.format(rows=''.join(rows))
460a06e03e7ec6d5cee465001d9b828976a4da1b
6,836
def parse_bot_commands(data, starterbot_id):
    """
    Parses an event coming from the Slack RTM API to find bot commands.
    If a bot command is found, this function returns a tuple of command and channel.
    If it's not found, then this function returns None, None.
    """
    user_id, message = parse_direct_mention(data["text"])
    print(f'user_id: {user_id}')
    print(f'starterbot_id: {starterbot_id}')
    if user_id == starterbot_id:
        return message, data["channel"]
    return None, None
5d614cfdf55133180425a87aedac34896a54b552
6,837
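A quick way to exercise `parse_bot_commands`; the `parse_direct_mention` helper below is an assumed stand-in for the one the original module defines (the usual Slack starterbot mention regex), and the IDs are made up.

import re

MENTION_REGEX = r"^<@(|[WU].+?)>(.*)"

def parse_direct_mention(message_text):
    # Returns (mentioned user id, trailing text) or (None, None).
    matches = re.search(MENTION_REGEX, message_text)
    return (matches.group(1), matches.group(2).strip()) if matches else (None, None)

event = {"text": "<@U012ABCDEF> deploy staging", "channel": "C0123456"}
print(parse_bot_commands(event, "U012ABCDEF"))  # ('deploy staging', 'C0123456')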
from copy import deepcopy  # added: deepcopy is used below but was not imported
from typing import Callable


def construct_obj_in_dict(d: dict, cls: Callable) -> dict:
    """
    Args
        d (dict): d[name][charge][annotation]
    """
    if not isinstance(d, dict):
        return d
    else:
        new_d = deepcopy(d)
        for key, value in d.items():
            if value.get("@class", "") == cls.__name__:
                new_d[key] = cls.from_dict(value)
            else:
                new_d[key] = construct_obj_in_dict(value, cls)
        return new_d
c069fb474a6a675f8d917483435856df506ff331
6,838
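A toy round trip through `construct_obj_in_dict`; the `Defect` class is a made-up stand-in for whatever serializable class the caller passes in — it only needs a `from_dict` classmethod and a matching `@class` marker in the nested dict.

class Defect:
    def __init__(self, name):
        self.name = name

    @classmethod
    def from_dict(cls, dct):
        return cls(dct["name"])

nested = {"vacancy": {"1": {"anno": {"@class": "Defect", "name": "V_O"}}}}
rebuilt = construct_obj_in_dict(nested, Defect)
print(type(rebuilt["vacancy"]["1"]["anno"]).__name__)  # Defect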
def signup_user(request):
    """
    Function to sign up users that are not admins

    :param request: This param contains all the information associated to the request
    :param type request: Request
    :return: The URL to render
    :rtype: str
    """
    try:
        log = LoggerManager('info', 'singup_manager-info', session=request.session)
        if request.method == 'POST':
            form = ClientRegistrationForm(request.POST)
            if form.is_valid():
                form.save()
                max_id = Account.objects.all().aggregate(Max('id'))['id__max']
                user = Account.objects.filter(id=max_id)
                web_group, created = Group.objects.get_or_create(name=request.user.email)
                web_group.user_set.add(request.user.id)
                web_group.user_set.add(user.get().id)
                log.write_info(form.data)
                return redirect('client_list')
        else:
            form = ClientRegistrationForm()
        return render(request, 'registration/signup.html', {
            'form': form
        })
    except Exception as ex:
        log = LoggerManager('exception', 'singup_manager-exception', session=request.session)
        log.write_exception(ex)
3ea45a9b84cb8281f3afc6997230fdcbab75f045
6,839
def haar_rand_state(dim: int) -> np.ndarray:
    """
    Given a Hilbert space dimension dim this function returns a vector
    representing a random pure state operator drawn from the Haar measure.

    :param dim: Hilbert space dimension.
    :return: Returns a dim by 1 vector drawn from the Haar measure.
    """
    unitary = haar_rand_unitary(dim)
    fiducial_vec = np.zeros((dim, 1))
    fiducial_vec[0] = 1
    return np.matmul(unitary, fiducial_vec)
3d374fe32fee91667747df86d79f9feb08836c61
6,840
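A small sanity check on the returned state, assuming `haar_rand_unitary` is available from the same module; applying a Haar-random unitary to a unit vector preserves unit norm.

import numpy as np

psi = haar_rand_state(4)
print(psi.shape)                              # (4, 1)
print(np.isclose(np.linalg.norm(psi), 1.0))   # True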
from pathlib import Path


def is_src_package(path: Path) -> bool:
    """Checks whether a package is of the form:

    ├─ src
    │  └─ packagename
    │     ├─ __init__.py
    │     └─ ...
    ├─ tests
    │  └─ ...
    └─ setup.py

    The check for the path will be if it's a directory with only one
    subdirectory containing an __init__.py file.

    Parameters
    ----------
    path : Path
        Full path pointing to a dir.

    Returns
    -------
    check : bool
        If the package is an src package, returns True, False otherwise.

    See Also
    --------
    is_package
    """
    check: bool = False
    if path.is_dir():
        maybe_subdirs = list(path.iterdir())
        if len(maybe_subdirs) == 1:
            check = is_package(path / maybe_subdirs[0])
    return check
36bfd704a0a71a41e9943dc9a9d19cf5e46746f8
6,841
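A usage sketch that builds a throwaway src-layout on disk; the `is_package` helper referenced in "See Also" is assumed here to simply look for an `__init__.py`.

import tempfile
from pathlib import Path

def is_package(path: Path) -> bool:
    # Assumed stand-in for the real helper.
    return path.is_dir() and (path / "__init__.py").exists()

with tempfile.TemporaryDirectory() as tmp:
    src = Path(tmp) / "src"
    (src / "packagename").mkdir(parents=True)
    (src / "packagename" / "__init__.py").touch()
    print(is_src_package(src))  # True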
from re import T
from typing import Optional
from re import I


def value_element(units=(OneOrMore(T('NN')) | OneOrMore(T('NNP')) | OneOrMore(T('NNPS')) | OneOrMore(T('NNS')))('raw_units').add_action(merge)):
    """
    Returns an Element for values with given units. By default, uses tags to guess that a unit exists.

    :param BaseParserElement units: (Optional) A parser element for the units that are to be looked for. Default option looks for nouns.
    :returns: An Element to look for values and units.
    :rtype: BaseParserElement
    """
    number = R('^[\+\-–−]?\d+(\.\d+)?$')
    joined_range = R('^[\+\-–−]?\d+(\.\d+)?[\-–−~∼˜]\d+(\.\d+)?$')('raw_value').add_action(merge)
    spaced_range = (number + Optional(units).hide() + (R('^[\-–−~∼˜]$') + number | number))('raw_value').add_action(merge)
    to_range = (number + Optional(units).hide() + I('to') + number)('raw_value').add_action(join)
    plusminus_range = (number + R('±') + number)('value').add_action(join)
    between_range = (I('between').hide() + number + I('and') + number).add_action(join)
    value_range = (Optional(R('^[\-–−]$')) + (plusminus_range | joined_range | spaced_range | to_range | between_range))('raw_value').add_action(merge)
    value_single = (Optional(R('^[~∼˜\<\>]$')) + Optional(R('^[\-–−]$')) + number)('raw_value').add_action(merge)
    value = Optional(lbrct).hide() + (value_range | value_single)('raw_value') + Optional(rbrct).hide()
    return value + units
1b111fb30369d0d3b6c506d5f02e80b5c88044d5
6,842
def get_pattern(model_id, release_id) -> list:
    """
    content demo:
    [
        '...',
        {
            0.1: [
                ['if', 'checker.check'],
                3903,
                ['if', 'checker.check', '*', Variable(name="ip", value='10.0.0.1')],
                ['if checker.check():', 'if checker.check()'],
                [282, 1877],
                27886975249790003104399390262688492018705644758766193963474214767849400520551
            ]
        },
        '...',
        '...'
    ]

    sensitive_pattern [List]:
        - representative tokens: one of the tokens that matches the pattern
        - numbers: number of log entries belonging to this pattern
        - pattern: the clustering pattern
        - raw_log: list of all raw logs
        - log_index: indices of all raw logs
        - log_signature: clustering model signature
    """
    content = AiopsModelHandler.pickle_decode(
        content=AiopsModelHandler().aiops_release_model_release_id_model_file(
            model_id=model_id, model_release_id=release_id
        )["file_content"]
    )

    patterns = []
    for _, sensitive_patterns in content[CONTENT_PATTERN_INDEX].items():
        for sensitive_pattern in sensitive_patterns:
            signature = sensitive_pattern[PATTERN_SIGNATURE_INDEX]

            pattern_list = []
            for pattern in sensitive_pattern[PATTERN_INDEX]:
                if hasattr(pattern, "name"):
                    pattern_list.append("#{}#".format(pattern.name))
                    continue
                pattern_list.append(str(pattern))

            patterns.append({"signature": str(signature), "pattern": " ".join(pattern_list)})

    return patterns
2307180d26e687fd7057c326e15e21b7aaf81471
6,843
def bit_remove(bin_name, byte_offset, byte_size, policy=None):
    """Creates a bit_remove_operation to be used with operate or operate_ordered.

    Remove bytes from bitmap at byte_offset for byte_size.

    Args:
        bin_name (str): The name of the bin containing the map.
        byte_offset (int): Position of bytes to be removed.
        byte_size (int): How many bytes to remove.
        policy (dict, optional): The bit_policy policy dictionary.
            See :ref:`aerospike_bit_policies`. default: None

    Returns:
        A dictionary usable in operate or operate_ordered. The format of the dictionary
        should be considered an internal detail, and subject to change.
    """
    return {
        OP_KEY: aerospike.OP_BIT_REMOVE,
        BIN_KEY: bin_name,
        POLICY_KEY: policy,
        BYTE_OFFSET_KEY: byte_offset,
        BYTE_SIZE_KEY: byte_size
    }
356ff2f3421b67790f7e224ecc49c844335864f8
6,844
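A hedged sketch of using the returned operation with the Aerospike client; the host, namespace/set/key and bin name are placeholders, and a reachable Aerospike server is assumed.

import aerospike

config = {"hosts": [("127.0.0.1", 3000)]}          # placeholder host
client = aerospike.client(config).connect()
key = ("test", "demo", "bitmap-record")            # placeholder key

ops = [bit_remove("bits", byte_offset=1, byte_size=2)]  # drop 2 bytes starting at byte 1
_, _, bins = client.operate(key, ops)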
def four2five(data, format_, dst_dtype='float16', need_custom_tiling=True):
    """
    Convert 4-dims "data" to 5-dims, the format of "data" is defined in "format_"

    Args:
        data (tvm.tensor.Tensor): 4-dims tensor of type float16, float32
        format_ (str): a str defined the format of "data"
        dst_dtype (str): a str defined the type of output, could be float16 or float32

    Returns:
        5-dims tvm.tensor.Tensor, type is defined by dst_dtype,
        which shape is [N, ceil(C / 16), H, W, 16] and attr about tiling args

    Raises:
        ValueError: If the type of format_ is invalid.
    """
    # Check dtype
    vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
    # Check shape
    shape = get_shape(data)
    vc_util.davinci_format_check(shape, format_, dim=4)
    # Check format
    if format_ not in ['NCHW', 'NHWC']:
        raise ValueError("{} format is not support, four2five only support NCHW and NHWC format input"
                         .format(format_))

    last_channel = 16
    if format_ == "NCHW":
        bs, c, h, w = get_shape(data)
    else:
        bs, h, w, c = get_shape(data)
    pad_c = c
    if c % last_channel != 0:
        pad_c = (c + 15) // last_channel * last_channel
    c1 = pad_c // last_channel
    c0 = last_channel
    is_dynamic = ds.shape_is_dynamic(data)
    if not is_dynamic:
        attrs = get_attrs()
    else:
        attrs = get_dynamic_attrs()
    # Check size c when casting happens
    if data.dtype != dst_dtype and c0 * c1 >= C_LIMIT_FOR_CAST:
        raise ValueError("When input and output data type is not matched, shape of 'c' axis should not exceed {}, "
                         "while currently set is {}".format(C_LIMIT_FOR_CAST, c0 * c1))

    @script(capture=locals())
    def nchw_to_nc1hwc0_step(inputs, bs, c1, h, w, c0):
        output = allocate((bs, c1, h, c0, w), inputs.dtype, "local")
        for n_i in range(bs):
            for c_i in range(c1):
                for h_i in range(h):
                    for w_i in range(w):
                        for c_i0 in range(c0):
                            output[n_i, c_i, h_i, c_i0, w_i] = inputs[n_i, c_i * last_channel + c_i0, h_i, w_i]
        output1 = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
        for n_i in range(bs):
            for c_i in range(c1):
                for h_i in range(h):
                    for w_i in range(w):
                        for c_i0 in range(c0):
                            output1[n_i, c_i, h_i, w_i, c_i0] = output[n_i, c_i, h_i, c_i0, w_i]
        return output1

    @script(capture=locals())
    def nchw_to_nc1hwc0(inputs, bs, c1, h, w, c0):
        output = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
        for n_i in range(bs):
            for c_i in range(c1):
                for h_i in range(h):
                    for w_i in range(w):
                        for c_i0 in range(c0):
                            output[n_i, c_i, h_i, w_i, c_i0] = inputs[n_i, c_i * last_channel + c_i0, h_i, w_i]
        return output

    @script(capture=locals())
    def nhwc_to_nc1hwc0(inputs, zero, bs, c1, h, w, c0):
        output = allocate((bs, c1, h, w, c0), inputs.dtype, "local")
        for n_i in range(bs):
            for c_i in range(c1):
                for h_i in range(h):
                    for w_i in range(w):
                        for c_i0 in range(c0):
                            if c_i * last_channel + c_i0 < c:
                                output[n_i, c_i, h_i, w_i, c_i0] = inputs[n_i, h_i, w_i, c_i * last_channel + c_i0]
                            else:
                                output[n_i, c_i, h_i, w_i, c_i0] = zero
        return output

    cast_data = data
    need_cast = data.dtype == 'float32' and dst_dtype == 'float16'
    if c % last_channel != 0 or need_cast:
        expansion = int(ct_util.BLOCK_SIZE / get_bytes(data.dtype))
    else:
        expansion = None
    # float32 -> float16, need to cast before transform
    if need_cast:
        cast_data = akg.lang.cce.cast_to(data, dst_dtype)

    zero_ = akg.tvm.const(0.0, cast_data.dtype)

    if format_ == "NCHW":
        if c % last_channel != 0:
            pad_shape = [bs, pad_c, h, w]
            if h == 1 and w == 1:
                # if h and w both are 1, it is pad last dim case
                output_shape = [bs, pad_c // last_channel, h, w, last_channel]

                output = akg.tvm.compute(output_shape,
                                         lambda i, c1, k, l, c0: akg.tvm.expr.Select(
                                             c0 < c - c1 * last_channel,
                                             cast_data[i, c1 * last_channel + c0, k, l],
                                             akg.tvm.const(0, cast_data.dtype)),
                                         name="output")
            else:
                # if need to pad c dim, separate transpose to two steps
                # first is nchw -> nc1hc0w, second is nc1hc0w -> nc1hwc0
                pad_data = akg.tvm.compute(pad_shape,
                                           lambda i, j, k, l: akg.tvm.expr.Select(j < c, cast_data[i, j, k, l], zero_),
                                           name="pad_data")
                output = nchw_to_nc1hwc0_step(
                    pad_data,
                    to_tvm_const(bs),
                    to_tvm_const(c1),
                    to_tvm_const(h),
                    to_tvm_const(w),
                    to_tvm_const(c0))
        else:
            if not is_dynamic and data.dtype == "float16" and h * w % last_channel == 0 and h * w < 3600:
                output_shape = [bs, c1, h, w, c0]
                output = akg.tvm.compute(output_shape,
                                         lambda n, c1, h, w, c0:
                                         akg.lang.cce.four2five_nchw(cast_data[n, c1 * last_channel + c0, h, w]),
                                         name="output")
            else:
                output = nchw_to_nc1hwc0(
                    cast_data,
                    to_tvm_const(bs),
                    to_tvm_const(c1),
                    to_tvm_const(h),
                    to_tvm_const(w),
                    to_tvm_const(c0))
    else:
        if not is_dynamic and c < last_channel:
            rank = 5  # (n, c1, h, w, c0)
            pad_before = []
            pad_after = []
            for _ in range(rank):
                pad_before.append(0)
                pad_after.append(0)
            pad_after[-1] = last_channel - c
            # As c < last_channel, c1 is 1
            output = akg.tvm.compute((bs, c1, h, w, c),
                                     lambda bs_i, _, h_i, w_i, c_i: cast_data[bs_i, h_i, w_i, c_i],
                                     name="output")
            output = tvm_pad(output, pad_before, pad_after=pad_after, name='pad_output')
        else:
            output = nhwc_to_nc1hwc0(
                cast_data,
                zero_,
                to_tvm_const(bs),
                to_tvm_const(c1),
                to_tvm_const(h),
                to_tvm_const(w),
                to_tvm_const(c0))

    # float16 -> float32, need to cast after transform
    if data.dtype == 'float16' and dst_dtype == 'float32':
        output = akg.lang.cce.cast_to(output, dst_dtype)

    vc_util.davinci_format_check(output.shape, "NC1HWC0", dim=5)

    if not is_dynamic:
        dim_info, _ = four2five_set_dim_func(data, format_, dst_dtype)
        if dim_info != "":
            attrs["dim"] = dim_info
        if need_custom_tiling:
            attrs["custom_tiling"] = four2five_tiling_strategy(output, format_, expansion)
    elif need_custom_tiling:
        attrs["custom_tiling"] = four2five_tiling_strategy_dynamic(output, format_)

    if is_dynamic:
        attrs["enable_feature_library_pre_poly"] = True
    return output, attrs
71de138f15e5b407a244c1670c48eb806b3be765
6,846
def MT33_SDR(MT33):
    """Converts 3x3 matrix to strike dip and rake values (in radians)

    Converts the 3x3 Moment Tensor to the strike, dip and rake.

    Args
        MT33: 3x3 numpy matrix

    Returns
        (float, float, float): tuple of strike, dip, rake angles in radians

    (Note: Function from MTFIT.MTconvert)
    """
    T, N, P, E = MT33_TNPE(MT33)
    N1, N2 = TP_FP(T, P)
    return FP_SDR(N1, N2)
0b80df0e43bc546aa36a06858f144f32d75cb478
6,847
from generate_changelog.utilities import pairs
from typing import Optional
from typing import List
import re


def get_commits_by_tags(repository: Repo, tag_filter_pattern: str, starting_tag: Optional[str] = None) -> List[dict]:
    """
    Group commits by the tags they belong to.

    Args:
        repository: The git repository object
        tag_filter_pattern: A regular expression pattern that matches valid tags as versions
        starting_tag: Only include tags after this one

    Returns:
        A list of dictionaries with tag information with most recent first
    """
    tags = [tag for tag in get_tags(repository) if re.match(tag_filter_pattern, tag.name)]
    head_commit = repository.commit("HEAD")
    head_tagger = head_commit.committer.name
    if head_commit.committer.email:
        head_tagger += f" <{head_commit.committer.email}>"
    head = TagInfo(
        name="HEAD",
        commit=head_commit.hexsha,
        tagger=head_tagger,
        tagged_datetime=head_commit.committed_datetime,
    )
    tags.insert(0, head)

    groups = []
    for end_tag, start_tag in pairs(tags):
        start_tag_name = getattr(start_tag, "name", None)
        groups.append(
            {
                "tag_name": end_tag.name,
                "tag_info": end_tag,
                "commits": parse_commits(repository, start_tag_name, end_tag.name),
            }
        )
        if starting_tag and start_tag_name == starting_tag:
            break
    return groups
1e870d2169496e183c1df8d90ddceb4e63cb1689
6,848
def GetObject():
    """ Required module function.

    @returns class object of the implemented adapter.
    """
    return SpacePacketAdapter
38ddac47a1ac7a58f203fc5552a00340a542518d
6,849
def parse(args):
    """Parse the command-line arguments of the `inpaint` command.

    Parameters
    ----------
    args : list of str
        List of arguments, without the command name.

    Returns
    -------
    InPaint
        Filled structure
    """

    struct = InPaint()

    struct.files = []
    while cli.next_isvalue(args):
        val, *args = args
        struct.files.append(val)

    while args:
        if cli.next_isvalue(args):
            raise ParseError(f'Value {args[0]} does not seem to belong '
                             f'to a tag.')
        tag, *args = args

        if tag in ('-m', '--missing'):
            struct.missing = []
            while cli.next_isvalue(args):
                val, *args = args
                struct.missing.append(float(val))
        elif tag in ('-nrls', '--max-rls'):
            cli.check_next_isvalue(args, tag)
            struct.max_rls, *args = args
            struct.max_rls = int(struct.max_rls)
        elif tag in ('-trls', '--tol-rls'):
            cli.check_next_isvalue(args, tag)
            struct.tol_rls, *args = args
            struct.tol_rls = float(struct.tol_rls)
        elif tag in ('-ncg', '--max-cg'):
            cli.check_next_isvalue(args, tag)
            struct.max_cg, *args = args
            struct.max_cg = int(struct.max_cg)
        elif tag in ('-tcg', '--tol-cg'):
            cli.check_next_isvalue(args, tag)
            struct.tol_cg, *args = args
            struct.tol_cg = float(struct.tol_cg)
        elif tag in ('-cpu', '--cpu'):
            struct.device = 'cpu'
        elif tag in ('-gpu', '--gpu'):
            struct.device = 'cuda'
            if cli.next_isvalue(args):
                gpu, *args = args
                struct.device = 'cuda:{:d}'.format(int(gpu))
        elif tag in ('-o', '--output'):
            struct.output = []
            while cli.next_isvalue(args):
                val, *args = args
                struct.output.append(val)
        elif tag in ('-v', '--verbose'):
            struct.verbose = 1
            if cli.next_isvalue(args):
                struct.verbose, *args = args
                struct.verbose = int(struct.verbose)
        elif tag in ('-h', '--help'):
            print(help)
            return None
        else:
            raise ParseError(f'Unknown tag {tag}')

    return struct
6b3eb929ce13559f9bd3d50ee2b15dd25e967d33
6,851
from unittest.mock import Mock
from datetime import datetime, timezone  # timezone added: the fixture builds tz-aware datetimes


def log_context(servicer_context: Mock) -> LogContext:
    """Mock LogContext."""
    context = LogContext(
        servicer_context,
        "/abc.test/GetTest",
        Mock(name="Request"),
        Mock(name="Response", ByteSize=Mock(return_value=10)),
        datetime(2021, 4, 3, 0, 0, 0, 0, timezone.utc),
        datetime(2021, 4, 3, 0, 1, 0, 0, timezone.utc),
    )
    return context
1ea1b8d4e6dad80ac8d95925a70a7f79ec84a686
6,852
import base64


def base64_decode(string):
    """
    Decodes data encoded with MIME base64
    """
    return base64.b64decode(string)
38870882fca9e6595e3f5b5f8943d0bf781f006c
6,854
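A one-line round trip to show the expected input/output types (bytes in, bytes out); the sample string is just an illustration.

import base64

encoded = base64.b64encode(b"hello world")  # b'aGVsbG8gd29ybGQ='
print(base64_decode(encoded))               # b'hello world'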
import re


def convert_operand_kind(operand_tuple):
    """Returns the corresponding operand type used in spirv-tools for the given
    operand kind and quantifier used in the JSON grammar.

    Arguments:
      - operand_tuple: a tuple of two elements:
          - operand kind: used in the JSON grammar
          - quantifier: '', '?', or '*'

    Returns:
      a string of the enumerant name in spv_operand_type_t
    """
    kind, quantifier = operand_tuple
    # The following cases are where we differ between the JSON grammar and
    # spirv-tools.
    if kind == 'IdResultType':
        kind = 'TypeId'
    elif kind == 'IdResult':
        kind = 'ResultId'
    elif kind == 'IdMemorySemantics' or kind == 'MemorySemantics':
        kind = 'MemorySemanticsId'
    elif kind == 'IdScope' or kind == 'Scope':
        kind = 'ScopeId'
    elif kind == 'IdRef':
        kind = 'Id'
    elif kind == 'ImageOperands':
        kind = 'Image'
    elif kind == 'Dim':
        kind = 'Dimensionality'
    elif kind == 'ImageFormat':
        kind = 'SamplerImageFormat'
    elif kind == 'KernelEnqueueFlags':
        kind = 'KernelEnqFlags'
    elif kind == 'LiteralExtInstInteger':
        kind = 'ExtensionInstructionNumber'
    elif kind == 'LiteralSpecConstantOpInteger':
        kind = 'SpecConstantOpNumber'
    elif kind == 'LiteralContextDependentNumber':
        kind = 'TypedLiteralNumber'
    elif kind == 'PairLiteralIntegerIdRef':
        kind = 'LiteralIntegerId'
    elif kind == 'PairIdRefLiteralInteger':
        kind = 'IdLiteralInteger'
    elif kind == 'PairIdRefIdRef':  # Used by OpPhi in the grammar
        kind = 'Id'

    if kind == 'FPRoundingMode':
        kind = 'FpRoundingMode'
    elif kind == 'FPFastMathMode':
        kind = 'FpFastMathMode'

    if quantifier == '?':
        kind = 'Optional{}'.format(kind)
    elif quantifier == '*':
        kind = 'Variable{}'.format(kind)

    return 'SPV_OPERAND_TYPE_{}'.format(
        re.sub(r'([a-z])([A-Z])', r'\1_\2', kind).upper())
3d26a0b330ae64209655b24dfe86578cb4b8724c
6,855
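A few sample conversions, driven purely by the mapping in the function above:

print(convert_operand_kind(('IdResultType', '')))    # SPV_OPERAND_TYPE_TYPE_ID
print(convert_operand_kind(('IdRef', '*')))          # SPV_OPERAND_TYPE_VARIABLE_ID
print(convert_operand_kind(('ImageOperands', '?')))  # SPV_OPERAND_TYPE_OPTIONAL_IMAGE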
import datetime  # the body calls datetime.datetime(...), so the module itself is imported

import numpy as np
import pandas as pd

import niimpy


def screen_missing_data(database, subject, begin=None, end=None):
    """ Returns a DataFrame containing the percentage (range [0,1]) of lost data
    calculated based on the transitions of screen status. In general, if
    screen_status(t) == screen_status(t+1), we declared we have at least one
    missing point.

    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional

    Returns
    -------
    count: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1), "database not given in Niimpy database format"
    assert isinstance(subject, str), "usr not given in string format"

    screen = database.raw(table='AwareScreen', user=subject)

    if begin is not None:
        assert isinstance(begin, pd.Timestamp), "begin not given in timestamp format"
    else:
        begin = screen.iloc[0]['datetime']
    if end is not None:
        assert isinstance(end, pd.Timestamp), "end not given in timestamp format"
    else:
        end = screen.iloc[len(screen) - 1]['datetime']

    screen = screen.drop_duplicates(subset=['datetime'], keep='first')
    screen = screen.drop(['device', 'user', 'time'], axis=1)
    screen = screen.loc[begin:end]
    screen['screen_status'] = pd.to_numeric(screen['screen_status'])

    # Include the missing points that are due to shutting down the phone
    shutdown = shutdown_info(database, subject, begin, end)
    shutdown = shutdown.rename(columns={'battery_status': 'screen_status'})
    shutdown['screen_status'] = 0
    screen = screen.merge(shutdown, how='outer', left_index=True, right_index=True)
    screen['screen_status'] = screen.fillna(0)['screen_status_x'] + screen.fillna(0)['screen_status_y']
    screen = screen.drop(['screen_status_x', 'screen_status_y'], axis=1)
    dates = screen.datetime_x.combine_first(screen.datetime_y)
    screen['datetime'] = dates
    screen = screen.drop(['datetime_x', 'datetime_y'], axis=1)

    # Detect missing data points
    screen['missing'] = 0
    screen['next'] = screen['screen_status'].shift(-1)
    screen['dummy'] = screen['screen_status'] - screen['next']
    screen['missing'] = np.where(screen['dummy'] == 0, 1, 0)
    screen['missing'] = screen['missing'].shift(1)
    screen = screen.drop(['dummy', 'next'], axis=1)
    screen = screen.fillna(0)

    screen['datetime'] = screen['datetime'].apply(
        lambda screen: datetime.datetime(year=screen.year, month=screen.month, day=screen.day))
    screen = screen.drop(['screen_status'], axis=1)

    count = pd.pivot_table(screen, values='missing', index='datetime', aggfunc='count')
    count = screen.groupby(['datetime', 'missing'])['missing'].count().unstack(fill_value=0)
    count['missing'] = count[1.0] / (count[0.0] + count[1.0])
    count = count.drop([0.0, 1.0], axis=1)

    if pd.Timestamp.tzname(count.index[0]) != 'EET':
        if pd.Timestamp.tzname(count.index[0]) != 'EEST':
            count.index = pd.to_datetime(count.index).tz_localize('Europe/Helsinki')

    return count
70666ed7ddfea359c4c91afd8b52f9821580bda6
6,856
def check(text):
    """Check the text."""
    error_code = "example.first"
    msg = "First line always has an error."
    reverse(text)
    return [(1, 1, error_code, msg)]
50d8406322225153c055b925609af702bb86d7b6
6,857
def figure(**kwargs):
    """
    Create a new figure with the given settings.

    Settings like the current colormap, title or axis limits are stored in the
    current figure. This function creates a new figure, restores the default
    settings and applies any settings passed to the function as keyword
    arguments.

    **Usage examples:**

    >>> # Restore all default settings
    >>> mlab.figure()
    >>> # Restore all default settings and set the title
    >>> mlab.figure(title="Example Figure")
    """
    global _plt
    _plt = _Figure()
    _plt.kwargs.update(kwargs)
    return _plt
a7de48597ecc80872d8d4b108a642956200adcc2
6,858
from typing import List
from itertools import chain  # added: chain() is used below but was not imported

from bs4 import BeautifulSoup


def parse(content: str, target: str = "all") -> List[Inline]:
    """Parses an HTML document and extracts the matching elements as Inline objects."""
    soup = BeautifulSoup(content, "html.parser")

    if target == "all":
        search_queries = chain(*_VALID_TARGETS.values())
    elif target in _VALID_TARGETS.keys():
        search_queries = chain(_VALID_TARGETS[target])
    else:
        raise ValueError("Invalid Target")

    elements = []
    for q in search_queries:
        for tag in soup.find_all(q.search_function):
            if q.attr_name:
                inline = Inline(tag[q.attr_name], tag.sourceline, tag.sourcepos)
            else:
                if not tag.contents:
                    continue
                inline = Inline(tag.contents[0], tag.sourceline, tag.sourcepos)
            elements.append(inline)

    return elements
56238a4def01713220c7d59b266cfcc55f1daf7f
6,859
def create_output(verified_specific_headers_list: list) -> str:
    """ Design Output """
    if args.verbose is True:
        print("[!] INFO: Outputting Specific Header Information")

    return_output = ""
    for specific_header in verified_specific_headers_list:
        split_header = specific_header.split(":")
        if split_header[1] != "":
            return_output += f"{split_header[0]:<25} is declared -> DATA:{split_header[1]:30}\n"
        else:
            return_output += f"{split_header[0]:<25} is NOT declared -> NO DATA\n"
    return return_output
e38fdf467d1f01167ff00040e7d0f7d8816e4915
6,860
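A rough usage sketch; `create_output` reads a module-level `args` namespace, so one is faked here with argparse (an assumption about how the original CLI wires it up), and the header strings are illustrative.

import argparse

args = argparse.Namespace(verbose=False)  # assumed module-level namespace

headers = ["X-Frame-Options:DENY", "Content-Security-Policy:"]
print(create_output(headers))
# X-Frame-Options           is declared -> DATA:DENY ...
# Content-Security-Policy   is NOT declared -> NO DATA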
def registered_response_data():
    """Body (bytes) of the registered response."""
    return b"response data"
1ee44d70592747947d76ff757901f44fde5c9946
6,861