Dataset columns:
content : string (lengths 35 to 762k)
sha1 : string (length 40)
id : int64 (0 to 3.66M)
import nltk
import numpy as np


def _highlight_scoring(original_example, subset_adversarial_result, adversarial_span_dict):
    """
    Calculate the highlighting score using classification results of adversarial examples

    :param original_example:
    :param subset_adversarial_result:
    :param adversarial_span_dict:
    """
    original_utterance = " ".join(nltk.word_tokenize(original_example[1]))
    original_idx = original_example[0]
    original_intent = original_example[3]
    original_confidence = original_example[4]
    original_position = original_example[6]
    tokens = original_utterance.split(" ")
    highlight = np.zeros(len(tokens), dtype="float32")
    for idx in range(len(subset_adversarial_result)):
        adversarial_example = subset_adversarial_result.iloc[idx]
        if not adversarial_example["top_predicts"]:
            continue
        predict_dict = dict()
        predict_intent_list = list()
        for prediction in adversarial_example["top_predicts"]:
            predict_dict[prediction["intent"]] = prediction["confidence"]
            predict_intent_list.append(prediction["intent"])
        if original_intent in predict_dict:
            adversarial_position = list(predict_dict.keys()).index(original_intent)
            adversarial_confidence = predict_dict[original_intent]
        else:
            adversarial_position = len(list(predict_dict.keys()))
            adversarial_confidence = 0
        start, end = adversarial_span_dict[
            adversarial_example["utterance"] + "_" + str(original_idx)
        ]
        highlight = _scoring_function(
            highlight,
            original_position,
            adversarial_position,
            original_confidence,
            adversarial_confidence,
            start,
            end,
        )
    return highlight
788f903fe471ef539fe337c79858c04468ae3137
3,653,239
def server_hello(cmd, response):
    """Test command"""
    return response
7e0cc03d1b64afb1a4fc44264096e6888ddb5df2
3,653,240
import numpy as np
from unittest.mock import MagicMock


def test_vectorised_likelihood_not_vectorised_error(model, error):
    """
    Assert the value is False if the likelihood is not vectorised and raises
    an error.
    """
    def dummy_likelihood(x):
        if hasattr(x, '__len__'):
            raise error
        else:
            return np.log(np.random.rand())

    model._vectorised_likelihood = None
    model.log_likelihood = MagicMock(side_effect=dummy_likelihood)
    model.new_point = MagicMock(return_value=np.random.rand(10))
    out = Model.vectorised_likelihood.__get__(model)
    assert model._vectorised_likelihood is False
    assert out is False
1968be0c2ba147147e2ea5d4443c8bc87050d218
3,653,242
import numpy as np


def display_timestamps_pair(time_m_2):
    """Takes a list of the following form:
        [(a1, b1), (a2, b2), ...]
    and returns a string (a_mean+/-a_error, b_mean+/-b_error).
    """
    if len(time_m_2) == 0:
        return '(empty)'
    time_m_2 = np.array(time_m_2)
    return '({}, {})'.format(
        display_timestamps(time_m_2[:, 0]),
        display_timestamps(time_m_2[:, 1]),
    )
b8bb0fa727c087a6bc1761d55e55143a12693d1e
3,653,243
import numpy as np


def get_legendre(degree, length):
    """
    Produces the Legendre polynomials of order `degree`.

    Parameters
    ----------
    degree : int
        Highest order desired.
    length : int
        Number of samples of the polynomials.

    Returns
    -------
    legendre : np.ndarray
        A `length` x (`degree` + 1) array with all the polynomials up to
        order `degree`.
    """
    def _bonnet(d, x):
        # Bonnet's recursion formula for Legendre polynomials.
        if d == 0:
            return np.ones_like(x)
        elif d == 1:
            return x
        else:
            return ((2 * d - 1) * x * _bonnet(d - 1, x) - (d - 1) * _bonnet(d - 2, x)) / d

    x = np.linspace(-1, 1, length)
    legendre = np.empty([length, degree + 1])
    for n in range(degree + 1):
        legendre[:, n] = _bonnet(n, x)
    return legendre
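A quick check of `get_legendre`, assuming only NumPy: column n holds P_n sampled on [-1, 1].

```python
import numpy as np

legendre = get_legendre(degree=3, length=5)
print(legendre.shape)   # (5, 4): columns are P0, P1, P2, P3
print(legendre[:, 0])   # P0 is identically 1: [1. 1. 1. 1. 1.]
print(np.allclose(legendre[:, 1], np.linspace(-1, 1, 5)))  # P1(x) = x -> True
```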
5f939c7d759678f6c686c84b074c4ac973df8255
3,653,244
def _get_controller_of(pod):
    """Get a pod's controller's reference.

    This uses the pod's metadata, so there is no guarantee that the
    controller object reference returned actually corresponds to a
    controller object in the Kubernetes API.

    Args:
        - pod: kubernetes pod object

    Returns: the reference to a controller object
    """
    if pod["metadata"].get("ownerReferences"):
        for owner_ref in pod["metadata"]["ownerReferences"]:
            if owner_ref.get("controller"):
                return owner_ref
    return None
9c9e58e2fc49729c618af2c5bb9b4d033d90a831
3,653,246
from typing import Any
from typing import Dict
from typing import List


def proxify_device_objects(
    obj: Any,
    proxied_id_to_proxy: Dict[int, ProxyObject],
    found_proxies: List[ProxyObject],
):
    """Wrap device objects in ProxyObject.

    Searches through `obj` and wraps all CUDA device objects in ProxyObject.
    It uses `proxied_id_to_proxy` to make sure that identical CUDA device
    objects found in `obj` are wrapped by the same ProxyObject.

    Parameters
    ----------
    obj: Any
        Object to search through or wrap in a ProxyObject.
    proxied_id_to_proxy: Dict[int, ProxyObject]
        Dict mapping the id() of proxied objects (CUDA device objects) to
        their proxy; it is updated with all new proxied objects found in `obj`.
    found_proxies: List[ProxyObject]
        List of found proxies in `obj`. Notice, this includes all proxies
        found, including those already in `proxied_id_to_proxy`.

    Returns
    -------
    ret: Any
        A copy of `obj` where all CUDA device objects are wrapped in ProxyObject.
    """
    return dispatch(obj, proxied_id_to_proxy, found_proxies)
6d410245624d2992e37b5bce1832d7326caf4fe2
3,653,247
import numpy as np


def monotonicity(x, rounding_precision=3):
    """Calculates a monotonicity metric in [0, 1] for a given array.

    For an array of length n, monotonicity is calculated as follows:

        monotonicity = abs[(num. positive gradients)/(n-1) - (num. negative gradients)/(n-1)]
    """
    n = x.shape[0]
    grad = np.gradient(x)
    pos_grad = np.sum(grad > 0)
    neg_grad = np.sum(grad < 0)
    monotonicity = np.abs(pos_grad / (n - 1) - neg_grad / (n - 1))
    return np.round(monotonicity, rounding_precision)
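A small sanity check for `monotonicity`; note that because `np.gradient` returns n samples for an n-point array, a strictly monotone input actually scores n/(n-1), slightly above the documented [0, 1] range:

```python
import numpy as np

x_up = np.array([1.0, 2.0, 3.0, 4.0])    # strictly increasing
x_mix = np.array([1.0, 0.0, 1.0, 0.0])   # oscillating

print(monotonicity(x_up))    # 1.333 -- all 4 gradient samples positive, divided by n-1 = 3
print(monotonicity(x_mix))   # 0.667 -- central differences cancel at interior turning points
```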
3ff9c37975502cb12b9e2839a5f5580412084f8c
3,653,248
# `attrs`, like `check_node`, `kids` and `name`, is a helper from the
# surrounding tuple-parser module, not the third-party `attrs` package.
def parse_value_namedobject(tt):
    """
    <!ELEMENT VALUE.NAMEDOBJECT (CLASS | (INSTANCENAME, INSTANCE))>
    """
    check_node(tt, 'VALUE.NAMEDOBJECT')
    k = kids(tt)
    if len(k) == 1:
        obj = parse_class(k[0])
    elif len(k) == 2:
        path = parse_instancename(k[0])
        obj = parse_instance(k[1])
        obj.path = path
    else:
        raise ParseError('Expecting one or two elements, got %s' % repr(k))
    return (name(tt), attrs(tt), obj)
ecb507ac9b0c3fdfbec19f807fba06236e21d7c5
3,653,251
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload Dyson cloud."""
    # Nothing needs clean up
    return True
38a274e90c5fadc277e640cd6cc5442d5070dfd6
3,653,252
import numpy as np


def quat_correct(quat):
    """Converts quaternion to minimize Euclidean distance from previous quaternion (wxyz order)."""
    for q in range(1, quat.shape[0]):
        if np.linalg.norm(quat[q - 1] - quat[q], axis=0) > np.linalg.norm(quat[q - 1] + quat[q], axis=0):
            quat[q] = -quat[q]
    return quat
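A minimal sketch of `quat_correct` on a two-frame track where the second quaternion was stored with flipped sign (q and -q encode the same rotation):

```python
import numpy as np

quat = np.array([
    [1.0, 0.0, 0.0, 0.0],
    [-0.999, 0.01, 0.0, 0.0],   # same orientation, opposite sign
])
fixed = quat_correct(quat.copy())
print(fixed[1])  # [ 0.999 -0.01  -0.   -0.  ] -- sign flipped back for continuity
```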
fc492998c5bdf2cf3b1aacbd42de72618bd74c01
3,653,253
def parse_record1(raw_record):
    """Parse raw record and return it as a set of unique symbols without \\n"""
    return set(raw_record) - {"\n"}
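Usage is straightforward; the record is any iterable of characters:

```python
symbols = parse_record1("abca\nbc\n")
print(symbols)  # {'a', 'b', 'c'} -- order may vary; newlines are stripped
```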
4ffd3ebd0aaa17ddd42baf3b9d44614784c8ff33
3,653,255
import re


def isValid(text):
    """
    Returns True if the input contains the phrase "your awesome".

    Arguments:
    text -- user-input, typically transcribed speech
    """
    return bool(re.search(r'\byour awesome\b', text, re.IGNORECASE))
c6e4275d53cd632b4f5e255aa62b69c80fd37794
3,653,257
def list_shared_with(uri, async_req=False):
    """Return array sharing policies."""
    (namespace, array_name) = split_uri(uri)
    api_instance = client.client.array_api
    try:
        return api_instance.get_array_sharing_policies(
            namespace=namespace, array=array_name, async_req=async_req
        )
    except GenApiException as exc:
        raise tiledb_cloud_error.check_exc(exc) from None
2a9eb78c14e5bdc31a3ae5bfc077219bd06c768f
3,653,258
# Assumed: `html` here is Dash's html component module (the stdlib `html`
# module has no Div/P/Br/Hr), paired with dash_table used below.
import dash_html_components as html


def format_html_data_table(dataframe, list_of_malformed, addLineBreak=False):
    """Returns the predicted values as the data table."""
    if list_of_malformed:
        list_of_malformed = str(list_of_malformed)
    else:
        list_of_malformed = "None"
    # format numeric data into string format (np.float is removed in new
    # NumPy versions; plain `float` selects float64 columns the same way)
    for column_name in dataframe.select_dtypes(include=[float]).columns:
        dataframe[column_name] = dataframe[column_name].apply(lambda x: "{0:.2f}%".format(x))
    return html.Div(
        [html.P("Total Number of Audio Clips : " + str(dataframe.shape[0]),
                style={"color": "white", 'text-decoration': 'underline'}),
         html.P("Error while prediction: " + list_of_malformed, style={"color": "white"})]
        + ([html.Br()] if addLineBreak else [])
        + [html.Hr(),
           dash_table.DataTable(
               id='datatable-interactivity-predictions',
               columns=[{"name": format_label_name(i), "id": i, "deletable": True}
                        for i in dataframe.columns],
               data=dataframe.to_dict("rows"),
               style_header={'backgroundColor': 'rgb(30, 30, 30)',
                             "fontWeight": "bold",
                             'border': '1px solid white'},
               style_cell={'backgroundColor': 'rgb(50, 50, 50)',
                           'color': 'white',
                           'whiteSpace': 'normal',
                           'maxWidth': '240px'},
               style_table={"maxHeight": "350px",
                            "overflowY": "scroll",
                            "overflowX": "auto"}),
           html.Hr()]
        + ([html.Br()] if addLineBreak else [])
    )
cc345d2cb87ddf7905d0d9a62cc6cd61b92ddc51
3,653,259
import math


def colorDistance(col1, col2):
    """Returns a number between 0 and root(3) stating how similar
    two colours are - distance in r, g, b space. Only used to find
    names for things."""
    return math.sqrt(
        (col1.red - col2.red) ** 2 +
        (col1.green - col2.green) ** 2 +
        (col1.blue - col2.blue) ** 2
    )
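A usage sketch; the colour class is not shown in the snippet, so a namedtuple with `red`/`green`/`blue` attributes (components in [0, 1]) stands in for it here:

```python
from collections import namedtuple

Color = namedtuple("Color", ["red", "green", "blue"])  # stand-in for the real colour class

red = Color(1.0, 0.0, 0.0)
orange = Color(1.0, 0.5, 0.0)
white = Color(1.0, 1.0, 1.0)

print(colorDistance(red, orange))  # 0.5
print(colorDistance(red, white))   # 1.414... (sqrt(2))
```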
ef18dede8312f78b4ba4258e87d4630863f1243c
3,653,260
from typing import Iterable


def combine_from_streaming(stream: Iterable[runtime_pb2.Tensor]) -> runtime_pb2.Tensor:
    """Restore a result of split_into_chunks into a single serialized tensor."""
    stream = iter(stream)
    first_chunk = next(stream)
    serialized_tensor = runtime_pb2.Tensor()
    serialized_tensor.CopyFrom(first_chunk)
    buffer_chunks = [first_chunk.buffer]
    for tensor_part in stream:
        buffer_chunks.append(tensor_part.buffer)
    serialized_tensor.buffer = b''.join(buffer_chunks)
    return serialized_tensor
af88c9eeec99c1d3d7ca9e5753b72cf09a0c6c85
3,653,261
def portageq_envvar(options, out, err):
    """
    Return configuration defined variables. Use envvar2 instead;
    this will be removed.
    """
    return env_var.function(options, out, err)
8406985ac5f5d5d4bc93ded8c1392b1fe49e9ff7
3,653,262
def create_hash_factory(hashfun, complex_types=False, universe_size=None):
    """Create a function to make hash functions

    :param hashfun: hash function to use
    :type hashfun: callable
    :param complex_types: whether hash function supports hashing of complex
        types, either through native support or through repr
    :type complex_types: bool
    :param universe_size: upper limit to hash value
    :type universe_size: int, long
    :returns: a hash factory
    :rtype: callable
    """
    def hash_factory(seed):
        if complex_types:
            if universe_size is None:
                fun = lambda x: hashfun(x, seed)
            else:
                fun = lambda x: hashfun(x, seed) % universe_size
        else:
            if universe_size is None:
                fun = lambda x: hashfun(hashable(x), seed)
            else:
                fun = lambda x: hashfun(hashable(x), seed) % universe_size
        return fun
    return hash_factory
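A usage sketch with a toy seeded hash function (the real `hashfun` and the `hashable` helper are supplied by the surrounding module; `complex_types=True` avoids the `hashable` path):

```python
# A toy (value, seed) hash; the real hashfun is supplied by the caller.
def toy_hash(x, seed):
    return hash((x, seed))

factory = create_hash_factory(toy_hash, complex_types=True, universe_size=1000)
h0, h1 = factory(seed=0), factory(seed=1)
print(h0("apple"), h1("apple"))  # two members of a seeded hash family, each in [0, 1000)
```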
23dee13f06f754caa9f7de5a89b855adbe7313a4
3,653,263
def estimate_key(note_info, method="krumhansl", *args, **kwargs):
    """
    Estimate key of a piece by comparing the pitch statistics of the note
    array to key profiles [2]_, [3]_.

    Parameters
    ----------
    note_info : structured array, `Part` or `PerformedPart`
        Note information as a `Part` or `PerformedPart` instances or
        as a structured array. If it is a structured array, it has to
        contain the fields generated by the `note_array` properties of
        `Part` or `PerformedPart` objects. If the array contains onset and
        duration information of both score and performance, (e.g., containing
        both `onset_beat` and `onset_sec`), the score information will be
        preferred.
    method : {'krumhansl'}
        Method for estimating the key. For now 'krumhansl' is the only
        supported method.
    args, kwargs
        Positional and Keyword arguments for the key estimation method.

    Returns
    -------
    str
        String representing the key name (i.e., Root(alteration)(m if minor)).
        See `partitura.utils.key_name_to_fifths_mode` and
        `partitura.utils.fifths_mode_to_key_name`.

    References
    ----------
    .. [2] Krumhansl, Carol L. (1990) "Cognitive foundations of musical pitch",
           Oxford University Press, New York.
    .. [3] Temperley, D. (1999) "What's key for key? The Krumhansl-Schmuckler
           key-finding algorithm reconsidered". Music Perception. 17(1),
           pp. 65--100.
    """
    if method not in ("krumhansl",):
        raise ValueError('For now the only valid method is "krumhansl"')

    if method == "krumhansl":
        kid = ks_kid

    if "key_profiles" not in kwargs:
        kwargs["key_profiles"] = "krumhansl_kessler"
    else:
        if kwargs["key_profiles"] not in VALID_KEY_PROFILES:
            raise ValueError(
                'Invalid key_profiles. Valid options are "ks", "cmbs" or "kp"'
            )

    note_array = ensure_notearray(note_info)
    return kid(note_array, *args, **kwargs)
af2383ab2a94cf49a93a1f00d5bf575f19e0daa0
3,653,264
def print_parsable_dstip(data, srcip, dstip):
    """Returns a parsable data line for the destination data.

    :param data: the data source
    :type data: dictionary
    :param srcip: the source ip
    :type srcip: string
    :param dstip: the destination ip
    :type dstip: string
    :return: a line of urls and their hitcount
    """
    line = []
    for item in header_order:
        if item in data[srcip]['targets'][dstip]:
            value = data[srcip]['targets'][dstip][item]
        elif item == "src":
            value = srcip
        elif item == "dst":
            value = dstip.replace(":", "|")
        else:
            value = ""
        if value != "":
            line.append(str(value))
    if 'url' in data[srcip]['targets'][dstip]:
        line.append(print_parsable_urls(data[srcip]['targets'][dstip]['url']))
    line = "|".join(line)
    return line
9e27733a9821e184e53f21ca38af9cdb61192743
3,653,265
def OrListSelector(*selectors) -> pyrosetta.rosetta.core.select.residue_selector.OrResidueSelector:
    """
    OrResidueSelector for two or more selectors
    (not a class; returns a combined OrResidueSelector).

    :param selectors:
    :return:
    """
    sele = pyrosetta.rosetta.core.select.residue_selector.FalseResidueSelector()
    for subsele in selectors:
        sele = pyrosetta.rosetta.core.select.residue_selector.OrResidueSelector(subsele, sele)
    return sele
8f4443a6ee1bbcd2e76133e6a08eea2737e01383
3,653,266
def plot_regress_exog(res, exog_idx, exog_name='', fig=None):
    """Plot regression results against one regressor.

    This plots four graphs in a 2 by 2 figure: 'endog versus exog',
    'residuals versus exog', 'fitted versus exog' and
    'fitted plus residual versus exog'

    Parameters
    ----------
    res : result instance
        result instance with resid, model.endog and model.exog as attributes
    exog_idx : int
        index of regressor in exog matrix
    fig : Matplotlib figure instance, optional
        If given, this figure is simply returned. Otherwise a new figure is
        created.

    Returns
    -------
    fig : matplotlib figure instance

    Notes
    -----
    This is currently very simple, no options or varnames yet.
    """
    fig = utils.create_mpl_fig(fig)

    if exog_name == '':
        exog_name = 'variable %d' % exog_idx

    # maybe add option for wendog, wexog
    # y = res.endog
    x1 = res.model.exog[:, exog_idx]

    ax = fig.add_subplot(2, 2, 1)
    # namestr = ' for %s' % self.name if self.name else ''
    ax.plot(x1, res.model.endog, 'o')
    ax.set_title('endog versus exog', fontsize='small')  # + namestr)

    ax = fig.add_subplot(2, 2, 2)
    ax.plot(x1, res.resid, 'o')
    ax.axhline(y=0)
    ax.set_title('residuals versus exog', fontsize='small')  # + namestr)

    ax = fig.add_subplot(2, 2, 3)
    ax.plot(x1, res.fittedvalues, 'o')
    ax.set_title('Fitted versus exog', fontsize='small')  # + namestr)

    ax = fig.add_subplot(2, 2, 4)
    ax.plot(x1, res.fittedvalues + res.resid, 'o')
    ax.set_title('Fitted plus residuals versus exog', fontsize='small')  # + namestr)

    fig.suptitle('Regression Plots for %s' % exog_name)
    return fig
e4c7859c32892d2d8e94ff884652846f5f15f513
3,653,267
from typing import List


def get_circles_with_admin_access(account_id: int) -> List[Circle]:
    """
    SELECT management_style, c_name
    FROM (
        SELECT 'SELF_ADMIN' AS management_style, c.management_style AS c_management_style,
               c.admin_circle AS c_admin_circle, c.created_by AS c_created_by,
               c.updated_by AS c_updated_by, c.id AS c_id, c.created_at AS c_created_at,
               c.updated_at AS c_updated_at, c.name AS c_name, c.description AS c_description
        FROM circle AS c
        JOIN circle_member ON c.id = circle_member.circle
        WHERE c.management_style = 'SELF_ADMIN' AND circle_member.account = 5
        UNION
        SELECT 'ADMIN_CIRCLE' AS management_style, c.management_style AS c_management_style,
               c.admin_circle AS c_admin_circle, c.created_by AS c_created_by,
               c.updated_by AS c_updated_by, c.id AS c_id, c.created_at AS c_created_at,
               c.updated_at AS c_updated_at, c.name AS c_name, c.description AS c_description
        FROM circle AS ac
        JOIN circle AS c ON c.admin_circle = ac.id
        JOIN circle_member ON ac.id = circle_member.circle
        WHERE c.management_style = 'ADMIN_CIRCLE' AND circle_member.account = 5
    ) AS anon_1
    """
    ac = aliased(Circle, name="ac")
    c = aliased(Circle, name="c")

    self_admin = db.session.query(c). \
        join(c.members). \
        filter(CircleMember.account_id == account_id). \
        filter(c._management_style == CircleManagementStyle.SELF_ADMIN.name)

    admin_circle = db.session.query(c). \
        join(ac, c.admin_circle_id == ac.id). \
        join(ac.members). \
        filter(c._management_style == CircleManagementStyle.ADMIN_CIRCLE.name). \
        filter(CircleMember.account_id == account_id)

    return self_admin.union(admin_circle).all()
ef0a24d299bdad549f9b0220e4a34499097eb19d
3,653,268
def combine(arr):
    """Merges overlapping (or adjacent) sequences into single sequences."""
    def first(item):
        return item[0]

    def second(item):
        return item[1]

    if len(arr) == 0 or len(arr) == 1:
        return arr

    sarr = []
    for c, val in enumerate(arr):
        sarr.append((val[0], val[1], c))
    sarr = sorted(sarr, key=second)
    sarr = sorted(sarr, key=first)

    chains = [[sarr[0][0], sarr[0][1], [sarr[0][2]]]]
    for s, e, c in sarr[1:]:  # start, end, counter
        if s <= chains[-1][1] + 1:
            chains[-1][1] = max(e, chains[-1][1])
            chains[-1][2].append(c)
        else:
            chains.append([s, e, [c]])
    return chains
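A worked example of `combine`; each output chain carries the merged span plus the indices of the input intervals it absorbed:

```python
intervals = [(1, 3), (8, 9), (2, 5)]
print(combine(intervals))
# [[1, 5, [0, 2]], [8, 9, [1]]] -- (1,3) and (2,5) merge into 1..5;
# the third element lists the original indices in each chain
```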
b46bb7f73fa6857ed4c980bdbdff77acde64b18d
3,653,269
from operator import sub


def sub_fft(f_fft, g_fft):
    """Subtraction of two polynomials (FFT representation)."""
    # NB: operator.sub only works element-wise if the operands support `-`
    # (e.g. NumPy arrays); plain Python lists would raise a TypeError.
    return sub(f_fft, g_fft)
a559429a4d10889be3ffa776153854248ac7a496
3,653,270
def recursive_fill_fields(input, output):
    """
    Fills fields from output with fields from input,
    with support for nested structures.

    Parameters
    ----------
    input : ndarray
        Input array.
    output : ndarray
        Output array.

    Notes
    -----
    * `output` should be at least the same size as `input`

    Examples
    --------
    >>> from numpy_demo.lib import recfunctions as rfn
    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
    >>> b = np.zeros((3,), dtype=a.dtype)
    >>> rfn.recursive_fill_fields(a, b)
    array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
    """
    newdtype = output.dtype
    for field in newdtype.names:
        try:
            current = input[field]
        except ValueError:
            continue
        if current.dtype.names is not None:
            recursive_fill_fields(current, output[field])
        else:
            output[field][:len(current)] = current
    return output
5508f1681eaa3f2c5ccb44b6329ad012f85c42e8
3,653,271
import numpy as np
import pandas as pd


def handle_dat_edge(data_all):
    """
    Split the `info` field of every dat_edge record and emit the pieces for
    later computation. To simplify the calculation, the time information is
    ignored and the contact records of all months are aggregated together.
    """
    def cal_multi_3(string):
        s = string.split(',')
        month_times = len(s)
        df = list(map(lambda x: list(map(eval, x.split(':')[1].split('_'))), s))
        times_sum, weight_sum = pd.DataFrame(df).sum().values
        return month_times, times_sum, weight_sum

    def cal_single_3(string):
        times_sum, weight_sum = list(map(eval, string.split(':')[1].split('_')))
        return 1, times_sum, weight_sum

    length = list(map(len, map(lambda x: x.split(','), data_all['info'])))
    dat_edge_single = data_all[np.array(length) == 1]
    dat_edge_multi = data_all[np.array(length) > 1]

    multi_pre_df = map(cal_multi_3, dat_edge_multi['info'])
    multi_feature_3 = pd.DataFrame(list(multi_pre_df), columns=['month_times', 'times_sum', 'weight_sum'])
    id_part = dat_edge_multi[['from_id', 'to_id']].reset_index(drop=True)
    multi_result = pd.concat([id_part, multi_feature_3], axis=1)

    single_pre_df = map(cal_single_3, dat_edge_single['info'])
    single_feature_3 = pd.DataFrame(list(single_pre_df), columns=['month_times', 'times_sum', 'weight_sum'])
    id_part = dat_edge_single[['from_id', 'to_id']].reset_index(drop=True)
    single_result = pd.concat([id_part, single_feature_3], axis=1)

    both_result = pd.concat([multi_result, single_result], ignore_index=True)
    return both_result
4ae92d337a70326bae87399809b920b1ad2cce1e
3,653,273
def open_path(path, **kwargs):
    """
    Parameters
    ----------
    path: str
    window: tuple
        e.g. ('1990-01-01', '2030-01-01')
    kwargs: all other kwargs the particular file might take,
        see the module for details

    Returns
    -------
    """
    info = _tools.path2info(path)
    module = arm_products[info['product']]['module']
    out = module.open_path(path, **kwargs)
    return out
c4e87d0649dfde2139a4ecac797775309eb6a72e
3,653,276
import uuid


def generate_code() -> str:
    """Generates password reset code

    :return: Password reset code
    :rtype: str
    """
    return str(uuid.uuid4())
bcd8377afd5598e71f8bb8eb217c3f3fd53fc5c7
3,653,277
import jwt


def decode_auth_token(auth_token):
    """
    Decodes the auth token

    :param auth_token:
    :return: integer|string
    """
    try:
        payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
        return payload['sub']
    except jwt.ExpiredSignatureError:
        return 'Signature expired. Please log in again.'
    except jwt.InvalidTokenError:
        return 'Invalid token. Please log in again.'
16fceb539f7aafb775b851e55f1b606a1c917cf9
3,653,279
import ast


def json(*arguments):
    """Transform *arguments parameters into JSON."""
    return ast.Json(*arguments)
3e3333617b63dc1b5e8e4b71ea5c2f0ea08bfff8
3,653,281
from os import environ
from unittest import SkipTest


def get_default_volume_size():
    """
    :returns int: the default volume size (in bytes) supported by the
        backend the acceptance tests are using.
    """
    default_volume_size = environ.get("FLOCKER_ACCEPTANCE_DEFAULT_VOLUME_SIZE")
    if default_volume_size is None:
        raise SkipTest(
            "Set acceptance testing default volume size using the "
            "FLOCKER_ACCEPTANCE_DEFAULT_VOLUME_SIZE environment variable.")
    return int(default_volume_size)
ec24bfb9c07add5d1a800a1aaf9db3efb8727b3d
3,653,282
import functools


def set_global_user(**decorator_kwargs):
    """
    Wrap a Flask blueprint view function to set the global user
    ``flask.g.user`` to an instance of ``CurrentUser``, according to the
    information from the JWT in the request headers. The validation will
    also set the current token.

    This requires a flask application and request context.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            set_current_user(**decorator_kwargs)
            return func(*args, **kwargs)
        return wrapper
    return decorator
f73a89d94d188b1c258cedca3439ab9c1c94180c
3,653,283
import numpy as np


def sample_wfreq(sample):
    """Return the Weekly Washing Frequency as a number."""
    # `sample[3:]` strips the `BB_` prefix
    results = session.query(Samples_Metadata.WFREQ).\
        filter(Samples_Metadata.SAMPLEID == sample[3:]).all()
    wfreq = np.ravel(results)
    # Return only the first integer value for washing frequency
    return jsonify(int(wfreq[0]))
6cb2ee0866efc9e841143c32e10cbb8feea813bc
3,653,284
from sklearn.metrics import accuracy_score
from sklearn.neural_network import MLPClassifier


def mlp_hyperparameter_tuning(no_of_hidden_neurons, epoch, alpha, roh, n_iter_no_change,
                              X_train, X_validation, y_train, y_validation):
    """
    INPUT
    no_of_hidden_neurons: 1D int array containing different values of the number
        of neurons present in the 1st hidden layer (hyperparameter)
    epoch: 1D int array containing different values of epochs (hyperparameter)
    alpha: 1D float array containing different values of alphas or learning
        rates (hyperparameter)
    roh: 1D float array containing different values of tolerance or roh (hyperparameter)
    n_iter_no_change: 1D int array containing different values of the number of
        iterations with no improvement to wait before stopping fitting (hyperparameter).
    X_train: 2D array of shape = (no of patterns, no of features)
    X_validation: 2D array of shape = (no of patterns, no of features)
    y_train: 1D array of shape = (no of patterns, )
    y_validation: 1D array of shape = (no of patterns, )

    OUTPUT
    best_hyperparameter: a tuple (no_of_hidden_neurons, epoch, alpha, roh,
        n_iter_no_change) which has the best accuracy on the validation set.
    """
    val_acc = []
    for i in range(0, epoch.shape[0]):
        mlp_classifier = MLPClassifier(
            hidden_layer_sizes=(no_of_hidden_neurons[i],),
            activation='logistic',
            solver='sgd',
            learning_rate='constant',
            learning_rate_init=alpha[i],
            max_iter=epoch[i],
            shuffle=True,
            random_state=100,
            tol=roh[i],
            verbose=False,
            early_stopping=True,
            n_iter_no_change=n_iter_no_change[i],
        ).fit(X_train, y_train)
        # we are taking the log-loss function for error calculation
        predicted = mlp_classifier.predict(X_validation)
        val_acc.append(accuracy_score(y_validation, predicted) * 100)

    # Get the maximum accuracy on validation
    max_value = max(val_acc)
    max_index = val_acc.index(max_value)
    best_hyperparameter = (no_of_hidden_neurons[max_index], epoch[max_index],
                           alpha[max_index], roh[max_index], n_iter_no_change[max_index])

    print("Best Hyperparameter:")
    print("No of neurons in the 1st hidden layer = ", no_of_hidden_neurons[max_index])
    print("Epoch = ", epoch[max_index])
    print("Alpha = ", alpha[max_index])
    print("Roh = ", roh[max_index])
    print("n_iter_no_change (Number of iterations with no improvement) = ",
          n_iter_no_change[max_index])
    return best_hyperparameter
3ec079bbeae32a5e7e6b80e833336ecb8662cbf1
3,653,286
def play(p1: list[int], p2: list[int]) -> list[int]:
    """Gets the final hand of the winning player."""
    while p1 and p2:
        a = p1.pop(0)
        b = p2.pop(0)
        if a > b:
            p1 += [a, b]
        else:
            p2 += [b, a]
    return p1 + p2
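A small worked game: each round, the higher card's owner stacks both cards, its own first; the loop ends when one hand empties.

```python
winner_hand = play([3, 1], [2, 4])
print(winner_hand)  # [4, 2, 3, 1] -- player 2 ends up with every card
```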
2a2b561474b3cd0841dcbe881e74b4767b4102b1
3,653,287
import numpy as np


def oda_update_uhf(dFs, dDs, dE):
    """
    ODA update:
        lbd = 0.5 - dE / E_deriv
    """
    if type(dFs) is not list:
        raise Exception("arg1 and arg2 are list of alpha/beta matrices.")
    E_deriv = np.sum(dFs[0] * dDs[0] + dFs[1] * dDs[1])
    lbd = 0.5 * (1. - dE / E_deriv)
    if lbd < 0 or lbd > 1:
        lbd = 0.9999 if dE < 0 else 1.e-4
    return lbd
fcead1536db11f80ae9eb2e912ac9857f93de669
3,653,288
def authorize(*roles):
    """Decorator that authorizes (or not) the current user

    Raises an exception if the current user does not have at least one
    of the listed roles.
    """
    def wrapper(func):
        """wraps the protected function"""
        def authorize_and_call(*args, **kwargs):
            """checks authorization and calls function if authorized"""
            user = context.request.user
            if user.is_active:
                if user.is_administrator:
                    return func(*args, **kwargs)
                for role in roles:
                    if role in user.groups:
                        return func(*args, **kwargs)
            raise zoom.exceptions.UnauthorizedException('Unauthorized')
        return authorize_and_call
    return wrapper
f3fd8eb42924f8f956d0e3eae1499f64387fe96e
3,653,289
import base64
import itertools

import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC


def decrypt(secret, ciphertext):
    """Given the first 16 bytes of splunk.secret, decrypt a Splunk password"""
    plaintext = None

    if ciphertext.startswith("$1$"):
        ciphertext = base64.b64decode(ciphertext[3:])
        key = secret[:16]

        algorithm = algorithms.ARC4(key)
        cipher = Cipher(algorithm, mode=None, backend=default_backend())
        decryptor = cipher.decryptor()
        plaintext = decryptor.update(ciphertext)

        chars = []
        for char1, char2 in zip(plaintext[:-1], itertools.cycle("DEFAULTSA")):
            chars.append(six.byte2int([char1]) ^ ord(char2))
        plaintext = "".join([six.unichr(c) for c in chars])
    elif ciphertext.startswith("$7$"):
        ciphertext = base64.b64decode(ciphertext[3:])

        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=b"disk-encryption",
            iterations=1,
            backend=default_backend()
        )
        key = kdf.derive(secret)

        iv = ciphertext[:16]  # pylint: disable=invalid-name
        tag = ciphertext[-16:]
        ciphertext = ciphertext[16:-16]

        algorithm = algorithms.AES(key)
        cipher = Cipher(algorithm, mode=modes.GCM(iv, tag), backend=default_backend())
        decryptor = cipher.decryptor()
        plaintext = decryptor.update(ciphertext).decode()

    return plaintext
d4b0caca50d649633bb973d26bb174875d23b0e0
3,653,291
def load_skeleton(path):
    """Load the skeleton from a pickle."""
    # Delayed import so script can be run with both Python 2 and 3
    from unet_core.vessel_analysis import VesselTree

    v = VesselTree()
    v.load_skeleton(path)
    return v.skeleton
d9632de40310dd738eb7d07966d6eb04360c3b81
3,653,292
import numpy as np
import pandas as pd


def industries_hierarchy() -> pd.DataFrame:
    """Read the Dow Jones Industry hierarchy CSV file.

    Reads the Dow Jones Industry hierarchy CSV file and returns
    its content as a Pandas DataFrame. The root node has the
    fcode `indroot` and an empty parent.

    Returns
    -------
    DataFrame : A Pandas DataFrame with the columns:
        * ind_fcode : string
            Industry Factiva Code
        * name : string
            Name of the Industry
        * parent : string
            Factiva Code of the parent Industry
    """
    ret_ind = pd.read_csv(ind_hrchy_path)
    ret_ind = ret_ind.replace(np.nan, '', regex=True)
    return ret_ind
9d1de27e3e01572637e7afc729e0cd07d96b14e2
3,653,293
import typing


def setup_callback(callback: typing.Awaitable):
    """This function is used to set up the callback."""
    callback.is_guild = False
    """The guild of the callback."""
    callback.has_permissions = []
    """The permissions of the callback."""
    callback.has_roles = []
    """The roles of the callback."""
    callback.checks = []
    """The checks of the callback."""
    callback.check_any = False
    """The check_any of the callback."""
    return callback
4cb7849d9746166c95c96d18e27227b52160ff7a
3,653,295
import tensorflow as tf


def prepare_for_training(ds, ds_name, conf, cache):
    """Cache -> shuffle -> repeat -> augment -> batch -> prefetch."""
    AUTOTUNE = tf.data.experimental.AUTOTUNE

    # Resample dataset. NB: dataset is cached in resampler
    if conf["resample"] and 'train' in ds_name:
        ds = oversample(ds, ds_name, conf)
    # Cache to SSD
    elif cache:
        cache_string = "{}/{}_{}_{}".format(
            conf["cache_dir"], conf["img_shape"][0], conf["ds_info"], ds_name
        )
        ds = ds.cache(cache_string)

    # Shuffle
    if conf["shuffle_buffer_size"] > 1:
        ds = ds.shuffle(
            buffer_size=conf["shuffle_buffer_size"],
            seed=tf.constant(conf["seed"], tf.int64) if conf["seed"] else None
        )

    # Repeat forever
    ds = ds.repeat()

    # Augment
    if conf["augment"] and "train" in ds_name:
        ds = augment_ds(ds, conf, AUTOTUNE)

    # Batch
    ds = ds.batch(conf["batch_size"], drop_remainder=False)

    # Prefetch - lets the dataset fetch batches in the background while the model is training.
    ds = ds.prefetch(buffer_size=AUTOTUNE)

    return ds
b072a7ced028b1627288a3f2bbf0233c85afbab8
3,653,296
import tensorflow as tf  # TF1-style API (tf.layers / tf.contrib)


def residual3d(inp, is_training, relu_after=True, add_bn=True, name=None, reuse=None):
    """3d equivalent to 2d residual layer

    Args:
        inp (tensor[batch_size, d, h, w, channels]):
        is_training (tensor[bool]):
        relu_after (bool):
        add_bn (bool): add bn before every relu
        name (string):
        reuse (bool):
    """
    if name is None:
        name = "residual3d"
    out_dim = int(inp.shape[-1])
    with tf.variable_scope(name, reuse=reuse):
        out1 = tf.layers.conv3d(
            inp,
            filters=out_dim,
            kernel_size=[3, 3, 3],
            strides=[1, 1, 1],
            padding="same",
            activation=None,
            kernel_initializer=tf.contrib.layers.xavier_initializer(),
            bias_initializer=tf.zeros_initializer(),
            name="layer1",
            reuse=reuse)
        if add_bn:
            out1 = batch_norm(inp=out1, is_training=is_training, name="norm1", reuse=reuse)
        out1 = tf.nn.relu(out1)
        out2 = tf.layers.conv3d(
            out1,
            filters=out_dim,
            kernel_size=[3, 3, 3],
            strides=[1, 1, 1],
            padding="same",
            activation=None,
            kernel_initializer=tf.contrib.layers.xavier_initializer(),
            bias_initializer=tf.zeros_initializer(),
            name="layer2",
            reuse=reuse)
        if relu_after and add_bn:
            out2 = batch_norm(inp=out2, is_training=is_training, name="norm2", reuse=reuse)
    if relu_after:
        return tf.nn.relu(inp + out2)
    else:
        return inp + out2
28fb8651c9a9755e8d0636c702b2bd77e7186fd1
3,653,297
def marks(family, glyph):
    """
    :param family:
    :param glyph:
    :return: True when glyph has at least one mark anchor
        (an anchor whose name starts with "_")
    """
    has_mark_anchor = False
    for anchor in glyph.anchors:
        if anchor.name:
            if anchor.name.startswith("_"):
                has_mark_anchor = True
                break
    return has_mark_anchor
101555dcadfd78b0550606f843e32dce99de62b8
3,653,298
import re


def generate_bom(pcb_modules, config, extra_data):
    # type: (list, Config, dict) -> dict
    """Generate BOM from pcb layout.

    :param pcb_modules: list of modules on the pcb
    :param config: Config object
    :param extra_data: Extra fields data
    :return: dict of BOM tables (qty, value, footprint, refs) and dnp components
    """

    def convert(text):
        return int(text) if text.isdigit() else text.lower()

    def alphanum_key(key):
        return [convert(c) for c in re.split('([0-9]+)', key)]

    def natural_sort(l):
        """Natural sort for strings containing numbers."""
        return sorted(l, key=lambda r: (alphanum_key(r[0]), r[1]))

    # build grouped part list
    warning_shown = False
    skipped_components = []
    part_groups = {}
    for i, m in enumerate(pcb_modules):
        if skip_component(m, config, extra_data):
            skipped_components.append(i)
            continue

        # group part refs by value and footprint
        norm_value = units.componentValue(m.val)

        extras = []
        if config.extra_fields:
            if m.ref in extra_data:
                extras = [extra_data[m.ref].get(f, '') for f in config.extra_fields]
            else:
                # Some components are on pcb but not in schematic data.
                # Show a warning about possibly outdated netlist/xml file.
                log.warn('Component %s is missing from schematic data.' % m.ref)
                warning_shown = True
                extras = [''] * len(config.extra_fields)

        group_key = (norm_value, tuple(extras), m.footprint, m.attr)
        valrefs = part_groups.setdefault(group_key, [m.val, []])
        valrefs[1].append((m.ref, i))

    if warning_shown:
        log.warn('Netlist/xml file is likely out of date.')

    # build bom table, sort refs
    bom_table = []
    for (norm_value, extras, footprint, attr), valrefs in part_groups.items():
        bom_row = (len(valrefs[1]), valrefs[0], footprint, natural_sort(valrefs[1]), extras)
        bom_table.append(bom_row)

    # sort table by reference prefix, footprint and quantity
    def sort_func(row):
        qty, _, fp, rf, e = row
        prefix = re.findall('^[A-Z]*', rf[0][0])[0]
        if prefix in config.component_sort_order:
            ref_ord = config.component_sort_order.index(prefix)
        else:
            ref_ord = config.component_sort_order.index('~')
        return ref_ord, e, fp, -qty, alphanum_key(rf[0][0])

    if '~' not in config.component_sort_order:
        config.component_sort_order.append('~')

    bom_table = sorted(bom_table, key=sort_func)

    result = {
        'both': bom_table,
        'skipped': skipped_components
    }

    for layer in ['F', 'B']:
        filtered_table = []
        for row in bom_table:
            filtered_refs = [ref for ref in row[3] if pcb_modules[ref[1]].layer == layer]
            if filtered_refs:
                filtered_table.append((len(filtered_refs), row[1], row[2], filtered_refs, row[4]))
        result[layer] = sorted(filtered_table, key=sort_func)

    return result
7645929bfcfd3d7447a32ae2ea3074d6da86c368
3,653,299
def validateFilename(value):
    """Validate filename."""
    if 0 == len(value):
        raise ValueError("Name of SimpleGridDB file must be specified.")
    return value
b8b3c23772437c1ddca597c44c66b239955a26fb
3,653,302
from numpy import fromstring, reshape, uint8


def readPNM(fd):
    """Reads the PNM file from the filehandle"""
    t = noncomment(fd)
    s = noncomment(fd)
    m = noncomment(fd) if not (t.startswith('P1') or t.startswith('P4')) else '1'
    data = fd.read()

    ls = len(s.split())
    if ls != 2:
        name = "<pipe>" if fd.name == "<fdopen>" else "Filename = {0}".format(fd.name)
        raise IOError("Expected 2 elements from parsing PNM file, got {0}: {1}".format(ls, name))

    xs, ys = s.split()
    width = int(xs)
    height = int(ys)
    m = int(m)

    if m != 255:
        print("Just want 8 bit pgms for now!")

    d = fromstring(data, dtype=uint8)
    d = reshape(d, (height, width))
    return (m, width, height, d)
c03633069b2b8f3302a8f28e03f4476ac7478055
3,653,303
def gdxfile(rawgdx):
    """A gdx.File fixture."""
    return gdx.File(rawgdx)
6138077fa959cecd4a7402fe3c7b6b7dee5d99f9
3,653,304
import typing
from typing import Union
from typing import Dict
from typing import Any


def AppBar(
    absolute: bool = None,
    app: bool = None,
    attributes: dict = {},
    bottom: bool = None,
    children: list = [],
    class_: str = None,
    clipped_left: bool = None,
    clipped_right: bool = None,
    collapse: bool = None,
    collapse_on_scroll: bool = None,
    color: str = None,
    dark: bool = None,
    dense: bool = None,
    elevate_on_scroll: bool = None,
    elevation: typing.Union[float, str] = None,
    extended: bool = None,
    extension_height: typing.Union[float, str] = None,
    fade_img_on_scroll: bool = None,
    fixed: bool = None,
    flat: bool = None,
    floating: bool = None,
    height: typing.Union[float, str] = None,
    hide_on_scroll: bool = None,
    inverted_scroll: bool = None,
    layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {},
    light: bool = None,
    max_height: typing.Union[float, str] = None,
    max_width: typing.Union[float, str] = None,
    min_height: typing.Union[float, str] = None,
    min_width: typing.Union[float, str] = None,
    prominent: bool = None,
    scroll_off_screen: bool = None,
    scroll_target: str = None,
    scroll_threshold: typing.Union[str, float] = None,
    short: bool = None,
    shrink_on_scroll: bool = None,
    slot: str = None,
    src: typing.Union[str, dict] = None,
    style_: str = None,
    tag: str = None,
    tile: bool = None,
    v_model: Any = "!!disabled!!",
    v_on: str = None,
    v_slots: list = [],
    value: bool = None,
    width: typing.Union[float, str] = None,
    on_absolute: typing.Callable[[bool], Any] = None,
    on_app: typing.Callable[[bool], Any] = None,
    on_attributes: typing.Callable[[dict], Any] = None,
    on_bottom: typing.Callable[[bool], Any] = None,
    on_children: typing.Callable[[list], Any] = None,
    on_class_: typing.Callable[[str], Any] = None,
    on_clipped_left: typing.Callable[[bool], Any] = None,
    on_clipped_right: typing.Callable[[bool], Any] = None,
    on_collapse: typing.Callable[[bool], Any] = None,
    on_collapse_on_scroll: typing.Callable[[bool], Any] = None,
    on_color: typing.Callable[[str], Any] = None,
    on_dark: typing.Callable[[bool], Any] = None,
    on_dense: typing.Callable[[bool], Any] = None,
    on_elevate_on_scroll: typing.Callable[[bool], Any] = None,
    on_elevation: typing.Callable[[typing.Union[float, str]], Any] = None,
    on_extended: typing.Callable[[bool], Any] = None,
    on_extension_height: typing.Callable[[typing.Union[float, str]], Any] = None,
    on_fade_img_on_scroll: typing.Callable[[bool], Any] = None,
    on_fixed: typing.Callable[[bool], Any] = None,
    on_flat: typing.Callable[[bool], Any] = None,
    on_floating: typing.Callable[[bool], Any] = None,
    on_height: typing.Callable[[typing.Union[float, str]], Any] = None,
    on_hide_on_scroll: typing.Callable[[bool], Any] = None,
    on_inverted_scroll: typing.Callable[[bool], Any] = None,
    on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None,
    on_light: typing.Callable[[bool], Any] = None,
    on_max_height: typing.Callable[[typing.Union[float, str]], Any] = None,
    on_max_width: typing.Callable[[typing.Union[float, str]], Any] = None,
    on_min_height: typing.Callable[[typing.Union[float, str]], Any] = None,
    on_min_width: typing.Callable[[typing.Union[float, str]], Any] = None,
    on_prominent: typing.Callable[[bool], Any] = None,
    on_scroll_off_screen: typing.Callable[[bool], Any] = None,
    on_scroll_target: typing.Callable[[str], Any] = None,
    on_scroll_threshold: typing.Callable[[typing.Union[str, float]], Any] = None,
    on_short: typing.Callable[[bool], Any] = None,
    on_shrink_on_scroll: typing.Callable[[bool], Any] = None,
    on_slot: typing.Callable[[str], Any] = None,
    on_src: typing.Callable[[typing.Union[str, dict]], Any] = None,
    on_style_: typing.Callable[[str], Any] = None,
    on_tag: typing.Callable[[str], Any] = None,
    on_tile: typing.Callable[[bool], Any] = None,
    on_v_model: typing.Callable[[Any], Any] = None,
    on_v_on: typing.Callable[[str], Any] = None,
    on_v_slots: typing.Callable[[list], Any] = None,
    on_value: typing.Callable[[bool], Any] = None,
    on_width: typing.Callable[[typing.Union[float, str]], Any] = None,
) -> Element[ipyvuetify.generated.AppBar]:
    """ """
    kwargs: Dict[Any, Any] = without_default(AppBar, locals())
    if isinstance(kwargs.get("layout"), dict):
        kwargs["layout"] = w.Layout(**kwargs["layout"])
    widget_cls = ipyvuetify.generated.AppBar
    comp = react.core.ComponentWidget(widget=widget_cls)
    return Element(comp, **kwargs)
51de728b0d2935161bd040248d94b3d15aba5d16
3,653,305
def conjoin(*funcs):
    """
    Creates a function that composes multiple predicate functions into a
    single predicate that tests whether **all** elements of an object pass
    each predicate.

    Args:
        *funcs (callable): Function(s) to conjoin.

    Returns:
        Conjoin: Function(s) wrapped in a :class:`Conjoin` context.

    Example:

        >>> conjoiner = conjoin(lambda x: isinstance(x, int), lambda x: x > 3)
        >>> conjoiner([1, 2, 3])
        False
        >>> conjoiner([1.0, 2, 1])
        False
        >>> conjoiner([4.0, 5, 6])
        False
        >>> conjoiner([4, 5, 6])
        True

    .. versionadded:: 2.0.0
    """
    return Conjoin(*funcs)
835c2962bcc3a2c3dcf0bf19649221aebb73b63b
3,653,307
import hashlib


def calculate_file_sha256(file_path):
    """calculate file sha256 hash code."""
    with open(file_path, 'rb') as fp:
        sha256_cal = hashlib.sha256()
        sha256_cal.update(fp.read())
        return sha256_cal.hexdigest()
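A quick usage check with a temporary file; the SHA-256 of b"hello" is well known:

```python
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")

print(calculate_file_sha256(tmp.name))
# 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
```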
bfa7a43516e51a80ccd63ea3ace6be6e5e9dd2c0
3,653,308
from collections import OrderedDict
import warnings

import numpy as np


def select_columns_by_feature_type(df, unique_value_to_total_value_ratio_threshold=.05,
                                   text_unique_threshold=.9, exclude_strings=True,
                                   return_dict=False, return_type='categoric'):
    """
    Determine if a column fits into one of the following types: numeric,
    categoric, datetime, text. Set `return_type` to one of these types to
    return a list of the associated column names.

    Determination is made based on whether a column in the dataframe is
    continuous, using the ratio between the number of unique values in a
    column and the total number of values. Low-cardinality values will get
    cut off if above the specified ratio. Optionally specify `return_dict`
    to return a dictionary whose keys are column names and whose values are
    boolean: True if categoric, False if continuous.

    The default ratio threshold is .05. `exclude_strings` is True by default
    (i.e. if a column has string values it will be marked as a categoric
    column). If looking for columns that may be numeric/continuous but first
    need to be processed, this can be set to False.

    Parameters
    ----------
    df : Pandas DataFrame
        A DataFrame to search columns within.
    unique_value_to_total_value_ratio_threshold : float
        The maximum ratio of unique values in a column / total observations.
        Akin to a cardinality ratio. Default is .05, i.e. anything with more
        than 5% of its values being unique will be considered non-categoric.
    exclude_strings : Boolean
        Flag to include all columns with any string values as categoric
        columns. Default is True.
    return_dict : Boolean
        Flag to return a dictionary of the form {column: categoric_boolean}
        where the value is True if a column is categoric. Default is False.
    return_type : str
        One of 'categoric', 'numeric', 'text' or 'datetime'; selects which
        list of column names to return. Default is 'categoric'.

    Returns
    -------
    Dict/List
        A list of the column names of the requested type, OR a dictionary
        with keys of column names and values True if categoric.
    """
    if return_type not in ['categoric', 'numeric', 'text', 'datetime']:
        warnings.warn("'return_type' must be one of: ['categoric', 'numeric', 'text', 'datetime']")

    likely_categoric = OrderedDict()
    for column in df.columns:
        likely_categoric[column] = 1. * df[column].nunique() / df[column].count() < unique_value_to_total_value_ratio_threshold

        # Check if any of the values in the column are strings.
        if exclude_strings:
            # If so, its value should be true to indicate it is categoric
            if df[column].apply(type).eq(str).any():
                likely_categoric[column] = True

    likely_text = OrderedDict()
    for column in df.columns:
        # Check for unique pct above threshold and value is string
        likely_text[column] = (1. * df[column].nunique() / df[column].count() > text_unique_threshold)  # & isinstance(df[column].values[0], str)

    likely_datetime = []
    for dtype in [np.datetime64, 'datetime', 'datetime64', np.timedelta64,
                  'timedelta', 'timedelta64', 'datetimetz']:
        # Add any datetime columns found to likely_datetime collection
        time_cols = df.select_dtypes(include=dtype).columns.values.tolist()
        # Append if not empty
        if time_cols:
            likely_datetime.append(time_cols)
    likely_datetime = np.array(likely_datetime).flatten().tolist()

    if return_dict:
        return likely_categoric

    if return_type == 'numeric':
        numeric_cols = [col for col, value in likely_categoric.items()
                        if (not value) & (col not in likely_datetime)]
        return numeric_cols
    elif return_type == 'categoric':
        categoric_cols = [col for col, value in likely_categoric.items() if value]
        return categoric_cols
    elif return_type == 'text':
        text_cols = [col for col, value in likely_text.items() if value]
        return text_cols
    elif return_type == 'datetime':
        return likely_datetime
    else:
        print('Please specify valid return option')
6335152405bc175805e8484ff23f58d4f6ce6f6a
3,653,309
def _Counter_random(self, filter=None):
    """Return a single random element from the Counter collection, weighted by count."""
    return _Counter_randoms(self, 1, filter=filter)[0]
95dc2ab7857b27a831b273af7dba143b8b791b27
3,653,310
def EnsureAndroidSdkPackagesInstalled(abi):
    """Return true if at least one package was not already installed."""
    abiPackageList = SdkPackagesForAbi(abi)
    installedSomething = False
    packages = AndroidListSdk()
    for package in abiPackageList:
        installedSomething |= EnsureSdkPackageInstalled(packages, package)
    return installedSomething
b43ee6094dc4cd8f71ec1319dbd5bd32d272b55a
3,653,311
import pandas as pd


def dataframe_like(value, name, optional=False, strict=False):
    """
    Convert to dataframe or raise if not dataframe_like

    Parameters
    ----------
    value : object
        Value to verify
    name : str
        Variable name for exceptions
    optional : bool
        Flag indicating whether None is allowed
    strict : bool
        If True, then only allow dataframe. If False, allow types that
        support casting to dataframe.

    Returns
    -------
    converted : dataframe
        value converted to a dataframe
    """
    if optional and value is None:
        return None
    # Accept dicts (castable to DataFrame) unless strict, in which case
    # require an actual DataFrame; the original check only accepted dicts,
    # which contradicted the docstring.
    if not isinstance(value, (dict, pd.DataFrame)) or (
        strict and not isinstance(value, pd.DataFrame)
    ):
        extra_text = "If not None, " if optional else ""
        strict_text = " or dataframe_like " if strict else ""
        msg = "{0}{1} must be a dataframe{2}".format(extra_text, name, strict_text)
        raise TypeError(msg)
    return pd.DataFrame(value)
016ffcda7050ac639d04522a666526753eb52a84
3,653,312
import pandas as pd
from sklearn.decomposition import PCA


def pcaFunc(z, n_components=100):
    """PCA"""
    # Use the parameter instead of the previously hard-coded 100.
    pca = PCA(n_components=n_components)
    pca_result = pca.fit_transform(z)
    re = pd.DataFrame()
    re['pca-one'] = pca_result[:, 0]
    re['pca-two'] = pca_result[:, 1]
    re['pca-three'] = pca_result[:, 2]
    # Not printed now
    # print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))
    return pca_result, re
1dda1542a045eab69aab5488be2c754bde555311
3,653,313
import torch.optim as optim


def choose_optimizer(discriminator, generator, netD, netG, lr_d=2e-4, lr_g=2e-3):
    """Set optimizers for discriminator and generator.

    :param discriminator: str, name
    :param generator: str, name
    :param netD:
    :param netG:
    :param lr_d:
    :param lr_g:
    :return: optimizerD, optimizerG
    """
    if discriminator == 'Adam':
        optimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))
    elif discriminator == 'RMSprop':
        optimizerD = optim.RMSprop(netD.parameters(), lr=lr_d)
    elif discriminator == 'SGD':
        optimizerD = optim.SGD(netD.parameters(), lr=lr_d, momentum=0.9)
    elif discriminator == 'zoVIA':
        optimizerD = zoVIA(netD, lr=lr_d)
    elif discriminator == 'zoESVIA':
        optimizerD = zoESVIA(netD, lr=lr_d)
    elif discriminator == 'zoscESVIA':
        optimizerD = zoscESVIA(netD, lr=lr_d)

    if generator == 'Adam':
        optimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
    elif generator == 'RMSprop':
        optimizerG = optim.RMSprop(netG.parameters(), lr=lr_g)
    elif generator == 'SGD':
        optimizerG = optim.SGD(netG.parameters(), lr=lr_g, momentum=0.9)
    elif generator == 'zoVIA':
        optimizerG = zoVIA(netG, lr=lr_g)
    elif generator == 'zoESVIA':
        optimizerG = zoESVIA(netG, lr=lr_g)
    elif generator == 'zoscESVIA':
        optimizerG = zoscESVIA(netG, lr=lr_g)

    print('Discriminator optimizer: {}, lr={}'.format(discriminator, lr_d))
    print('Generator optimizer: {}, lr={}'.format(generator, lr_g))
    return optimizerD, optimizerG
b3784d98c1743c10e3d1e9bca76288bd45c9c99e
3,653,314
def prod(*args: int) -> int:
    """
    This function is wrapped and documented in `_polymorphic.prod()`.
    """
    prod_ = 1
    for arg in args:
        prod_ *= arg
    return prod_
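Usage is as expected for a variadic product:

```python
print(prod(2, 3, 4))  # 24
print(prod())         # 1 -- the empty product
```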
eec30bf6339280173e0e2fa517558e6a452b9c37
3,653,315
def field_value(field):
    """Returns the value for this BoundField, as rendered in widgets."""
    if field.form.is_bound:
        if isinstance(field.field, FileField) and field.data is None:
            val = field.form.initial.get(field.name, field.field.initial)
        else:
            val = field.data
    else:
        val = field.form.initial.get(field.name, field.field.initial)
        if callable(val):
            val = val()
    if val is None:
        val = ''
    return val
5dc3792e0d6cd2cb6173c2479a024881f80a6d2b
3,653,316
import numpy as np


def distances(p):
    """Compute lengths of shortest paths between all nodes in Pharmacophore.

    Args:
        p (Pharmacophore): model to analyse

    Returns:
        dist (numpy array): array with distances between all nodes
    """
    if not isinstance(p, Pharmacophore):
        raise TypeError("Expected Pharmacophore, got %s instead" % type(p).__name__)

    dist = np.array(p.edges)
    for i in range(p.numnodes):
        for j in range(i):
            if dist[i][j] == 0:
                dist[i][j] = dist[j][i] = float("inf")

    for i in range(len(dist)):
        compute = False
        for j in range(i):
            if dist[i][j] == float("inf"):
                compute = True
                break
        if compute:
            # Dijkstra's algorithm from node i
            queue = [k for k in range(p.numnodes)]
            while queue:
                queue.sort(key=lambda x: dist[i, x])
                u = queue[0]
                del queue[0]
                for v in np.where(p.edges[u] > 0)[0]:
                    if v in queue:
                        alt = dist[i, u] + p.edges[u, v]
                        if alt < dist[i, v]:
                            dist[i, v] = dist[v, i] = alt
    return dist
40e77672ad9447ed4c7b69b14aadbc2f125cb499
3,653,317
def initial_data(logged_on_user, users_fixture, streams_fixture):
    """Response from /register API request."""
    return {
        'full_name': logged_on_user['full_name'],
        'email': logged_on_user['email'],
        'user_id': logged_on_user['user_id'],
        'realm_name': 'Test Organization Name',
        'unsubscribed': [{
            'audible_notifications': False,
            'description': 'announce',
            'stream_id': 7,
            'is_old_stream': True,
            'desktop_notifications': False,
            'pin_to_top': False,
            'stream_weekly_traffic': 0,
            'invite_only': False,
            'name': 'announce',
            'push_notifications': False,
            'email_address': '',
            'color': '#bfd56f',
            'in_home_view': True
        }],
        'result': 'success',
        'queue_id': '1522420755:786',
        'realm_users': users_fixture,
        'cross_realm_bots': [{
            'full_name': 'Notification Bot',
            'timezone': '',
            'is_bot': True,
            'date_joined': '2015-12-28T19:58:29.035543+00:00',
            'email': '[email protected]',
            'user_id': 5,
            'is_admin': False,
            'avatar_url': 'dummy_avatar_url'
        }, {
            'full_name': 'Email Gateway',
            'timezone': '',
            'is_bot': True,
            'date_joined': '2015-12-28T19:58:29.037658+00:00',
            'email': '[email protected]',
            'user_id': 6,
            'is_admin': False,
            'avatar_url': 'dummy_avatar_url'
        }, {
            'full_name': 'Welcome Bot',
            'timezone': '',
            'is_bot': True,
            'date_joined': '2015-12-28T19:58:29.033231+00:00',
            'email': '[email protected]',
            'user_id': 4,
            'is_admin': False,
            'avatar_url': 'dummy_avatar_url'
        }, {
            'full_name': 'Zulip Feedback Bot',
            'timezone': '',
            'is_bot': True,
            'date_joined': '2015-12-28T19:58:28.972281+00:00',
            'email': '[email protected]',
            'user_id': 1,
            'is_admin': False,
            'avatar_url': 'dummy_avatar_url'
        }],
        'subscriptions': streams_fixture,
        'msg': '',
        'max_message_id': 552761,
        'never_subscribed': [{
            'invite_only': False,
            'description': 'Announcements from the Zulip GCI Mentors',
            'stream_id': 87,
            'name': 'GCI announce',
            'is_old_stream': True,
            'stream_weekly_traffic': 0
        }, {
            'invite_only': False,
            'description': 'General discussion',
            'stream_id': 74,
            'name': 'GCI general',
            'is_old_stream': True,
            'stream_weekly_traffic': 0
        }],
        'unread_msgs': {
            'pms': [{
                'sender_id': 1,
                'unread_message_ids': [1, 2]
            }, {
                'sender_id': 2,
                'unread_message_ids': [3]
            }],
            'count': 0,
            'mentions': [],
            'streams': [{
                'stream_id': 1000,
                'topic': 'Some general unread topic',
                'unread_message_ids': [4, 5, 6],
                'sender_ids': [1, 2]
            }, {
                'stream_id': 99,
                'topic': 'Some private unread topic',
                'unread_message_ids': [7],
                'sender_ids': [1, 2]
            }],
            'huddles': [{
                'user_ids_string': '1001,11,12',
                'unread_message_ids': [11, 12, 13]
            }, {
                'user_ids_string': '1001,11,12,13',
                'unread_message_ids': [101, 102],
            }]
        },
        'presences': {
            '[email protected]': {
                'ZulipElectron': {
                    'pushable': False,
                    'client': 'ZulipElectron',
                    'status': 'idle',
                    'timestamp': 1522484059
                },
                'ZulipMobile': {
                    'pushable': False,
                    'client': 'ZulipMobile',
                    'status': 'idle',
                    'timestamp': 1522384165
                },
                'aggregated': {
                    'timestamp': 1522484059,
                    'client': 'ZulipElectron',
                    'status': 'idle'
                }
            },
            logged_on_user['email']: {
                'website': {
                    'pushable': True,
                    'client': 'website',
                    'status': 'active',
                    'timestamp': 1522458138
                },
                'ZulipMobile': {
                    'pushable': True,
                    'client': 'ZulipMobile',
                    'status': 'active',
                    'timestamp': 1522480103
                },
                'aggregated': {
                    'timestamp': 1522480103,
                    'client': 'ZulipMobile',
                    'status': 'active'
                }
            }
        },
        'twenty_four_hour_time': True,
        'last_event_id': -1,
        'muted_topics': [],
        'realm_user_groups': [],
        # Deliberately use hard-coded zulip version and feature level to avoid
        # adding extra tests unnecessarily.
        'zulip_version': MINIMUM_SUPPORTED_SERVER_VERSION[0],
        'zulip_feature_level': MINIMUM_SUPPORTED_SERVER_VERSION[1],
    }
b85eafbf359a6c34decc866f4d1fbb494ac907f8
3,653,318
def affine_relu_backward(dout, cache):
    """Backward pass for the affine-relu convenience layer."""
    fc_cache, relu_cache = cache
    da = relu_backward(dout, relu_cache)
    dx, dw, db = affine_backward(da, fc_cache)
    return dx, dw, db
201f37d4d6ac9e170a52766f41d892527681a3d1
3,653,319
from typing import List


def create_initial_population() -> List[Image]:
    """Create population at step 0."""
    return [random_image() for _ in range(POP_SIZE)]
895632869962014695382e34961f6e6636619fbe
3,653,320
from typing import Any


def adapt(value: Any, pg_type: str) -> Any:
    """
    Coerces a value with a PG type into its Python equivalent.

    :param value: Value
    :param pg_type: Postgres datatype
    :return: Coerced value.
    """
    if value is None:
        return None
    if pg_type in _TYPE_MAP:
        return _TYPE_MAP[pg_type](value)
    return value
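A usage sketch; `_TYPE_MAP` is module state not shown in the snippet, so a hypothetical mapping from Postgres type names to Python constructors is assumed here:

```python
# Hypothetical mapping; the real _TYPE_MAP lives in the surrounding module.
_TYPE_MAP = {"integer": int, "numeric": float}

print(adapt("42", "integer"))    # 42
print(adapt("3.14", "numeric"))  # 3.14
print(adapt(None, "integer"))    # None -- NULLs pass through
print(adapt("abc", "text"))      # 'abc' -- unknown types are returned unchanged
```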
f040cd6fbf5aa8a396efa36879b83e13b5d89da7
3,653,321
import uuid
from datetime import datetime


def createPREMISEventXML(eventType, agentIdentifier, eventDetail, eventOutcome,
                         outcomeDetail=None, eventIdentifier=None,
                         linkObjectList=[], eventDate=None):
    """Actually create our PREMIS Event XML"""
    eventXML = etree.Element(PREMIS + "event", nsmap=PREMIS_NSMAP)
    eventIDXML = etree.SubElement(eventXML, PREMIS + "eventIdentifier")
    eventTypeXML = etree.SubElement(eventXML, PREMIS + "eventType")
    eventTypeXML.text = eventType
    eventIDTypeXML = etree.SubElement(eventIDXML, PREMIS + "eventIdentifierType")
    eventIDTypeXML.text = \
        "http://purl.org/net/untl/vocabularies/identifier-qualifiers/#UUID"
    eventIDValueXML = etree.SubElement(eventIDXML, PREMIS + "eventIdentifierValue")
    if eventIdentifier:
        eventIDValueXML.text = eventIdentifier
    else:
        eventIDValueXML.text = uuid.uuid4().hex

    eventDateTimeXML = etree.SubElement(eventXML, PREMIS + "eventDateTime")
    if eventDate is None:
        eventDateTimeXML.text = xsDateTime_format(datetime.utcnow())
    else:
        eventDateTimeXML.text = xsDateTime_format(eventDate)

    eventDetailXML = etree.SubElement(eventXML, PREMIS + "eventDetail")
    eventDetailXML.text = eventDetail
    eventOutcomeInfoXML = etree.SubElement(eventXML, PREMIS + "eventOutcomeInformation")
    eventOutcomeXML = etree.SubElement(eventOutcomeInfoXML, PREMIS + "eventOutcome")
    eventOutcomeXML.text = eventOutcome
    if outcomeDetail:
        eventOutcomeDetailXML = etree.SubElement(
            eventOutcomeInfoXML, PREMIS + "eventOutcomeDetail"
        )
        eventOutcomeDetailNoteXML = etree.SubElement(
            eventOutcomeDetailXML, PREMIS + "eventOutcomeDetailNote"
        )
        eventOutcomeDetailNoteXML.text = outcomeDetail

    # Assuming it's a list of 3-item tuples here [ (identifier, type, role) ]
    linkAgentIDXML = etree.SubElement(eventXML, PREMIS + "linkingAgentIdentifier")
    linkAgentIDTypeXML = etree.SubElement(linkAgentIDXML, PREMIS + "linkingAgentIdentifierType")
    linkAgentIDTypeXML.text = \
        "http://purl.org/net/untl/vocabularies/identifier-qualifiers/#URL"
    linkAgentIDValueXML = etree.SubElement(linkAgentIDXML, PREMIS + "linkingAgentIdentifierValue")
    linkAgentIDValueXML.text = agentIdentifier
    linkAgentIDRoleXML = etree.SubElement(linkAgentIDXML, PREMIS + "linkingAgentRole")
    linkAgentIDRoleXML.text = \
        "http://purl.org/net/untl/vocabularies/linkingAgentRoles/#executingProgram"
    for linkObject in linkObjectList:
        linkObjectIDXML = etree.SubElement(eventXML, PREMIS + "linkingObjectIdentifier")
        linkObjectIDTypeXML = etree.SubElement(
            linkObjectIDXML, PREMIS + "linkingObjectIdentifierType"
        )
        linkObjectIDTypeXML.text = linkObject[1]
        linkObjectIDValueXML = etree.SubElement(
            linkObjectIDXML, PREMIS + "linkingObjectIdentifierValue"
        )
        linkObjectIDValueXML.text = linkObject[0]
        if linkObject[2]:
            linkObjectRoleXML = etree.SubElement(
                linkObjectIDXML, PREMIS + "linkingObjectRole"
            )
            linkObjectRoleXML.text = linkObject[2]
    return eventXML
25836d6cd4b40ad672ca3438ba3583cd147a52bb
3,653,322
def get_primary_key(conn, table, columns):
    """
    Attempts to reverse lookup the primary key by querying the table using
    the first column and iteratively adding the columns that come after it
    until the query returns a unique row in the table.

    :param conn: an SQLite connection object
        table: a string denoting the table name to query
        columns: a list containing column names of the table
    :return: the list of columns which makes up the primary key
    """
    select_row_query = "SELECT * FROM `{}`".format(table)
    count_row_query = "SELECT COUNT(*) FROM `{}` WHERE `{}`"
    primary_key = []
    row = conn.execute(select_row_query).fetchone()
    if row is not None:
        for i, column in enumerate(columns):
            if i == 0:
                count_row_query = count_row_query.format(table, column)
            else:
                count_row_query += " AND `{}`".format(column)
            count_row_query += append_eql_condition(row[i])
            primary_key.append(column)
            count = conn.execute(count_row_query).fetchone()
            if count[0] == 1:
                return primary_key
    # if no primary key was found then the primary key is made up of all columns
    return columns
3b74f85214e89af322fd1da1e6c8de1eba4f4ca7
3,653,323
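# Usage sketch for get_primary_key against an in-memory SQLite table. The
# helper append_eql_condition is defined elsewhere in the source; the version
# below is an assumed stand-in that renders an "= value" (or IS NULL)
# comparison for the WHERE clause.
import sqlite3

def append_eql_condition(value):
    if value is None:
        return " IS NULL"
    if isinstance(value, str):
        return " = '{}'".format(value.replace("'", "''"))
    return " = {}".format(value)

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE person (last TEXT, first TEXT, age INTEGER)")
conn.execute("INSERT INTO person VALUES ('Doe', 'Jane', 30)")
conn.execute("INSERT INTO person VALUES ('Doe', 'John', 41)")

# ('last', 'first') uniquely identifies the first row, so the reverse
# lookup stops after two columns.
print(get_primary_key(conn, "person", ["last", "first", "age"]))
# -> ['last', 'first']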
from fastapi.responses import RedirectResponse


def redirect_to_docs():
    """Redirect to API docs when at site root"""
    return RedirectResponse('/redoc')
f284167e238845651eedaf3bcc1b85e64979df6a
3,653,324
def init_neighbours(key):
    """
    Sets the neighbouring nodes and initializes the edge count to each
    neighbour to 1 when the graph stores no count.

    :param key: str - key of the node whose neighbours we are searching for
    :return: dictionary of neighbours with the corresponding edge count
    """
    neighbours = {}
    neighbouring_nodes = graph[key]

    for node in neighbouring_nodes:
        if neighbouring_nodes[node] == {}:
            neighbours[node] = 1
        else:
            neighbours[node] = neighbouring_nodes[node]

    return neighbours
6fa49ffa75051eeca9bd1714ec3e4817ef429bad
3,653,325
import numpy as np


def computeNumericalGradient(J, theta):
    """ Compute numgrad = computeNumericalGradient(J, theta)

    theta: a matrix of parameters
    J: a function that outputs a real-number and the gradient.
    Calling y = J(theta)[0] will return the function value at theta.
    """

    # Initialize numgrad with zeros
    numgrad = np.zeros(theta.shape)

    ## ---------- YOUR CODE HERE --------------------------------------
    # Instructions:
    # Implement numerical gradient checking, and return the result in numgrad.
    # You should write code so that numgrad[i][j] is (the numerical approximation to) the
    # partial derivative of J with respect to theta[i][j], evaluated at theta.
    # I.e., numgrad[i][j] should be the (approximately) partial derivative of J with
    # respect to theta[i][j].
    #
    # Hint: You will probably want to compute the elements of numgrad one at a time.

    # Set the perturbation size
    epsilon = 0.0001

    # Outer loop over the rows of theta
    for i in range(theta.shape[0]):
        # Inner loop over the columns of theta
        for j in range(theta.shape[1]):
            # Perturb theta[i, j] downwards by epsilon
            theta_min = theta.copy()
            theta_min[i, j] = theta_min[i, j] - epsilon
            # J returns (cost, dW, db); only the cost is needed here
            cost_min, dW, db = J(theta_min)

            # Perturb theta[i, j] upwards by epsilon
            theta_max = theta.copy()
            theta_max[i, j] = theta_max[i, j] + epsilon
            cost_max, dW, db = J(theta_max)

            # Central difference approximation of the partial derivative
            numgrad[i][j] = (cost_max - cost_min) / (2 * epsilon)
    ## ---------------------------------------------------------------

    return numgrad
2d4e4ed190bbb0c5507ecb896c13d33fcd7aa1b5
3,653,326
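# A quick self-check of computeNumericalGradient on a cost with a known
# gradient. J mimics the (cost, dW, db) return convention that the loop
# above unpacks; only the cost is used by the numerical check.
import numpy as np

def J(theta):
    cost = 0.5 * np.sum(theta ** 2)   # the gradient of this cost is theta
    return cost, theta, None          # (cost, dW, db) convention

theta = np.random.randn(3, 2)
numgrad = computeNumericalGradient(J, theta)
print(np.max(np.abs(numgrad - theta)))   # should be on the order of 1e-8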
from ctypes import c_char_p


def get_error_msg(handle):
    """
    Get the latest and greatest DTrace error.
    """
    txt = LIBRARY.dtrace_errmsg(handle, LIBRARY.dtrace_errno(handle))
    return c_char_p(txt).value
73d945367e3003beb29505852004f0c71b205873
3,653,327
def sigma_hat(frequency, sigma, epsilon=epsilon_0, quasistatic=False):
    r"""
    conductivity with displacement current contribution

    .. math::

        \hat{\sigma} = \sigma + i \omega \varepsilon

    **Required**

    :param (float, numpy.array) frequency: frequency (Hz)
    :param float sigma: electrical conductivity (S/m)

    **Optional**

    :param float epsilon: dielectric permittivity. Default :math:`\varepsilon_0`
    :param bool quasistatic: use the quasi-static assumption? Default: False
    """
    if quasistatic is True:
        return sigma
    return sigma + 1j*omega(frequency)*epsilon
17aee0f33ba8786934d750e37e1afd0617e8aa1d
3,653,328
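# Usage sketch for sigma_hat. omega and epsilon_0 come from elsewhere in the
# source module (and must exist before the def above runs); the stand-ins
# below use the standard definitions, omega = 2*pi*f and the vacuum
# permittivity in F/m.
import numpy as np

epsilon_0 = 8.8541878128e-12   # vacuum permittivity [F/m]

def omega(frequency):
    return 2 * np.pi * np.asarray(frequency)

# At 100 Hz the displacement term is negligible; at 1 GHz it dominates.
print(sigma_hat(1e2, 1e-2, epsilon=epsilon_0))
print(sigma_hat(1e9, 1e-2, epsilon=epsilon_0))
print(sigma_hat(1e9, 1e-2, epsilon=epsilon_0, quasistatic=True))   # -> 0.01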
def encode_list(key, list_): # type: (str, Iterable) -> Dict[str, str] """ Converts a list into a space-separated string and puts it in a dictionary :param key: Dictionary key to store the list :param list_: A list of objects :return: A dictionary key->string or an empty dictionary """ if not list_: return {} return {key: " ".join(str(i) for i in list_)}
6cde65017d20e777e27ac86d7f8eb1d025d04947
3,653,329
async def delete_relationship(request: web.Request):
    """
    Remove relationships of resource.

    Uses the :meth:`~aiohttp_json_api.schema.BaseSchema.delete_relationship`
    method of the schema to update the relationship.

    :seealso: http://jsonapi.org/format/#crud-updating-relationships
    """
    relation_name = request.match_info['relation']
    ctx = JSONAPIContext(request)
    relation_field = ctx.schema.get_relationship_field(relation_name,
                                                       source_parameter='URI')

    resource_id = request.match_info.get('id')
    validate_uri_resource_id(ctx.schema, resource_id)

    pagination = None
    if relation_field.relation is Relation.TO_MANY:
        pagination_type = relation_field.pagination
        if pagination_type:
            pagination = pagination_type(request)

    data = await request.json()

    sp = JSONPointer('')
    field = ctx.schema.get_relationship_field(relation_name)
    if field.relation is not Relation.TO_MANY:
        raise RuntimeError('Wrong relationship field. '
                           'Relation to-many is required.')

    await ctx.schema.pre_validate_field(field, data, sp)
    deserialized_data = field.deserialize(ctx.schema, data, sp)

    resource = await ctx.controller.fetch_resource(resource_id)

    old_resource, new_resource = \
        await ctx.controller.remove_relationship(field, resource,
                                                 deserialized_data, sp)

    if old_resource == new_resource:
        return web.HTTPNoContent()

    result = ctx.schema.serialize_relationship(relation_name, new_resource,
                                               pagination=pagination)
    return jsonapi_response(result)
6397ebab365b9339dca7692b4188945401d54779
3,653,330
def cost_efficiency(radius, height, cost): """Compute and return the cost efficiency of a steel can size. The cost efficiency is the volume of the can divided by its cost. Parameters radius: the radius of the steel can height: the height of the steel can cost: the cost of the steel can Return: the cost efficiency of the steel can """ volume = cylinder_volume(radius, height) efficiency = volume / cost return efficiency
e21f767676d5a1e9e5d97ba8bd8f943ecaad5060
3,653,331
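# cost_efficiency depends on cylinder_volume, which is defined elsewhere in
# the source; the obvious implementation (V = pi * r^2 * h) is assumed here.
import math

def cylinder_volume(radius, height):
    return math.pi * radius ** 2 * height

# A 6 cm x 12 cm can costing $1.20: efficiency in cm^3 of volume per dollar.
print(cost_efficiency(6, 12, 1.20))   # -> ~1130.97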
def cb_xmlrpc_register(args): """ Register as a pyblosxom XML-RPC plugin """ args['methods'].update({'pingback.ping': pingback}) return args
e9f5cdde32d1a7b3145918d4fadfc80f4de7301f
3,653,333
import functools


def try_except(method):
    """
    A decorator method to catch Exceptions

    :param:

     - `method`: the method to wrap in a try/except
    """
    @functools.wraps(method)
    def wrapped(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except self.error as error:
            log_error(error, self.logger, self.error_message)
            if hasattr(self, 'close'):
                self.close()
    return wrapped
069c5abd6a2f2dcab8424c829f1dae27e8a294b8
3,653,334
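# Usage sketch for the try_except decorator. It expects the host instance to
# expose `error`, `logger`, and `error_message` attributes (and optionally
# `close`); the class below is hypothetical, and log_error stands in for the
# module-level helper referenced above.
import logging

def log_error(error, logger, message):
    logger.error("%s: %s", message, error)

class FileReader(object):
    error = IOError
    error_message = "read failed"
    logger = logging.getLogger("FileReader")

    @try_except
    def read(self, path):
        with open(path) as handle:
            return handle.read()

print(FileReader().read("/no/such/file"))   # logs the IOError, returns None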
def sosfilter_double_c(signal, sos, states=None):
    """Second order section filter function using cffi, double precision.

    signal_out, states = sosfilter_c(signal_in, sos, states=None)

    Parameters
    ----------
    signal : ndarray
        Signal array of shape (N,).
    sos : ndarray
        Second order section coefficients array of shape (K*6,).
        One biquad -> 6 coefficients:
        ``[b00, b01, b02, a00, a01, a02, ..., b10, bK1 ... , aK2]``
    states : ndarray
        Filter states, initial value can be None.

    Returns
    -------
    signal :
        Filtered signal array of shape (N,).
    states : ndarray
        Filter states, initial value can be None.

    """
    # np.ndarray.tostring and np.fromstring are deprecated; use the
    # buffer-based equivalents (and copy, so the output outlives the
    # cffi buffers).
    signal_c = ffi.new(
        'char[]', np.array(signal, dtype=np.double).flatten().tobytes())
    sos_c = ffi.new(
        'char[]', np.array(sos, dtype=np.double).flatten().tobytes())
    nsamp = int(len(signal))
    ksos = int(sos.size // 6)

    if states is None:
        states = np.zeros(ksos*2).astype(np.double)

    states_c = ffi.new(
        'char[]', np.array(states, dtype=np.double).flatten().tobytes())

    _c.sosfilter_double(ffi.cast("double*", signal_c),
                        nsamp,
                        ffi.cast("double*", sos_c),
                        ksos,
                        ffi.cast("double*", states_c))

    out = np.frombuffer(
        ffi.buffer(signal_c), dtype=np.double, count=nsamp).copy()
    states = np.frombuffer(
        ffi.buffer(states_c), dtype=np.double, count=len(states)).copy()
    return out, states
387d921f86ec6bc9c814d0ca757b36f803d122af
3,653,335
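# Call sketch for sosfilter_double_c. The cffi handles (ffi, _c) come from a
# compiled C extension, so this only runs where that extension is built;
# scipy's 'sos' output uses the same [b0, b1, b2, a0, a1, a2] section layout
# the docstring describes.
import numpy as np
from scipy.signal import butter

sos = butter(4, 0.2, output='sos')   # 2 biquad sections, shape (2, 6)
signal = np.random.randn(1024)

out, states = sosfilter_double_c(signal, sos)             # fresh filter states
out2, states = sosfilter_double_c(signal, sos, states)    # continue streaming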
import logging


def node_exporter_check():
    """
    Checks existence & health of node exporter pods
    """
    kube = kube_api()
    namespaces = kube.list_namespace()
    ns_names = [nspace.metadata.name for nspace in namespaces.items]

    result = {'category':  'observability',
              'case_name': 'node_exporter_check',
              'criteria':  'pass',
              'details':   []
              }

    status = []
    found = False
    logger = logging.getLogger(__name__)

    def check_namespace(ns_name):
        # Scan one namespace for node-exporter pods and record their health.
        nonlocal found
        pod_list = kube.list_namespaced_pod(ns_name, watch=False)
        for pod in pod_list.items:
            if 'node-exporter' in pod.metadata.name:
                pod_stats = pod_status(logger, pod)
                if pod_stats['criteria'] == 'fail':
                    pod_stats['logs'] = get_logs(kube, pod)
                    result['criteria'] = 'fail'
                status.append(pod.metadata.name)
                status.append(pod_stats)
                found = True

    # Prefer the dedicated 'monitoring' namespace; otherwise scan them all.
    if 'monitoring' in ns_names:
        check_namespace('monitoring')
    else:
        for ns_name in ns_names:
            check_namespace(ns_name)

    # No node-exporter pod found anywhere counts as a failure.
    if not found:
        result['criteria'] = 'fail'

    result['details'].append(status)
    store_result(logger, result)
    return result
25a1c23107654a6b561d54ffce08aa6025ae1d2e
3,653,337
def functional_domain_min(braf_gene_descr_min, location_descriptor_braf_domain): """Create functional domain test fixture.""" params = { "status": "preserved", "name": "Serine-threonine/tyrosine-protein kinase, catalytic domain", "id": "interpro:IPR001245", "gene_descriptor": braf_gene_descr_min, "location_descriptor": location_descriptor_braf_domain } return FunctionalDomain(**params)
905e6b3dc4c1507c57d71879b582794cd66cdd8e
3,653,339
from base64 import b64encode

from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA


def rsa_encrypt(rsa_key, data):
    """
    rsa_key: the RSA public key
    Encrypts the login password
    """
    data = bytes(data, encoding="utf8")
    encrypt = PKCS1_v1_5.new(RSA.importKey(rsa_key))
    Sencrypt = b64encode(encrypt.encrypt(data))
    return Sencrypt.decode("utf-8")
07384216eff4d0f109e9a0b3bf45c0c1ab108b26
3,653,340
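# Round-trip sketch for rsa_encrypt using PyCryptodome (which provides the
# Crypto.* modules imported above). The decrypt side is shown to confirm the
# returned string is a base64-encoded PKCS#1 v1.5 ciphertext.
from base64 import b64decode

key = RSA.generate(2048)
ciphertext_b64 = rsa_encrypt(key.publickey().export_key(), "hunter2")

cipher = PKCS1_v1_5.new(key)
plaintext = cipher.decrypt(b64decode(ciphertext_b64), None)
print(plaintext.decode("utf8"))   # -> hunter2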
import numpy


def shuffle_and_split_data(data_frame):
    """
    Shuffle and split the data into 2 sets: training (70%) and validation (30%).

    Args:
        data_frame (pandas.DataFrame): the data to shuffle and split

    Returns:
        2 numpy.ndarray objects -> (train_indices, validation_indices)

        Each holds the index positions for data in the pandas.DataFrame
    """
    shuffled_indices = numpy.random.permutation(len(data_frame))
    train_up_to = int(len(data_frame) * 0.7)
    train_indices = shuffled_indices[:train_up_to]
    validation_indices = shuffled_indices[train_up_to:]
    return train_indices, validation_indices
dfcad7edb9ec17b81057e00816fe3d5bdadc39be
3,653,341
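# Usage sketch: a 70/30 positional split of a small DataFrame.
import pandas

frame = pandas.DataFrame({"x": range(10)})
train_idx, valid_idx = shuffle_and_split_data(frame)
train, valid = frame.iloc[train_idx], frame.iloc[valid_idx]
print(len(train), len(valid))   # -> 7 3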
import numpy as np


def parse_array_from_string(list_str, dtype=int):
    """ Create a 1D array from text in string.

    Args:
        list_str: input string holding the array elements. Array elements
                  should be contained in brackets [] and separated by commas.
        dtype: data type of the array elements. Default is "int"

    Returns:
        1D numpy array
    """
    list_str = list_str.lstrip().rstrip()
    if not (list_str.startswith('[') and list_str.endswith(']')):
        msg = 'list_str should start with "[" and end with "]".'
        raise SyntaxError(msg)

    return np.array(list_str[1:-1].split(','), dtype=dtype)
b05204a1c6d516a4f4eed298819bda97c5637f37
3,653,342
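# Usage sketch for parse_array_from_string.
print(parse_array_from_string("[1, 2, 3]"))                 # -> [1 2 3]
print(parse_array_from_string("[0.5, 1.5]", dtype=float))   # -> [0.5 1.5]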
def Maj(x, y, z): """ Majority function: False when majority are False Maj(x, y, z) = (x ∧ y) ⊕ (x ∧ z) ⊕ (y ∧ z) """ return (x & y) ^ (x & z) ^ (y & z)
7d4013dfc109b4fc39fd3b0bd3f2f5947d207ff0
3,653,343
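# Maj operates bitwise on integers, which is how SHA-2 uses it: each output
# bit is set exactly when at least two of the three input bits are set.
assert Maj(0b1100, 0b1010, 0b1001) == 0b1000
assert Maj(True, True, False)   # also works on plain booleans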
import pickle def get_package_data(): """Load services and conn_states data into memory""" with open(DATA_PKL_FILE, "rb") as f: services, conn_states = pickle.load(f) return services, conn_states
8bff214f2256f98e43599f4e5ce73d53232e9a7a
3,653,344
def reload_county(): """ Return bird species, totals, location to map """ # receive data from drop-down menu ajax request bird = request.args.get("bird") county = request.args.get("county") # get the zoom level of the new chosen county zoomLevel = get_zoom(county) # reset session data from the ajax request session["bird_name"] = bird session["county_name"] = county session["zoom_level"] = zoomLevel # CENTER map; get_county returns long, lat tuple. long_lat = get_county(county) longitude, latitude = long_lat birding_locations = create_geoFeature(bird, county) # send all this information to website using json bird_data = { "longitude": longitude, "latitude": latitude, "mapbox_api_key": mapbox_api_key, "birding_locations": birding_locations, "bird": bird, "county": county, "zoomLevel": zoomLevel} return jsonify(bird_data)
6c3ad39e12483579d0c9031b5c9a56babcac3823
3,653,347
import re def get_conv2d_out_channels(kernel_shape, kernel_layout): """Get conv2d output channels""" kernel_shape = get_const_tuple(kernel_shape) if len(kernel_shape) == 4: idx = kernel_layout.find("O") assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout) return kernel_shape[idx] if re.match(r"OIHW\d*i\d*o", kernel_layout): return kernel_shape[0] * kernel_shape[5] if re.match(r"OIHW\d*o", kernel_layout): return kernel_shape[0] * kernel_shape[4] raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout))
4b26979b873f36b79f5e29d0c814417a4c21eb32
3,653,348
def bindparam(key, value=None, type_=None, unique=False, required=False, callable_=None): """Create a bind parameter clause with the given key. :param key: the key for this bind param. Will be used in the generated SQL statement for dialects that use named parameters. This value may be modified when part of a compilation operation, if other :class:`_BindParamClause` objects exist with the same key, or if its length is too long and truncation is required. :param value: Initial value for this bind param. This value may be overridden by the dictionary of parameters sent to statement compilation/execution. :param callable\_: A callable function that takes the place of "value". The function will be called at statement execution time to determine the ultimate value. Used for scenarios where the actual bind value cannot be determined at the point at which the clause construct is created, but embedded bind values are still desirable. :param type\_: A ``TypeEngine`` object that will be used to pre-process the value corresponding to this :class:`_BindParamClause` at execution time. :param unique: if True, the key name of this BindParamClause will be modified if another :class:`_BindParamClause` of the same name already has been located within the containing :class:`.ClauseElement`. :param required: a value is required at execution time. """ if isinstance(key, ColumnClause): return _BindParamClause(key.name, value, type_=key.type, callable_=callable_, unique=unique, required=required) else: return _BindParamClause(key, value, type_=type_, callable_=callable_, unique=unique, required=required)
5dc1b311d0dfae04b31d1e869015dbaef9fc2f42
3,653,349
def create_dictionary(timestamp, original_sentence, sequence_switched, err_message, suggestion_list):
    """Create Dictionary Function

    Generates and exports a dictionary object with relevant data for website
    interaction to take place.
    """
    if len(suggestion_list) != 0:
        new_dictionary = {
            "timestamp": timestamp,
            "original_sentence": original_sentence,
            "masked_sentence": sequence_switched,
            "err_message": err_message,
            "possible_corrections": suggestion_list
        }
        return new_dictionary
    else:
        return {}
057d407089a7bb4e445bd0db2632dfcb9f291ed6
3,653,350
import numpy as np


def get_L_BB_b2_d_t(L_BB_b2_d, L_dashdash_b2_d_t):
    """Distribute the daily load over the hours of each day, in proportion
    to the hourly demand profile.

    Args:
        L_BB_b2_d: daily load (365 values, one per day)
        L_dashdash_b2_d_t: hourly demand (24 * 365 values, one per hour)

    Returns:
        hourly load (24 * 365 values); zero for days without demand
    """
    L_BB_b2_d_t = np.zeros(24 * 365)

    L_BB_b2_d = np.repeat(L_BB_b2_d, 24)
    L_dashdash_b2_d = np.repeat(get_L_dashdash_b2_d(L_dashdash_b2_d_t), 24)

    f = L_dashdash_b2_d > 0
    L_BB_b2_d_t[f] = L_BB_b2_d[f] * L_dashdash_b2_d_t[f] / L_dashdash_b2_d[f]

    return L_BB_b2_d_t
51b3551e68e9bbccbe756156d0d623b32a47c23f
3,653,352
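# Behaviour sketch for get_L_BB_b2_d_t: the per-day load L_BB_b2_d is spread
# over the 24 hours of each day in proportion to the hourly demand profile.
# get_L_dashdash_b2_d is defined elsewhere in the source; the stand-in below
# assumes it is simply the daily sum of the hourly values.
import numpy as np

def get_L_dashdash_b2_d(L_dashdash_b2_d_t):
    return L_dashdash_b2_d_t.reshape(365, 24).sum(axis=1)

L_dashdash_b2_d_t = np.tile(np.r_[np.zeros(18), np.ones(6)], 365)   # evening demand
L_BB_b2_d = np.full(365, 12.0)                                      # 12 MJ per day

L_BB_b2_d_t = get_L_BB_b2_d_t(L_BB_b2_d, L_dashdash_b2_d_t)
print(L_BB_b2_d_t[:24])   # 18 zeros, then 2.0 for each of the 6 demand hours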
def _get_tab_counts(business_id_filter, conversation_tab, ru_ref_filter, survey_id):
    """Gets the thread count for the current conversation tab or, if the ru_ref_filter
    is active, for the current conversation tab and all other tabs, i.e. the value for
    the 'current' tab is always populated.

    Calls two different secure message endpoints depending on whether ru_ref_filter is
    set, as the get-all call is more expensive."""

    if ru_ref_filter:
        return message_controllers.get_all_conversation_type_counts(survey_id=survey_id,
                                                                    conversation_tab=conversation_tab,
                                                                    business_id=business_id_filter)
    thread_count = message_controllers.get_conversation_count(survey_id=survey_id,
                                                              business_id=business_id_filter,
                                                              conversation_tab=conversation_tab)
    return {'current': thread_count}
9e79d7d692661496a49db93754716e10644bccf2
3,653,353
def IsInverseTime(*args):
    """Time delay is inversely adjusted, proportional to the amount of voltage outside the regulating band."""
    # Getter
    if len(args) == 0:
        return lib.RegControls_Get_IsInverseTime() != 0

    # Setter
    Value, = args
    lib.RegControls_Set_IsInverseTime(Value)
e0c1b3fef4d3c8b6a822a2946703503628a3f775
3,653,354
def create_userinfo(fname, lname, keypass): """ function to create new user """ new_userinfo = Userinfo(fname, lname, keypass) return new_userinfo
ec7ae9a8cf79482498218571d04bee11ab767d98
3,653,355
from typing import Dict def get_networks() -> Dict[str, SpikingNetwork]: """Get a set of spiking networks to train.""" somatic_spike_fn = get_spike_fn(threshold=15) dendritic_nl_fn = get_default_dendritic_fn( threshold=2, sensitivity=10, gain=1 ) neuron_params = RecurrentNeuronParameters( tau_mem=10e-3, tau_syn=5e-3, backprop_gain=0.5, feedback_strength=15, somatic_spike_fn=somatic_spike_fn, dendritic_spike_fn=dendritic_nl_fn, ) parallel_params = PRCNeuronParameters( tau_mem=10e-3, tau_syn=5e-3, backprop_gain=0.05, feedback_strength=15, somatic_spike_fn=somatic_spike_fn, dend_na_fn=dendritic_nl_fn, dend_ca_fn=get_sigmoid_fn(threshold=4, sensitivity=10, gain=1), dend_nmda_fn=dendritic_nl_fn, tau_dend_na=5e-3, tau_dend_ca=40e-3, tau_dend_nmda=80e-3, ) simple_network_architecture = deepcopy(NETWORK_ARCHITECTURE) simple_network_architecture.weight_scale_by_layer = (3, 7) two_compartment_network_architecture = deepcopy(NETWORK_ARCHITECTURE) two_compartment_network_architecture.weight_scale_by_layer = (0.5, 7) parallel_network_architecture = deepcopy(NETWORK_ARCHITECTURE) parallel_network_architecture.weight_scale_by_layer = (0.02, 7) nets = { 'One compartment': SpikingNetwork( neuron_params, simple_network_architecture ), 'No BAP': TwoCompartmentSpikingNetwork( neuron_params, two_compartment_network_architecture ), 'BAP': RecurrentSpikingNetwork( neuron_params, two_compartment_network_architecture ), 'Parallel subunits, no BAP': ParallelSpikingNetwork( parallel_params, parallel_network_architecture ), 'Parallel subunits + BAP (full PRC model)': PRCSpikingNetwork( parallel_params, parallel_network_architecture ), } return nets
d20f93eb849134c5104c22e9724bcadf09a4a141
3,653,356
import collections

import pandas as pd


def metric_group_max(df, metric_names=None):
    """Find the step which achieves the highest mean value for a group of metrics."""
    # Use METRIC_NAMES defined at the top as default
    metric_names = metric_names or METRIC_NAMES
    group_to_metrics = collections.defaultdict(set)
    for metric in metric_names.values():
        group_to_metrics[metric.group].add(metric.name)
    group_df = pd.DataFrame()
    for group, metrics in group_to_metrics.items():
        if not all(m in df for m in metrics):
            continue
        # Index with a list; newer pandas rejects set indexers.
        group_df[group] = df[list(metrics)].mean(axis=1)

    # Need to replace nan with large negative value for idxmax
    group_max_step = group_df.fillna(-1e9).idxmax(axis=0)

    metric_max = pd.Series(dtype=float)
    metric_max_step = pd.Series(dtype=float)
    # Series.iteritems was removed in pandas 2.0; items is the replacement.
    for group_name, max_step in group_max_step.items():
        for metric in group_to_metrics[group_name]:
            metric_max[metric] = df[metric][max_step]
            metric_max_step[metric] = max_step
    metric_max = metric_max.reindex(df.columns)
    metric_max_step = metric_max_step.reindex(df.columns)

    return metric_max, metric_max_step
6f58e9f3a18f6185c1956a994b47f9f4fb9936ea
3,653,358
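# Sketch of metric_group_max with a hand-rolled metric table. In the source,
# METRIC_NAMES maps metric keys to objects carrying .name (a DataFrame column)
# and .group; a namedtuple reproduces that shape here.
import collections
import pandas as pd

Metric = collections.namedtuple("Metric", ["name", "group"])
metric_names = {
    "bleu": Metric("bleu", "translation"),
    "rouge1": Metric("rouge1", "summarization"),
    "rouge2": Metric("rouge2", "summarization"),
}
df = pd.DataFrame(
    {"bleu": [10.0, 30.0, 20.0],
     "rouge1": [40.0, 42.0, 44.0],
     "rouge2": [18.0, 20.0, 25.0]},
    index=[100, 200, 300],   # training steps
)
metric_max, metric_max_step = metric_group_max(df, metric_names)
print(metric_max_step["bleu"])     # 200: the translation group peaks there
print(metric_max_step["rouge1"])   # 300: the summarization mean peaks there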
def get_settings_value(definitions: Definitions, setting_name: str):
    """Get a Mathics `Settings` value with name "setting_name" from definitions.
    If setting_name is not defined return None"""
    settings_value = definitions.get_ownvalue(setting_name)
    if settings_value is None:
        return None
    return settings_value.replace.to_python(string_quotes=False)
3d05b234f85a13746b47ca97f3db578d3c7d6856
3,653,359
def show_clusterhost(clusterhost_id): """Get clusterhost.""" data = _get_request_args() return utils.make_json_response( 200, _reformat_host(cluster_api.get_clusterhost( clusterhost_id, user=current_user, **data )) )
a49a0027b8f7ab1ce20e762f960b6d8285d8850c
3,653,360
import math

import numpy as np


def resize3d_cubic(data_in, scale, coordinate_transformation_mode):
    """Tricubic 3d scaling using python"""
    d, h, w = data_in.shape
    new_d, new_h, new_w = [int(round(i * s)) for i, s in zip(data_in.shape, scale)]
    data_out = np.ones((new_d, new_h, new_w))

    def _cubic_spline_weights(t, alpha=-0.5):
        """create cubic spline weights in 1D"""
        t2 = t * t
        t3 = t * t * t
        w1 = alpha * (t3 - 2 * t2 + t)
        w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
        w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
        w4 = -alpha * t3 + alpha * t2
        return np.array([w1, w2, w3, w4])

    indexes = np.mgrid[-1:3, -1:3, -1:3]

    def _get_patch(zint, yint, xint):
        # Get the surrounding values, clamping indices at the volume borders
        indices = indexes.copy()
        indices[0] = np.maximum(np.minimum(indexes[0] + zint, d - 1), 0)
        indices[1] = np.maximum(np.minimum(indexes[1] + yint, h - 1), 0)
        indices[2] = np.maximum(np.minimum(indexes[2] + xint, w - 1), 0)
        p = data_in[indices[0], indices[1], indices[2]]
        return p

    for m in range(new_d):
        for j in range(new_h):
            for k in range(new_w):
                in_z = get_inx(m, d, new_d, coordinate_transformation_mode)
                in_y = get_inx(j, h, new_h, coordinate_transformation_mode)
                in_x = get_inx(k, w, new_w, coordinate_transformation_mode)

                zint = math.floor(in_z)
                zfract = in_z - math.floor(in_z)

                yint = math.floor(in_y)
                yfract = in_y - math.floor(in_y)

                xint = math.floor(in_x)
                xfract = in_x - math.floor(in_x)

                wz = _cubic_spline_weights(zfract)
                wy = _cubic_spline_weights(yfract)
                wx = _cubic_spline_weights(xfract)

                p = _get_patch(zint, yint, xint)

                # Contract the 4x4x4 patch one axis at a time
                line = np.sum(p * wx, axis=-1)
                col = np.sum(line * wy, axis=-1)
                data_out[m, j, k] = np.sum(col * wz)

    return data_out
42f1a14e5c1133c7ce53b5770d62001e1dacbc6d
3,653,361
def seasurface_skintemp_correct(*args):
    """
    Description:

        Wrapper function which by OOI default applies both of the METBK seasurface
        skin temperature correction algorithms (warmlayer, coolskin in coare35vn).

        This behavior is set by the global switches JWARMFL=1 and JCOOLFL=1. The
        switch construction is retained for generality.

        Most of the METBK L2 data products and 2 of the metadata products require
        the skin corrections to be applied before their values can be calculated.

        Warmlayer corrections dsea are added.
        Coolskin corrections dter and dqer are subtracted.

    Implemented by:

        2014-09-01: Russell Desiderio. Initial code.

    Usage (command line spaced out for clarity):

        (usr, tsr, qsr, ut, dter, dqer, tkt, L, zou, zot, zoq,   # coare35vn output
        dt_wrm, tk_pwp, dsea) =                                  # warmlayer output

        seasurface_skintemp_correct
        (rain_rate, timestamp, lon, ztmpwat, tC_sea, wnd, zwindsp,
        tC_air, ztmpair, relhum, zhumair, pr_air,
        Rshort_down, Rlong_down, lat, zinvpbl, jcool, jwarm)

            where

        OUTPUTS (documentation from coare35vn matlab code):

            usr = friction velocity that includes gustiness [m/s]
            tsr = temperature scaling parameter [K]
            qsr = specific humidity scaling parameter [g/g, I changed this
                  from Edson code]
            ut = not an output of the original code
            dter = coolskin temperature depression [degC]
            dqer = coolskin humidity depression [kg/kg]
            tkt = coolskin thickness [m]
            L = Obukhov length scale [m]
            zou = wind roughness length [m]
            zot = thermal roughness length [m]
            zoq = moisture roughness length [m]

        OUTPUTS (documentation from coare35vnWarm matlab code):

            dt_wrm = warming across entire warmlayer [degC]
            tk_pwp = warmlayer thickness [m]
            dsea = additive warmlayer temperature correction [degC];
                   (this is warmlayer's key output)

        INPUTS:

            rain_rate = rainfall [mm/hr]
            timestamp = seconds since 01-01-1900
            lon = longitude [deg]
            ztmpwat = depth of bulk sea temperature measurement [m]
            tC_sea = bulk sea surface temperature [degC]
            wnd = windspeed relative to current [m/s]
            zwindsp = height of windspeed measurement [m]
            tC_air = air temperature [degC]
            ztmpair = height of air temperature measurement [m]
            relhum = relative humidity [%]
            zhumair = height of air humidity measurement [m]
            pr_air = air pressure [mb]
            Rshort_down = downwelling shortwave irradiation [W/m^2]
            Rlong_down = downwelling longwave irradiation [W/m^2]
            lat = latitude [deg]
            zinvpbl = inversion height; default is 600m [m]
            jcool = switch to activate coolskin algorithm (hardwired to 1 = true)
            jwarm = switch to activate warmlayer algorithm (hardwired to 1 = true)

    References:

        Fairall, C.W., E.F. Bradley, J.S. Godfrey, G.A. Wick, J.B. Edson, and
            G.S. Young (1996) Cool-skin and warm-layer effects on sea surface
            temperature. JGR, Vol. 101, No. C1, 1295-1308, 1996.

        OOI (2014). Data Product Specification for L2 BULKFLX Data Products.
            Document Control Number 1341-00370.
            https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >>
            Controlled >> 1000 System Level >>
            1341-00370_Data_Product_Spec_BULKFLX_OOI.pdf)

        OOI (2014). 1341-00370_BULKFLX Artifacts.
            https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >>
            REFERENCE >> Data Product Specification Artifacts >>
            1341-00370_BULKFLX (Original matlab code).

    Notes:

        (1) the jwarm switch selects whether or not the warmlayer code is run.
            the jcool 'switch' is itself a variable within the (original)
            coare35vn code; it was used as a multiplicative factor when
            calculating coolskin corrections, so that when jcool=0, the
            corrections are set to 0.
        (2) for OOI jwarm and jcool are always 1, because all of the OOI sea
            temperature measurements are bulk, not skin, measurements.
        (3) in the more general case, jwarm = jcool always, because:
            (a) jcool = 1 indicates that the input sea temperature values are
                bulk measurements, not surface skin measurements made with an
                infrared thermometer. in this bulk measurement case, both
                coolskin and warmlayer corrections to the bulk temperature are
                required to model the skin temperature (jcool = jwarm = 1).
            (b) jcool = 0 indicates that the input sea temperature values are
                surface skin temperatures directly measured with an infrared
                thermometer, and therefore both the coolskin and warmlayer
                corrections are not to be applied (jcool = jwarm = 0).
        (4) however, both switches are retained for generality in case this
            open source code is appropriated and adapted. (plus, the DPS
            specified archiving the jwarm and jcool switches as metadata).
        (5) the OOI cyberinfrastructure model originally required that each
            data product be specifically calculated by one function. This is
            the main reason that the wrapper function construct is used. In
            addition, I've chosen to explicitly write out its output tuple
            arguments for each data product call, so that the dependence of
            the various data products on these tuple arguments is obvious
            (underscores are used as placeholders for those arguments not used
            in any particular function call). In particular, this construct
            specifically identifies when coolskin and warmlayer temperature
            corrections have been applied to various variables in the original
            code. (For example - the latent heat of vaporization for water
            depends on water temperature, but only the warmlayer correction is
            used to calculate it).
    """
    jwarm = args[-1]   # jwarm (and jcool) are scalars
    if jwarm:
        (dt_wrm, tk_pwp, dsea) = warmlayer(*args[0:-1])   # does not pass jwarm
    else:
        # the tk_pwp parameter is often used as a divisor in warmlayer
        # calculations to compare the warmlayer depth with the depth of the
        # bulk temperature sensor. when the warmlayer code is not run, the
        # desired results will be obtained if dt_wrm and dsea are set to 0
        # where tk_pwp is nonzero so that a divide by zero error does not
        # result. the value chosen is the default value specified in the
        # warmlayer code itself.
        (dt_wrm, tk_pwp, dsea) = (0.0, 19.0, 0.0)

    # construct tuple containing coolskin input arguments;
    # add the warmlayer temperature correction to the measured bulk sea temp.
    coolskin_args = (args[4]+dsea,) + args[5:-1]   # does not pass jwarm
    # append results of warmlayer calculation to output,
    # as is also done in original coare35vn warmlayer matlab code.
    return coare35vn(*coolskin_args) + (dt_wrm, tk_pwp, dsea)
80ccf63dcf961a4fa488a89023c2516e69862f86
3,653,362