content: string, lengths 35-762k
sha1: string, length 40
id: int64, 0-3.66M
def modulelink(module, baseurl=''): """Hyperlink to a module, either locally or on python.org""" if module+'.py' not in local_files: baseurl = 'http://www.python.org/doc/current/lib/module-' return link(baseurl+module+'.html', module)
b907d013b25570d062d49314bbbab637aeb4ffec
3,655,684
from typing import Optional from typing import Callable import inspect def add_reference( *, short_purpose: str, reference: Optional[str] = None, doi: Optional[str] = None ) -> Callable: """Decorator to link a reference to a function or method. Acts as a marker in code where particular algorithms/data/... originate. General execution of code silently passes these markers, but remembers how and where they were called. Which markers were passed in a particular program run can be recalled with `print(BIBLIOGRAPHY)`. One and only one method for providing the reference is allowed. Args: short_purpose (str): Identify the thing being referenced. reference (Optional, str): The reference itself, as a plain text string. doi (Optional, str): DOI of the reference. Returns: The decorated function. """ if reference and doi: raise ValueError("Only one method for providing the reference is allowed.") elif reference: ref = reference elif doi: ref = doi if "doi.org" in doi else f"https://doi.org/{doi}" else: raise ValueError("No reference information provided!") @wrapt.decorator(enabled=lambda: BIBLIOGRAPHY.track_references) def wrapper(wrapped, instance, args, kwargs): source = inspect.getsourcefile(wrapped) line = inspect.getsourcelines(wrapped)[1] identifier = f"{source}:{line}" if identifier in BIBLIOGRAPHY and ref in BIBLIOGRAPHY[identifier].references: return wrapped(*args, **kwargs) if identifier not in BIBLIOGRAPHY: BIBLIOGRAPHY[identifier] = FunctionReference( wrapped.__name__, line, source, [], [] ) BIBLIOGRAPHY[identifier].short_purpose.append(short_purpose) BIBLIOGRAPHY[identifier].references.append(ref) return wrapped(*args, **kwargs) return wrapper
8e1a4c6425213779edabdb0879eacbb44d4e479a
3,655,685
def eval_curvature(poly, x_vals): """ This function returns a vector with the curvature based on path defined by `poly` evaluated on distance vector `x_vals` """ # https://en.wikipedia.org/wiki/Curvature# Local_expressions def curvature(x): a = abs(2 * poly[1] + 6 * poly[0] * x) / (1 + (3 * poly[0] * x**2 + 2 * poly[1] * x + poly[2])**2)**(1.5) return a return np.vectorize(curvature)(x_vals)
0e0e04b7c49b0cdfaa0658df23816d61ac19141c
3,655,687
def templates(): """Return all of the templates and settings.""" return settings
6cf1c151f2e0798e1b26002c29db898bcd3c42cf
3,655,689
def get_semitones(interval_tuplet): """ Takes an interval tuplet of the form returned by get_interval() Returns an int representing the semitones within the interval. """ return mintervals.semitones_from_shorthand(interval_tuplet[0]) + 12*interval_tuplet[1]
179f3894da3607b4fd4aa7915ec5e9c38fcdc592
3,655,690
import numpy def svds(a, k=6, *, ncv=None, tol=0, which='LM', maxiter=None, return_singular_vectors=True): """Finds the largest ``k`` singular values/vectors for a sparse matrix. Args: a (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): A real or complex array with dimension ``(m, n)`` k (int): The number of singular values/vectors to compute. Must be ``1 <= k < min(m, n)``. ncv (int): The number of Lanczos vectors generated. Must be ``k + 1 < ncv < min(m, n)``. If ``None``, default value is used. tol (float): Tolerance for singular values. If ``0``, machine precision is used. which (str): Only 'LM' is supported. 'LM': finds ``k`` largest singular values. maxiter (int): Maximum number of Lanczos update iterations. If ``None``, default value is used. return_singular_vectors (bool): If ``True``, returns singular vectors in addition to singular values. Returns: tuple: If ``return_singular_vectors`` is ``True``, it returns ``u``, ``s`` and ``vt`` where ``u`` is left singular vectors, ``s`` is singular values and ``vt`` is right singular vectors. Otherwise, it returns only ``s``. .. seealso:: :func:`scipy.sparse.linalg.svds` .. note:: This is a naive implementation using cupyx.scipy.sparse.linalg.eigsh as an eigensolver on ``a.H @ a`` or ``a @ a.H``. """ if a.ndim != 2: raise ValueError('expected 2D (shape: {})'.format(a.shape)) if a.dtype.char not in 'fdFD': raise TypeError('unsupprted dtype (actual: {})'.format(a.dtype)) m, n = a.shape if k <= 0: raise ValueError('k must be greater than 0 (actual: {})'.format(k)) if k >= min(m, n): raise ValueError('k must be smaller than min(m, n) (actual: {})' ''.format(k)) aH = a.conj().T if m >= n: aa = aH @ a else: aa = a @ aH if return_singular_vectors: w, x = eigsh(aa, k=k, which=which, ncv=ncv, maxiter=maxiter, tol=tol, return_eigenvectors=True) else: w = eigsh(aa, k=k, which=which, ncv=ncv, maxiter=maxiter, tol=tol, return_eigenvectors=False) w = cupy.maximum(w, 0) t = w.dtype.char.lower() factor = {'f': 1e3, 'd': 1e6} cond = factor[t] * numpy.finfo(t).eps cutoff = cond * cupy.max(w) above_cutoff = (w > cutoff) n_large = above_cutoff.sum() s = cupy.zeros_like(w) s[:n_large] = cupy.sqrt(w[above_cutoff]) if not return_singular_vectors: return s x = x[:, above_cutoff] if m >= n: v = x u = a @ v / s[:n_large] else: u = x v = aH @ u / s[:n_large] u = _augmented_orthnormal_cols(u, k - n_large) v = _augmented_orthnormal_cols(v, k - n_large) return u, s, v.conj().T
9a96fc2fbca100a53ba81f609a58fc0934b5c524
3,655,691
def register_mongodb(app: Flask) -> Flask: """Instantiates database and initializes collections.""" config = app.config # Instantiate PyMongo client mongo = create_mongo_client(app=app, config=config) # Add database db = mongo.db[get_conf(config, "database", "name")] # Add database collection for '/service-info' collection_service_info = mongo.db["service-info"] # Add database collection for '/data_objects' collection_data_objects = mongo.db["data_objects"] collection_data_objects.create_index([("id", ASCENDING)], unique=True, sparse=True) # Add database to app config config["database"]["drs_db"] = collection_data_objects config["database"]["service_info"] = collection_service_info app.config = config return app
6b5bd3f5694470b3ba7dbbf94bacb9beb8ee55cd
3,655,692
def look(table, limit=0, vrepr=None, index_header=None, style=None, truncate=None, width=None): """ Format a portion of the table as text for inspection in an interactive session. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar'], ... ['a', 1], ... ['b', 2]] >>> etl.look(table1) +-----+-----+ | foo | bar | +=====+=====+ | 'a' | 1 | +-----+-----+ | 'b' | 2 | +-----+-----+ >>> # alternative formatting styles ... etl.look(table1, style='simple') === === foo bar === === 'a' 1 'b' 2 === === >>> etl.look(table1, style='minimal') foo bar 'a' 1 'b' 2 >>> # any irregularities in the length of header and/or data ... # rows will appear as blank cells ... table2 = [['foo', 'bar'], ... ['a'], ... ['b', 2, True]] >>> etl.look(table2) +-----+-----+------+ | foo | bar | | +=====+=====+======+ | 'a' | | | +-----+-----+------+ | 'b' | 2 | True | +-----+-----+------+ Three alternative presentation styles are available: 'grid', 'simple' and 'minimal', where 'grid' is the default. A different style can be specified using the `style` keyword argument. The default style can also be changed by setting ``petl.config.look_style``. """ # determine defaults if limit == 0: limit = config.look_limit if vrepr is None: vrepr = config.look_vrepr if index_header is None: index_header = config.look_index_header if style is None: style = config.look_style if width is None: width = config.look_width return Look(table, limit=limit, vrepr=vrepr, index_header=index_header, style=style, truncate=truncate, width=width)
356d6fb1f0afe0f8812e460b8ee3b13f7c4ded4b
3,655,693
def warn_vars_naming_style(messages, line, style): """ Check whether variables and function arguments fit the naming rule.""" naming_style_name = style.Get('CHECK_VAR_NAMING_STYLE') if not naming_style_name: return def is_expr(uwl): return (uwl.tokens and _find_parent(uwl.first.node, None, [syms.expr_stmt])) def is_assignment(uwl): return (is_expr(uwl) and next(filter(lambda t: t.is_name, uwl.tokens), None)) def get_lhs_tokens(uwl): root = _find_parent(uwl.first.node, None, [syms.expr_stmt]) lvalues = _FindLValues(root).lvalues for tok in uwl.tokens: if tok.name == 'EQUAL': break if tok.is_name and id(tok.node) in lvalues: chain = lvalues[id(tok.node)] if (len(chain) == 1 or (len(chain) == 2 and chain[0] == 'self')): yield tok def iter_token_range(first, last): while True: yield first if first is last: break first = first.next_token def iter_parameters(paramlist): for item in paramlist: tokens = iter_token_range(item.first_token, item.last_token) tokens = filter(lambda t: t.name in {'NAME', 'STAR'}, tokens) first = next(tokens, None) if first is None: # This is possible when a comment is added to a function # argument (in some cases, when there is a trailing comma): # # def fn(arg1, # arg2, #comment # arg3, # ): # pass # assert item.first_token.name == 'COMMENT' continue if first.name == 'STAR': yield next(tokens, first) yield first def get_func_args(uwl): for tok in uwl.tokens: if not tok.parameters: continue yield from iter_parameters(tok.parameters) if is_assignment(line): tokens = get_lhs_tokens(line) elif line.tokens and line.is_func_definition: tokens = get_func_args(line) else: return naming_style = REGEXPS['varname'][naming_style_name] for tok in tokens: # explicitly allow UPPER CASE names, because constants should be # named this way regardless of the naming style if not (tok.value == 'self' or tok.value.isupper() or naming_style.match(tok.value)): messages.add(tok, line.AsCode(), Warnings.VAR_NAMING_STYLE, variable=tok.value)
4b8d4cf72395d66ea80f5fbd364cdd47973bb332
3,655,695
import json def validate_schema(path, data, schema): """ Warns and returns the number of errors relating to JSON Schema validation. Uses the `jsonschema <https://python-jsonschema.readthedocs.io/>`__ module. :param object schema: the metaschema against which to validate :returns: the number of errors :rtype: int """ errors = 0 for error in validator(schema, format_checker=FormatChecker()).iter_errors(data): errors += 1 warn(f"{json.dumps(error.instance, indent=2)}\n{error.message} ({'/'.join(error.absolute_schema_path)})\n", SchemaWarning) return errors
abd6a2a05021586da41fd597eb4137d706c08b41
3,655,696
def get_playlist_decreasing_popularity(): """This function is used to return playlists in decreasing popularity""" all_ = PlaylistPopularityPrefixed.objects.all() results = [{"playlist_name": obj.playlist_name, "popularity": obj.played} for obj in all_] return results
45c8bb79af32cba58282910d1841611bc7f42d84
3,655,698
from typing import Any def validate_numeric_scalar(var: Any) -> Any: """Evaluates whether an argument is a single numeric value. Args: var: the input argument to validate Returns: var: the value if it passes validation Raises: AssertionError: `var` was not numeric. """ assert isinstance(var, (int, float)), "Argument must be single numeric value" return var
4db95a31021fd6c8ab0c31d9077a12fa5edd580b
3,655,699
def irrf(valor=0): """ -> Function to calculate the IRRF (Brazilian income tax withholding) amount. :param valor: Base salary amount used to calculate the IRRF. :return: Returns the IRRF amount and the tax rate used. """ irrf = [] if valor < 1903.99: irrf.append(0) irrf.append(0) elif valor >= 1903.99 and valor <= 2826.65: irrf.append((valor * 7.5) / 100 - 142.80) # 7.5% rate, minus the deduction amount. irrf.append('7,5') elif valor >= 2826.66 and valor <= 3751.05: irrf.append((valor * 15) / 100 - 354.80) # 15% rate, minus the deduction amount. irrf.append('15') elif valor >= 3751.06 and valor <= 4664.68: irrf.append((valor * 22.5) / 100 - 636.13) # 22.5% rate, minus the deduction amount. irrf.append('22,5') elif valor > 4664.68: irrf.append((valor * 27.5) / 100 - 869.36) # 27.5% rate, minus the deduction amount. irrf.append('27,5') return irrf
53646b770b2c2359e1e8c4f725b27396cc972050
3,655,700
def find_adcp_files_within_period(working_directory,max_gap=20.0,max_group_size=6): """ Sorts a directory of ADCPRdiWorkHorseData raw files into groups by closeness in time, with groups being separated by more than 'max_gap' minutes. This method first sorts the files by start time, and then splits the observations where they are more than 'max_gap' minutes apart. Inputs: working_directory = directory path containing ADCP raw or netcdf files max_gap = maximum time allowed between ADCP observations when grouping (minutes) max_group_size = maximum number of ADCPData objects per group Returns: List of lists that contain groups of input ADCPData objects """ if os.path.exists(working_directory): data_files = glob.glob(os.path.join(working_directory,'*[rR].000')) data_files.extend(glob.glob(os.path.join(working_directory,'*.nc'))) else: print("Path (%s) not found - exiting." % working_directory) exit() start_times = list() for data_file in data_files: try: a = adcpy.open_adcp(data_file, file_type="ADCPRdiWorkhorseData", num_av=1) start_times.append(a.mtime[0]) except: start_times.append(None) if start_times: gaps, nn, nnan = find_start_time_gaps(start_times) data_files_sorted = [ data_files[i] for i in nn ] # convert nnan boolean list to integer index nnan_i = nnan * range(len(nnan)) data_files_sorted = [ data_files_sorted[i] for i in nnan_i ] return group_according_to_gap(data_files_sorted,gaps,max_gap,max_group_size)
6e3afc4dd8532c579870541fe42519078e86f935
3,655,701
def regular_transport_factory(host, port, env, config_file): """ Basic unencrypted Thrift transport factory function. Returns instantiated Thrift transport for use with cql.Connection. Params: * host .........: hostname of Cassandra node. * port .........: port number to connect to. * env ..........: environment variables (os.environ) - not used by this implementation. * config_file ..: path to cqlsh config file - not used by this implementation. """ tsocket = TSocket.TSocket(host, port) return TTransport.TFramedTransport(tsocket)
bccee131d61a9a251a63ee021e0ab0c5b6033c44
3,655,702
def smoothed_abs(x, eps=1e-8): """A smoothed version of |x| with improved numerical stability.""" return jnp.sqrt(jnp.multiply(x, x) + eps)
f0b63e9482e602b29b85ce3f0d602d9918557ada
3,655,703
def increment(t1, seconds): """Adds seconds to a Time object.""" assert valid_time(t1) seconds += time_to_int(t1) return int_to_time(seconds)
f7807fc12a9ed9350d13d0f8c4c707c79165e9d5
3,655,704
def dense(input_shape, output_shape, output_activation='linear', name=None): """ Build a simple Dense model Parameters ---------- input_shape: shape Input shape output_shape: int Number of actions (Discrete only so far) Returns ------- model: Model Keras tf model """ # Create inputs inputs = Input(shape=input_shape) x = Flatten()(inputs) # Create one dense layer and one layer for output x = Dense(256, activation='tanh')(x) x = Dense(256, activation='tanh')(x) predictions = Dense(output_shape, activation='linear')(x) # Finally build model model = Model(inputs=inputs, outputs=predictions, name=name) model.summary() return model
6f7ba28834ecfe7b5e74aa40ef30fcd9aa531836
3,655,706
def dataset_labels(alldata, tag=None): """ Return label for axis of dataset Args: alldata (DataSet): dataset tag (str): can be 'x', 'y' or 'z' """ if tag == 'x': d = alldata.default_parameter_array() return d.set_arrays[0].label if tag == 'y': d = alldata.default_parameter_array() return d.set_arrays[1].label if tag is None or tag == 'z': d = alldata.default_parameter_array() return d.label return '?'
4ccd3af38d3f18e9fbf43e98f8a898426c6c1440
3,655,707
from typing import Optional from typing import Any from typing import Callable from typing import Tuple from typing import List from typing import Union def spread( template: Template, data: Optional[Any], flavor: Flavor, postprocess: Optional[Callable] = None, start_at: int = 0, replace_missing_with: Optional[str] = None, ) -> Tuple[List[Union["pygsheets.Cell"]], int]: """Spread data into cells. Parameters ---------- template A list of expressions which determines how the cells are layed out. data Data to render. Can be a dictionary, a dataclass, a list; just as long as the template expressions can be applied to the data. flavor Determines what kind of cells to generate. postprocess An optional function to call for each cell once it has been created. start_at The row number where the layout begins. Zero-based. replace_missing_with An optional value to be used when a variable isn't found in the data. An exception is raised if a variable is not found and this is not specified. Returns ------- cells The list of cells. n_rows The number of rows which the cells span over. """ data = data or {} # Unpack the template table = [] for c, col in enumerate(template): cells = [] if callable(col): col = col(data) for r, expr in enumerate(col if isinstance(col, list) else [col]): if callable(expr): expr = expr(data) # expr can be: # - expr # - (expr, postprocessor) # - (expr, postprocessor, note) pp = None note = None if isinstance(expr, tuple): if len(expr) == 2: expr, pp = expr else: expr, pp, note, *_ = expr cell = _Cell( r=r + start_at, c=c, expr=_normalize_expression(expr), note=note, postprocess=pp, ) cells.append(cell) table.append(cells) # We're going to add the positions of the named variables to the data named_variables = {} cell_names = {} for c, col in enumerate(table): for r, cell in enumerate(col): if _is_named_formula(cell.expr): name = cell.expr.split(" = ")[0] named_variables[name] = cell.address cell_names[len(cell_names)] = name elif _is_variable(cell.expr): cell_names[len(cell_names)] = cell.expr[1:] else: cell_names[len(cell_names)] = None if flavor == Flavor.PYGSHEETS.value: cells = [ cell.as_pygsheets( data=data, named_variables=named_variables, replace_missing_with=replace_missing_with, ) for col in table for cell in col ] else: raise ValueError( f"Unknown flavor {flavor}. Available options: {', '.join(f.value for f in Flavor)}" ) if postprocess: for i, cell in enumerate(cells): cells[i] = postprocess(cell, cell_names[i]) n_rows = max(map(len, table)) return cells, n_rows
db354b3d190f1bff5b78c29a3ff6b4021287b27f
3,655,708
from sklearn.model_selection import train_test_split def train_validate_test_split(DataFrame, ratios=(0.6,0.2,0.2)): """ Parameters ---------- DataFrame : pandas.DataFrame DataFrame ratios : tuple E.g. (train, validate, test) = (0.6, 0.25, 0.15) (train, test) = (0.6, 0.4) -> validate = test Returns ------- TrainDataset : pandas.DataFrame ValidateDataset : pandas.DataFrame TestDataset : pandas.DataFrame """ N = len(DataFrame.index) if len(ratios)==3: train_size = ratios[0]/np.sum(ratios) test_size = ratios[2]/np.sum(ratios[1:3]) TrainDataset, TestDataset = train_test_split(DataFrame, train_size=train_size, random_state=42) ValidateDataset, TestDataset = train_test_split(TestDataset, test_size=test_size, random_state=42) elif len(ratios)==2: train_size = ratios[0]/np.sum(ratios) TrainDataset, TestDataset = train_test_split(DataFrame, train_size=train_size, random_state=42) ValidateDataset = TestDataset print('Validate = Test') else: print('ERROR in splitting train, validate, test') return None, None, None n_train = len(TrainDataset.index) n_validate = len(ValidateDataset.index) n_test = len(TestDataset.index) print('Train Samples: {} [{:.1f}%]'.format(n_train, n_train/N*100)) print('Validate Samples: {} [{:.1f}%]'.format(n_validate, n_validate/N*100)) print('Test Samples: {} [{:.1f}%]'.format(n_test, n_test/N*100)) return TrainDataset, ValidateDataset, TestDataset
3d4b8424f66e72d3dd28328afb6465768b1778cb
3,655,709
from typing import Union def is_error(code: Union[Error, int]) -> bool: """Returns True, if error is a (fatal) error, not just a warning.""" if isinstance(code, Error): code = code.code return code >= ERROR
347bde61feb36ce70bf879d713ff9feb41e67085
3,655,710
def unpack_triple(item): """Extracts the indices and values from an object. The argument item can either be an instance of SparseTriple or a sequence of length three. Example usage: >>> st = SparseTriple() >>> ind1, ind2, val = unpack_triple(st) >>> quad_expr = [[], [], []] >>> ind1, ind2, val = unpack_triple(quad_expr) """ try: assert item.isvalid() ind1, ind2, val = item.unpack() except AttributeError: ind1, ind2, val = item[0:3] validate_arg_lengths([ind1, ind2, val]) return ind1, ind2, val
bae536d313140952927875640f925876700bf981
3,655,711
def max_sequence(arr): """ The maximum sum subarray problem consists in finding the maximum sum of a contiguous subsequence in an array or list of integers. :param arr: an array or list of integers. :return: the maximum sum of any contiguous subarray (0 if no subarray has a positive sum). """ best = 0 for x in range(len(arr)): for y in range(len(arr)): if sum(arr[x:y+1]) > best: best = sum(arr[x:y+1]) return best
3ae6dafb4879476ba6e15610645f26299a4c6719
3,655,712
def get_by_username(username): """ Retrieve a user from the database by their username :param username: :return: """ return database.get(User, username, field="username")
354d323c464cbdbaf72b88284b2305657d03a027
3,655,713
def evalPoint(u, v): """ Evaluates the surface point corresponding to normalized parameters (u, v) """ a, b, c, d = 0.5, 0.3, 0.5, 0.1 s = TWO_PI * u t = (TWO_PI * (1 - v)) * 2 r = a + b * cos(1.5 * t) x = r * cos(t) y = r * sin(t) z = c * sin(1.5 * t) dv = PVector() dv.x = (-1.5 * b * sin(1.5 * t) * cos(t) - (a + b * cos(1.5 * t)) * sin(t)) dv.y = (-1.5 * b * sin(1.5 * t) * sin(t) + (a + b * cos(1.5 * t)) * cos(t)) dv.z = 1.5 * c * cos(1.5 * t) q = dv q.normalize() qvn = PVector(q.y, -q.x, 0) qvn.normalize() ww = q.cross(qvn) pt = PVector() pt.x = x + d * (qvn.x * cos(s) + ww.x * sin(s)) pt.y = y + d * (qvn.y * cos(s) + ww.y * sin(s)) pt.z = z + d * ww.z * sin(s) return pt
a3598739dc28e9fcd47539e4a51b00c351eb4e3d
3,655,714
def decode_funcname2(subprogram_die, address): """ Get the function name from an PC address""" for DIE in subprogram_die: try: lowpc = DIE.attributes['DW_AT_low_pc'].value # DWARF v4 in section 2.17 describes how to interpret the # DW_AT_high_pc attribute based on the class of its form. # For class 'address' it's taken as an absolute address # (similarly to DW_AT_low_pc); for class 'constant', it's # an offset from DW_AT_low_pc. highpc_attr = DIE.attributes['DW_AT_high_pc'] highpc_attr_class = describe_form_class(highpc_attr.form) if highpc_attr_class == 'address': highpc = highpc_attr.value elif highpc_attr_class == 'constant': highpc = lowpc + highpc_attr.value else: print('Error: invalid DW_AT_high_pc class:', highpc_attr_class) continue if lowpc <= address < highpc: return DIE.attributes['DW_AT_name'].value except KeyError: continue return None
b322282b9f908311dedbd73ade3d31bbb86cebe8
3,655,715
def get_reddit_slug(permalink): """ Get the reddit slug from a submission permalink, with '_' replaced by '-' Args: permalink (str): reddit submission permalink Returns: str: the reddit slug for a submission """ return list(filter(None, permalink.split("/")))[-1].replace("_", "-")
587239a0b7bbd88e10d49985dd6ebfd3768038d8
3,655,716
def newton_halley(func, x0, fprime, fprime2, args=(), tol=1.48e-8, maxiter=50, disp=True): """ Find a zero from Halley's method using the jitted version of Scipy's. `func`, `fprime`, `fprime2` must be jitted via Numba. Parameters ---------- func : callable and jitted The function whose zero is wanted. It must be a function of a single variable of the form f(x,a,b,c...), where a,b,c... are extra arguments that can be passed in the `args` parameter. x0 : float An initial estimate of the zero that should be somewhere near the actual zero. fprime : callable and jitted The derivative of the function (when available and convenient). fprime2 : callable and jitted The second order derivative of the function args : tuple, optional Extra arguments to be used in the function call. tol : float, optional The allowable error of the zero value. maxiter : int, optional Maximum number of iterations. disp : bool, optional If True, raise a RuntimeError if the algorithm didn't converge Returns ------- results : namedtuple root - Estimated location where function is zero. function_calls - Number of times the function was called. iterations - Number of iterations needed to find the root. converged - True if the routine converged """ if tol <= 0: raise ValueError("tol is too small <= 0") if maxiter < 1: raise ValueError("maxiter must be greater than 0") # Convert to float (don't use float(x0); this works also for complex x0) p0 = 1.0 * x0 funcalls = 0 status = _ECONVERR # Halley Method for itr in range(maxiter): # first evaluate fval fval = func(p0, *args) funcalls += 1 # If fval is 0, a root has been found, then terminate if fval == 0: status = _ECONVERGED p = p0 itr -= 1 break fder = fprime(p0, *args) funcalls += 1 # derivative is zero, not converged if fder == 0: p = p0 break newton_step = fval / fder # Halley's variant fder2 = fprime2(p0, *args) p = p0 - newton_step / (1.0 - 0.5 * newton_step * fder2 / fder) if abs(p - p0) < tol: status = _ECONVERGED break p0 = p if disp and status == _ECONVERR: msg = "Failed to converge" raise RuntimeError(msg) return _results((p, funcalls, itr + 1, status))
96531b47a399ee0d897e5feadaa93eb56bee2b52
3,655,717
def staff_dash(request): """Route for displaying the staff dashboard of the site. """ # Empty context to populate: context = {} def get_account_name(path): """Method contains logic to extract the app name from a url path. Method uses the django.urls.resolve method with basic string splitting. """ try: appname = resolve(path).func.__module__.split(".")[1] except: appname = None return appname # Ensuring that the user is a staff member if not redirect home: if request.user.is_staff is False: return redirect("user_account_dashboard") else: # Determining a one month window for querying request data: prev_month = date.today() - timedelta(days=30) # Querying all of the requests made to the database in the last month: max_queryset = Request.objects.filter(time__gt=prev_month) # QuerySet to Dataframe Conversions: requests_timeseries = max_queryset.values_list("time", "response", "method", "path", "user") timeframe_df = pd.DataFrame.from_records(requests_timeseries, columns=["time", "response", "method", "path", "user"]) # Adding columns: timeframe_df["_count"] = 1 timeframe_df['app'] = timeframe_df["path"].apply(lambda x: get_account_name(x)) timeframe_df.set_index(timeframe_df['time'], inplace=True) # Resampling/Transforming data: daily_resample_get = timeframe_df.loc[timeframe_df['method'] == 'GET', "_count"].squeeze().resample('H').sum() daily_resample_posts = timeframe_df.loc[timeframe_df['method'] != 'GET', "_count"].squeeze().resample('H').sum() # Extracting Series for all response codes: daily_200_response = timeframe_df.loc[timeframe_df["response"] < 300, "_count"] daily_300_response = timeframe_df.loc[ (timeframe_df["response"] >= 300) & (timeframe_df["response"] < 400), "_count"] daily_400_response = timeframe_df.loc[ (timeframe_df["response"] >= 400) & (timeframe_df["response"] < 500), "_count"] daily_500_response = timeframe_df.loc[timeframe_df["response"] >= 500, "_count"] # Building a dict of unique get/post timeseries based on unique apps: app_timeseries_dict = {} # Getting relevant list of installed apps: third_party_apps = [app.split(".")[0] for app in settings.INSTALLED_APPS if not app.startswith("django.") and app not in ['rest_framework', 'rest_framework.authtoken', 'rest_auth', 'request'] ] for app in third_party_apps: # Nested dict structure for GET and POST request storage: application_dict = {} # Populating application dict w/ GET and POST request timeseries: try: app_timeseries_get = timeframe_df.loc[ (timeframe_df["app"] == app) & (timeframe_df["method"] == "GET"), "_count"].resample("H").sum() application_dict["GET"] = { "Data" : app_timeseries_get.values.tolist(), "Index": app_timeseries_get.index.tolist() } except: application_dict["GET"] = [0] * len(daily_resample_get.index) try: app_timeseries_post = timeframe_df.loc[ (timeframe_df["app"] == app) & (timeframe_df["method"] == "POST"), "_count"].resample("H").sum() application_dict["POST"] = { "Data": app_timeseries_post.values.tolist(), "Index": app_timeseries_post.index.tolist() } except: application_dict["POST"] = [0] * len(daily_resample_get.index) # Fully Building nested dict: app_timeseries_dict[app] = application_dict print(len(application_dict["GET"]["Data"]), len(application_dict["GET"]['Index'])) # Serializing dataframe columns to pass to template: context['get_datetime'] = daily_resample_get.index.tolist() # Error-Catching daily response codes when resampling: response_code_dict = {} try: response_code_dict[200] = daily_200_response.squeeze().resample("H").sum().values.tolist() except Exception: response_code_dict[200] = [0] * len(daily_resample_get.index) try: response_code_dict[300] = daily_300_response.squeeze().resample("H").sum().values.tolist() except Exception: response_code_dict[300] = [0] * len(daily_resample_get.index) try: response_code_dict[400] = daily_400_response.squeeze().resample("H").sum().values.tolist() except Exception: response_code_dict[400] = [0] * len(daily_resample_get.index) try: response_code_dict[500] = daily_500_response.squeeze().resample("H").sum().values.tolist() except Exception: response_code_dict[500] = [0] * len(daily_resample_get.index) # Populating Context: context['app_timeseries'] = app_timeseries_dict context['get_requests_count'] = daily_resample_get.values.tolist() context['post_requests_count'] = daily_resample_posts.values.tolist() context['response_codes'] = response_code_dict return render(request, "accounts/staff_dashboard.html", context)
83d1d3027b64349dba5560934ba9d7bdb3536c91
3,655,718
import csv def read_v1_file(path: str = "CBETHUSD.csv") -> tuple: """ Read the data from the file path, reconstruct the format of the data and return a 3d matrix. """ lst = [] res = [] with open(path) as data: reader = csv.reader(data) next(reader) # skip the header row for row in reader: lst.append(float(row[1])) lst_con = [] for i in range(len(lst) - 30): temp = lst[i:i + 25] lst_con.append(temp) res_temp = lst[i + 30] - temp[-1] res_cat = [0, 0, 0] if abs(res_temp) < abs(temp[-1] * 0.05): res_cat[1] = 1 elif res_temp < 0: res_cat[0] = 1 else: res_cat[2] = 1 res.append(res_cat) np_lst = np.array(lst_con).reshape(len(lst_con), 25, 1) np_res = np.array(res) return (np_lst, np_res)
6fd80fda5f327464e63f34df1f16b923349bc7a4
3,655,719
import torch def get_adjacent_th(spec: torch.Tensor, filter_length: int = 5) -> torch.Tensor: """Zero-pad and unfold stft, i.e., add zeros to the beginning so that, using the multi-frame signal model, there will be as many output frames as input frames. Args: spec (torch.Tensor): input spectrum (B, F, T, 2) filter_length (int): length for frame extension Returns: ret (torch.Tensor): output spectrum (B, F, T, filter_length, 2) """ # noqa: D400 return ( torch.nn.functional.pad(spec, pad=[0, 0, filter_length - 1, 0]) .unfold(dimension=-2, size=filter_length, step=1) .transpose(-2, -1) .contiguous() )
4009b41fd4e729e16c749f4893f61b61ca922215
3,655,720
def K2(eps): """ Radar dielectric factor |K|**2 Parameters ---------- eps : complex nd array of complex relative dielectric constants Returns ------- nd - float Radar dielectric factor |K|**2 real """ K_complex = (eps-1.0)/(eps+2.0) return (K_complex*K_complex.conj()).real
8754bee38a46de14d205764c4843cad7c4d5d88f
3,655,721
def permutation_test_mi(x, y, B=100, random_state=None, **kwargs): """Permutation test for mutual information Parameters ---------- x : 1d array-like Array of n elements y : 1d array-like Array of n elements B : int Number of permutations random_state : int Sets seed for random number generator Returns ------- p : float Achieved significance level """ np.random.seed(random_state) # Estimate correlation from original data theta = mi(x, y) # Permutations y_ = y.copy() theta_p = np.zeros(B) for i in range(B): np.random.shuffle(y_) theta_p[i] = mi(x, y_) # Achieved significance level return np.mean(theta_p >= theta)
ea60f7ddf483f3a095971ab3c52a07e34ac863d5
3,655,722
def convert_time_units(value, value_unit="s", result_unit="s", case_sensitive=True): """ Convert `value` from `value_unit` to `result_unit`. The possible time units are ``'s'``,``'ms'``, ``'us'``, ``'ns'``, ``'ps'``, ``'fs'``, ``'as'``. If ``case_sensitive==True``, matching units is case sensitive. """ if string_utils.string_equal(value_unit,"s",case_sensitive=case_sensitive): value_s=value elif string_utils.string_equal(value_unit,"ms",case_sensitive=case_sensitive): value_s=value*1E-3 elif string_utils.string_equal(value_unit,"us",case_sensitive=case_sensitive): value_s=value*1E-6 elif string_utils.string_equal(value_unit,"ns",case_sensitive=case_sensitive): value_s=value*1E-9 elif string_utils.string_equal(value_unit,"ps",case_sensitive=case_sensitive): value_s=value*1E-12 elif string_utils.string_equal(value_unit,"fs",case_sensitive=case_sensitive): value_s=value*1E-15 elif string_utils.string_equal(value_unit,"as",case_sensitive=case_sensitive): value_s=value*1E-18 else: raise IOError("unrecognized length unit: {0}".format(value_unit)) if string_utils.string_equal(result_unit,"s",case_sensitive=case_sensitive): return value_s elif string_utils.string_equal(result_unit,"ms",case_sensitive=case_sensitive): return value_s*1E3 elif string_utils.string_equal(result_unit,"us",case_sensitive=case_sensitive): return value_s*1E6 elif string_utils.string_equal(result_unit,"ns",case_sensitive=case_sensitive): return value_s*1E9 elif string_utils.string_equal(result_unit,"ps",case_sensitive=case_sensitive): return value_s*1E12 elif string_utils.string_equal(result_unit,"fs",case_sensitive=case_sensitive): return value_s*1E15 elif string_utils.string_equal(result_unit,"as",case_sensitive=case_sensitive): return value_s*1E18 else: raise IOError("unrecognized length unit: {0}".format(result_unit))
dab3fdb88a5d137d45efe440a6075cd0339194ac
3,655,723
import tqdm def compute_distribution_clusters(columns: list, dataset_name: str, threshold: float, pool: Pool, chunk_size: int = None, quantiles: int = 256): """ Algorithm 2 of the paper "Automatic Discovery of Attributes in Relational Databases" from M. Zhang et al. [1]. This algorithm captures which columns contain data with similar distributions based on the EMD distance metric. Parameters --------- columns : list(str) The columns of the database dataset_name : str Other name of the dataset threshold : float The conservative global EMD cutoff threshold described in [1] pool: multiprocessing.Pool The process pool that will be used in the pre-processing of the table's columns chunk_size : int, optional The number of chunks of each job process (default let the framework decide) quantiles : int, optional The number of quantiles that the histograms are split on (default is 256) Returns ------- list(list(str)) A list that contains the distribution clusters that contain the column names in the cluster """ combinations = list(column_combinations(columns, dataset_name, quantiles, intersection=False)) total = len(combinations) if chunk_size is None: chunk_size = int(calc_chunksize(pool._processes, total)) A: dict = transform_dict(dict(tqdm(pool.imap_unordered(process_emd, combinations, chunksize=chunk_size), total=total))) edges_per_column = list(pool.map(parallel_cutoff_threshold, list(cuttoff_column_generator(A, columns, dataset_name, threshold)))) graph = create_graph(columns, edges_per_column) connected_components = list(nx.connected_components(graph)) return connected_components
bdbdf233c02f6eced3504543c3adbd8ea12505f7
3,655,724
def get_eventframe_sequence(event_deque, is_x_first, is_x_flipped, is_y_flipped, shape, data_format, frame_width, frame_gen_method): """ Given a single sequence of x-y-ts events, generate a sequence of binary event frames. """ inp = [] while len(event_deque) > 0: inp.append(get_binary_frame(event_deque, is_x_first, is_x_flipped, is_y_flipped, shape, data_format, frame_width, frame_gen_method)) return np.stack(inp, -1)
9d65bfa59c42b327cc7f5c02a044f545ec5f5a5e
3,655,725
def creation_sequence_to_weights(creation_sequence): """ Returns a list of node weights which create the threshold graph designated by the creation sequence. The weights are scaled so that the threshold is 1.0. The order of the nodes is the same as that in the creation sequence. """ # Turn input sequence into a labeled creation sequence first = creation_sequence[0] if isinstance(first, str): # creation sequence if isinstance(creation_sequence, list): wseq = creation_sequence[:] else: wseq = list(creation_sequence) # string like 'ddidid' elif isinstance(first, tuple): # labeled creation sequence wseq = [v[1] for v in creation_sequence] elif isinstance(first, int): # compact creation sequence wseq = uncompact(creation_sequence) else: raise TypeError("Not a valid creation sequence type") # pass through twice--first backwards wseq.reverse() w = 0 prev = 'i' for j, s in enumerate(wseq): if s == 'i': wseq[j] = w prev = s elif prev == 'i': prev = s w += 1 wseq.reverse() # now pass through forwards for j, s in enumerate(wseq): if s == 'd': wseq[j] = w prev = s elif prev == 'd': prev = s w += 1 # Now scale weights if prev == 'd': w += 1 wscale = 1. / float(w) return [ww * wscale for ww in wseq] # return wseq
80147c53ccb7f44fdca148cc422a0c149a5b7864
3,655,726
def get_seg_features(string): """ Segment text with jieba features are represented in bies format s donates single word """ seg_feature = [] for word in jieba.cut(string): if len(word) == 1: seg_feature.append(0) else: tmp = [2] * len(word) tmp[0] = 1 tmp[-1] = 3 seg_feature.extend(tmp) return seg_feature
505ba3064cacc2719e11126ce504b8c84abe10e9
3,655,727
def print_device_info(nodemap): """ This function prints the device information of the camera from the transport layer; please see NodeMapInfo example for more in-depth comments on printing device information from the nodemap. :param nodemap: Transport layer device nodemap. :type nodemap: INodeMap :return: True if successful, False otherwise. :rtype: bool """ print('\n*** DEVICE INFORMATION ***\n') try: result = True node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation')) if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information): features = node_device_information.GetFeatures() for feature in features: node_feature = PySpin.CValuePtr(feature) print('%s: %s' % (node_feature.GetName(), node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable')) else: print('Device control information not available.') except PySpin.SpinnakerException as ex: print('Error: %s' % ex.message) return False return result
7f0affa8e8acaab48df8dc96c631ca9043f07482
3,655,728
from typing import Optional def coerce_to_pendulum_date(x: PotentialDatetimeType, assume_local: bool = False) -> Optional[Date]: """ Converts something to a :class:`pendulum.Date`. Args: x: something that may be coercible to a date assume_local: if ``True``, assume local timezone; if ``False``, assume UTC Returns: a :class:`pendulum.Date`, or ``None``. Raises: pendulum.parsing.exceptions.ParserError: if a string fails to parse ValueError: if no conversion possible """ p = coerce_to_pendulum(x, assume_local=assume_local) return None if p is None else p.date()
d2fb5d830736290eb9ddadf9fcd664d0cba88d4b
3,655,730
def loss_fixed_depl_noquench(params, loss_data): """ MSE loss function for fitting individual stellar mass histories. Only main sequence efficiency parameters. Quenching is deactivated. Depletion time is fixed at tau=0Gyr, i.e. gas conversion is instantaenous. """ ( lgt, dt, dmhdt, log_mah, sm_target, log_sm_target, sfr_target, fstar_target, index_select, fstar_indx_high, fstar_tdelay, ssfrh_floor, weight, weight_fstar, t_fstar_max, fixed_tau, q_params, ) = loss_data sfr_params = [*params[0:4], fixed_tau] _res = calculate_sm_sfr_fstar_history_from_mah( lgt, dt, dmhdt, log_mah, sfr_params, q_params, index_select, fstar_indx_high, fstar_tdelay, ) mstar, sfr, fstar = _res mstar = jnp.log10(mstar) fstar = jnp.log10(fstar) sfr_res = 1e8 * (sfr - sfr_target) / sm_target sfr_res = jnp.clip(sfr_res, -1.0, 1.0) loss = jnp.mean(((mstar - log_sm_target) / weight) ** 2) loss += jnp.mean(((fstar - fstar_target) / weight_fstar) ** 2) loss += jnp.mean((sfr_res / weight) ** 2) qt = _get_bounded_qt(q_params[0]) loss += _sigmoid(qt - t_fstar_max, 0.0, 50.0, 100.0, 0.0) return loss
c987b17b2a64081006addf8ed9af6a3535b77bdd
3,655,731
def identityMatrix(nrow, ncol): """ Create an identity matrix of the given dimensions Works for square Matrices Returns a Matrix Object """ if nrow == ncol: t = [] for i in range(nrow): t.append([]) for j in range(ncol): if i == j: t[i].append(1) else: t[i].append(0) s = Matrix(nrow=nrow, ncol=ncol, data=t) s.matrix.symmetry=True s.matrix.trace=nrow s.matrix.invertibility=True setattr(s.matrix,"identityMatrix",True) return s else: raise incompaitableTypeException
3584c75cd0683f4dd547ac9708d03cdc5500dcef
3,655,733
def extract_packages(matched, package_source): """ Extract packages installed in the "Successfully installed" line e.g. Successfully installed Abjad Jinja2-2.10 MarkupSafe-1.0 PyPDF2-1.26.0 Pygments-2.2.0 alabaster-0.7.10 \ babel-2.5.1 bleach-2.1.2 decorator-4.1.2 docutils-0.14 entrypoints-0.2.3 html5lib-1.0.1 imagesize-0.7.1 \ ipykernel-4.7.0 ipython-6.2.1 ipython-genutils-0.2.0 ipywidgets-7.1.0 jedi-0.11.1 jsonschema-2.6.0 \ jupyter-1.0.0 jupyter-client-5.2.1 jupyter-console-5.2.0 jupyter-core-4.4.0 mistune-0.8.3 nbconvert-5.3.1 \ nbformat-4.4.0 notebook-5.2.2 pandocfilters-1.4.2 parso-0.1.1 pexpect-4.3.1 pickleshare-0.7.4 \ prompt-toolkit-1.0.15 .... """ result = [] package_list = matched.groups()[0].split(' ') for package in package_list: package, version = split_package_and_version(package) if not version or not package: continue else: source = package_source.get(package) if source is None: continue # The following line is recommended when developing # assert source == PACKAGE_SOURCE_INDEX result.append('{}=={}'.format(package, version)) return result
a4cf9c18122dd89b2c46647fa328ab72c4d7dd8a
3,655,734
from typing import List from typing import Optional from datetime import datetime def create_relationship( relationship_type: str, created_by: Identity, source: _DomainObject, target: _DomainObject, confidence: int, object_markings: List[MarkingDefinition], start_time: Optional[datetime] = None, stop_time: Optional[datetime] = None, ) -> Relationship: """Create a relationship.""" return Relationship( created_by_ref=created_by, relationship_type=relationship_type, source_ref=source, target_ref=target, start_time=start_time, stop_time=stop_time, confidence=confidence, object_marking_refs=object_markings, allow_custom=True, )
4d961aae8521c53c61090823484e8b12862b29e0
3,655,735
def _find_partition(G, starting_cell): """ Find a partition of the vertices of G into cells of complete graphs Parameters ---------- G : NetworkX Graph starting_cell : tuple of vertices in G which form a cell Returns ------- List of tuples of vertices of G Raises ------ NetworkXError If a cell is not a complete subgraph then G is not a line graph """ G_partition = G.copy() P = [starting_cell] # partition set G_partition.remove_edges_from(list(combinations(starting_cell, 2))) # keep list of partitioned nodes which might have an edge in G_partition partitioned_vertices = list(starting_cell) while G_partition.number_of_edges() > 0: # there are still edges left and so more cells to be made u = partitioned_vertices[-1] deg_u = len(G_partition[u]) if deg_u == 0: # if u has no edges left in G_partition then we have found # all of its cells so we do not need to keep looking partitioned_vertices.pop() else: # if u still has edges then we need to find its other cell # this other cell must be a complete subgraph or else G is # not a line graph new_cell = [u] + list(G_partition[u]) for u in new_cell: for v in new_cell: if (u != v) and (v not in G_partition[u]): msg = ( "G is not a line graph" "(partition cell not a complete subgraph)" ) raise nx.NetworkXError(msg) P.append(tuple(new_cell)) G_partition.remove_edges_from(list(combinations(new_cell, 2))) partitioned_vertices += new_cell return P
92c63176d6c2f366c549a24982dbc64c9879a9b7
3,655,736
import torch def projection_from_Rt(rmat, tvec): """ Compute the projection matrix from Rotation and translation. """ assert len(rmat.shape) >= 2 and rmat.shape[-2:] == (3, 3), rmat.shape assert len(tvec.shape) >= 2 and tvec.shape[-2:] == (3, 1), tvec.shape return torch.cat([rmat, tvec], dim=-1)
90039ba7002be31d347b7793d542b1ff37abae3e
3,655,737
def verify_df(df, constraints_path, epsilon=None, type_checking=None, **kwargs): """ Verify that (i.e. check whether) the Pandas DataFrame provided satisfies the constraints in the JSON .tdda file provided. Mandatory Inputs: df A Pandas DataFrame, to be checked. constraints_path The path to a JSON .tdda file (possibly generated by the discover_constraints function, below) containing constraints to be checked. Optional Inputs: epsilon When checking minimum and maximum values for numeric fields, this provides a tolerance. The tolerance is a proportion of the constraint value by which the constraint can be exceeded without causing a constraint violation to be issued. With the default value of epsilon (EPSILON_DEFAULT = 0.01, i.e. 1%), values can be up to 1% larger than a max constraint without generating constraint failure, and minimum values can be up to 1% smaller that the minimum constraint value without generating a constraint failure. (These are modified, as appropraite, for negative values.) NOTE: A consequence of the fact that these are proportionate is that min/max values of zero do not have any tolerance, i.e. the wrong sign always generates a failure. type_checking: 'strict' or 'sloppy'. Because Pandas silently, routinely and automatically "promotes" integer and boolean columns to reals and objects respectively if they contain nulls, strict type checking can be problematical in Pandas. For this reason, type_checking defaults to 'sloppy', meaning that type changes that could plausibly be attriuted to Pandas type promotion will not generate constraint values. If this is set to strict, a Pandas "float" column c will only be allowed to satisfy a an "int" type constraint if c.dropnulls().astype(int) == c.dropnulls(). Similarly, Object fields will satisfy a 'bool' constraint only if c.dropnulls().astype(bool) == c.dropnulls(). report: 'all' or 'fields' This controls the behaviour of the __str__ method on the resulting PandasVerification object (but not its content). The default is 'all', which means that all fields are shown, together with the verification status of each constraint for that field. If report is set to 'fields', only fields for which at least one constraint failed are shown. NOTE: The method also accepts 'constraints', which will be used to indicate that only failing constraints for failing fields should be shown. This behaviour is not yet implented. Returns: PandasVerification object. This object has attributes: passed # Number of passing constriants failures # Number of failing constraints It also has a .to_frame() method for converting the results of the verification to a Pandas DataFrame, and a __str__ method to print both the detailed and summary results of the verification. Example usage (see tdda/constraints/examples/simple_verification.py for slightly fuller example). import pandas as pd from tdda.constraints.pdconstraints import verify_df df = pd.DataFrame({'a': [0, 1, 2, 10, pd.np.NaN], 'b': ['one', 'one', 'two', 'three', pd.np.NaN]}) v = verify_df(df, 'example_constraints.tdda') print('Passes:', v.passes) print('Failures: %d\n' % v.failures) print(str(v)) print(v.to_frame()) """ pdv = PandasConstraintVerifier(df, epsilon=epsilon, type_checking=type_checking) constraints = DatasetConstraints(loadpath=constraints_path) return verify(constraints, pdv.verifiers(), VerificationClass=PandasVerification, **kwargs)
477180d390e3090ec7d8211b8cee7235d58d4eba
3,655,738
import re def _getallstages_pm(pmstr): """pmstr: a pipelinemodel name in quote return a df: of all leaf stages of transformer. to print return in a cell , use print_return(df) """ pm=eval(pmstr) output=[] for i,s in enumerate(pm.stages): if str(type(s))=="<class 'pyspark.ml.pipeline.PipelineModel'>": pmstr2=f"{pmstr}.stages[{i}]" output.append(_getallstages_pm(pmstr2)) else: tn=re.sub(r"^.*\.(\w+)\b.*",r"\1",str(type(s))) pmstr2=f"{pmstr}.stages[{i}]" temp=pd.DataFrame([[pmstr2,tn,None,None,None]],columns=['stage','transformer_name','inputcol','outputcol','other_parameters']) if temp.transformer_name.iloc[0]=="SQLTransformer": st='"statement=\n'+re.sub('\t',' ',eval(pmstr2).getStatement())+'"' if len(st)>=32767: idx1=st.rfind('\n',0,10000) idx2=st.find('\n',len(st)-10000,len(st)) newst=st[:idx1]+"\n\n..........\n"+st[idx2:] st=newst.replace("statement=","TRUNCATED !!!\n\nstatement=") temp["other_parameters"]=st elif temp.transformer_name.iloc[0]=="CountVectorizerModel": temp["other_parameters"]="vocabulary="+str(eval(pmstr2).vocabulary) elif temp.transformer_name.iloc[0]=="RFormulaModel": temp["outputcol"]=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='featuresCol'] form="formular: "+[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='formula'][0] temp["other_parameters"]=f"number of inputCol in formula: {form.count('+')+1}" elif temp.transformer_name.iloc[0]=='LogisticRegressionModel': label=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='labelCol'][0] elasticNetParam=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='elasticNetParam'][0] regParam=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='regParam'][0] temp["other_parameters"]=f"labelCol : {label}, elasticNetParam : {elasticNetParam}, regParam : {regParam}" else: ip=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='inputCol'] if len(ip)>0: temp["inputcol"]=ip op=[value for key, value in eval(pmstr2).extractParamMap().items() if key.name=='outputCol'] if len(op)>0: temp["outputcol"]=op output.append(temp) outputdf=pd.concat(output) outputdf=outputdf.reset_index(drop=True) return outputdf
8bb5643361aa5aa74c1ba477d725b575a2f15f0b
3,655,739
def midpVector(x): """ return midpoint value (=average) in each direction """ if type(x) != list: raise Exception("must be list") dim = len(x) #nx = x[0].shape for i in range(1,dim): if type(x[i]) != np.ndarray: raise Exception("must be numpy array") #if x[i].shape != nx: # raise Exception("dimensions mismatch") avgx = [] for ifield in range(dim): avgx.append([]) avgx[ifield] = midpScalar(x[ifield]) return avgx
784dcfdeb012aa114167d4b965409ca2f81ed414
3,655,741
def buy_ticket(email, name, quantity): """ Attempt to buy a ticket in the database :param email: the email of the ticket buyer :param name: the name of the ticket being bought :param quantity: the quantity of tickets being bought :return: an error message if there is any, or None if the purchase succeeds """ user = User.query.filter_by(email=email).first() tik = Ticket.query.filter_by(name=name).first() user.balance = user.balance - (tik.price * quantity * 1.40) if tik.quantity == quantity: db.session.delete(tik) else: tik.quantity = tik.quantity - quantity db.session.commit() return None
cd64f745a44180594edce14eb0645f808ac645d8
3,655,742
from typing import Tuple def update_bounds( sig: float, eps: float, target_eps: float, bounds: np.ndarray, bound_eps: np.ndarray, consecutive_updates: int ) -> Tuple[np.ndarray, np.ndarray, int]: # noqa:E121,E125 """ Updates bounds for sigma around a target privacy epsilon. Updates the lower bound for sigma if `eps` is larger than `target_eps` and the upper bound otherwise. :param sig: A new value for sigma. :param eps: The corresponding value for epsilon. :param target_eps: The target value for epsilon. :param bounds: Tuple containing a lower and upper bound for the sigma corresponding to target_eps. :param bound_eps: The corresponding epsilon values for the bounds. :param consecutive_updates: Tuple counting the number of consecutive updates for lower and upper bound. :return: updated bounds, bound_eps and consecutive_updates """ assert(eps <= bound_eps[0]) assert(eps >= bound_eps[1]) if eps > target_eps: bounds[0] = sig bound_eps[0] = eps consecutive_updates = [consecutive_updates[0] + 1, 0] else: bounds[1] = sig bound_eps[1] = eps consecutive_updates = [0, consecutive_updates[1] + 1] return bounds, bound_eps, consecutive_updates
a3426220fe20a4857ac51048ab8d703decaf3e9f
3,655,743
def usd(value): """Format value as USD.""" return f"${value:,.2f}"
022502cebaced49e21a311fe0bed6feead124ee9
3,655,745
def random_mindist(N, mindist, width, height): """Create random 2D points with a minimal distance to each other. Args: N(int): number of points to generate mindist(float): Minimal distance between each point width(float): Specifies [0, width) for the x-coordinate height(float): Specifies [0, height) for the y-coordinate Returns: np.array(shape=[N, 2]): matrix of coordinates """ Pts = np.empty(shape=[0, 2]) n = 0 while n < N: X = random_uniform(1, width, height) # rejection sampling if closest_euclidean(X, Pts) > mindist: Pts = np.vstack((Pts, X)) n = n+1 return Pts
261627e47e72b95d90f9b9c409ce61535f2a4cf7
3,655,746
def deactivate_spotting(ID): """ Function to deactivate a spotting document in Elasticsearch Params: ID::str id of the document to deactivate Returns: bool If the changes have been applied or not """ if not ID: return False try: global INDEX body = get_document(INDEX, ID)['_source'] body['is_active'] = False create_or_update_document(INDEX, ID, body) return True except NotFoundError: print("No documents found at deactivate_spotting") return False except Exception as e: print("Exception @ deactivate_spotting\n{}".format(e)) return None
381c79a08e990b64a0a1032b5b54b874b8c53926
3,655,747
import watools.General.raster_conversions as RC import watools.Functions.Start as Start import numpy as np def Fraction_Based(nc_outname, Startdate, Enddate): """ This functions calculated monthly total supply based ETblue and fractions that are given in the get dictionary script Parameters ---------- nc_outname : str Path to the NetCDF containing the data Startdate : str Contains the start date of the model 'yyyy-mm-dd' Enddate : str Contains the end date of the model 'yyyy-mm-dd' Returns ------- DataCube_Tot_Sup : Array Array containing the total supply [time,lat,lon] DataCube_Non_Consumed : Array Array containing the amount of non consumed water [time,lat,lon] """ # import water accounting plus modules # import general modules # Open Arrays DataCube_LU = RC.Open_nc_array(nc_outname, "Landuse") DataCube_ETblue = RC.Open_nc_array(nc_outname, "Blue_Evapotranspiration", Startdate, Enddate) # Get Classes LU_Classes = Start.Get_Dictionaries.get_sheet5_classes() LU_Classes_Keys = list(LU_Classes.keys()) # Get fractions consumed_fractions_dict = Start.Get_Dictionaries.consumed_fractions() # Create Array for consumed fractions DataCube_Consumed_Fractions = np.ones(DataCube_LU.shape) * np.nan # Create array with consumed_fractions for Classes_LULC in LU_Classes_Keys: Values_LULC = LU_Classes[Classes_LULC] for Value_LULC in Values_LULC: DataCube_Consumed_Fractions[DataCube_LU == Value_LULC] = consumed_fractions_dict[Classes_LULC] # Calculated Total Supply DataCube_Tot_Sup = DataCube_ETblue[:,:,:] / DataCube_Consumed_Fractions[None,:,:] # Calculated Non consumed DataCube_Non_Consumed = DataCube_Tot_Sup - DataCube_ETblue return(DataCube_Tot_Sup, DataCube_Non_Consumed)
378c149cc239eee31b10d90235b78cf15527b0e0
3,655,748
def _qrd_solve(r, pmut, ddiag, bqt, sdiag): """Solve an equation given a QR factored matrix and a diagonal. Parameters: r - **input-output** n-by-n array. The full lower triangle contains the full lower triangle of R. On output, the strict upper triangle contains the transpose of the strict lower triangle of S. pmut - n-vector describing the permutation matrix P. ddiag - n-vector containing the diagonal of the matrix D in the base problem (see below). bqt - n-vector containing the first n elements of B Q^T. sdiag - output n-vector. It is filled with the diagonal of S. Should be preallocated by the caller -- can result in somewhat greater efficiency if the vector is reused from one call to the next. Returns: x - n-vector solving the equation. Compute the n-vector x such that A^T x = B, D x = 0 where A is an n-by-m matrix, B is an m-vector, and D is an n-by-n diagonal matrix. We are given information about pivoted QR factorization of A with permutation, such that A P = R Q where P is a permutation matrix, Q has orthogonal rows, and R is lower triangular with nonincreasing diagonal elements. Q is m-by-m, R is n-by-m, and P is n-by-n. If x = P z, then we need to solve R z = B Q^T, P^T D P z = 0 (why the P^T? and do these need to be updated for the transposition?) If the system is rank-deficient, these equations are solved as well as possible in a least-squares sense. For the purposes of the LM algorithm we also compute the lower triangular n-by-n matrix S such that P^T (A^T A + D D) P = S^T S. (transpose?) """ n, m = r.shape # "Copy r and bqt to preserve input and initialize s. In # particular, save the diagonal elements of r in x." Recall that # on input only the full lower triangle of R is meaningful, so we # can mirror that into the upper triangle without issues. for i in range(n): r[i,i:] = r[i:,i] x = r.diagonal().copy() zwork = bqt.copy() # "Eliminate the diagonal matrix d using a Givens rotation." for i in range(n): # "Prepare the row of D to be eliminated, locating the # diagonal element using P from the QR factorization." li = pmut[i] if ddiag[li] == 0: sdiag[i] = r[i,i] r[i,i] = x[i] continue sdiag[i:] = 0 sdiag[i] = ddiag[li] # "The transformations to eliminate the row of d modify only a # single element of (q transpose)*b beyond the first n, which # is initially zero." bqtpi = 0. for j in range(i, n): # "Determine a Givens rotation which eliminates the # appropriate element in the current row of D." if sdiag[j] == 0: continue if abs(r[j,j]) < abs(sdiag[j]): cot = r[j,j] / sdiag[j] sin = 0.5 / np.sqrt(0.25 + 0.25 * cot**2) cos = sin * cot else: tan = sdiag[j] / r[j,j] cos = 0.5 / np.sqrt(0.25 + 0.25 * tan**2) sin = cos * tan # "Compute the modified diagonal element of r and the # modified element of ((q transpose)*b,0)." r[j,j] = cos * r[j,j] + sin * sdiag[j] temp = cos * zwork[j] + sin * bqtpi bqtpi = -sin * zwork[j] + cos * bqtpi zwork[j] = temp # "Accumulate the transformation in the row of s." if j + 1 < n: temp = cos * r[j,j+1:] + sin * sdiag[j+1:] sdiag[j+1:] = -sin * r[j,j+1:] + cos * sdiag[j+1:] r[j,j+1:] = temp # Save the diagonal of S and restore the diagonal of R # from its saved location in x. sdiag[i] = r[i,i] r[i,i] = x[i] # "Solve the triangular system for z. If the system is singular # then obtain a least squares solution." nsing = n for i in range(n): if sdiag[i] == 0.: nsing = i zwork[i:] = 0 break if nsing > 0: zwork[nsing-1] /= sdiag[nsing-1] # Degenerate case # "Reverse loop" for i in range(nsing - 2, -1, -1): s = np.dot(zwork[i+1:nsing], r[i,i+1:nsing]) zwork[i] = (zwork[i] - s) / sdiag[i] # "Permute the components of z back to components of x." x[pmut] = zwork return x
3e9d75c135734770c248a39de5770c3b033262da
3,655,749
import re


def find_version():
    """Extract the version number from the CLI source file."""
    with open('pyweek.py') as f:
        for l in f:
            mo = re.match(r'__version__ = *(.*)?\s*', l)
            if mo:
                return eval(mo.group(1))
        else:
            raise Exception("No version information found.")
128f2399a37b27412d2fdf6cf0901c1486709a09
3,655,750
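# Quick usage check for find_version, assuming the function above is in
# scope. It reads a file literally named 'pyweek.py' in the current
# directory, so this sketch writes a throwaway file with a made-up
# __version__ line first.
with open('pyweek.py', 'w') as f:
    f.write('__version__ = "1.2.3"\n')

print(find_version())  # -> '1.2.3'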
import numpy as np
import pandas.core.algorithms as algos


def remove_unused_levels(self):
    """
    Create a new MultiIndex from the current one, removing unused levels,
    i.e. levels that are not expressed in the labels.

    The resulting MultiIndex will have the same outward
    appearance, meaning the same .values and ordering. It will also
    be .equals() to the original.

    .. versionadded:: 0.20.0

    Returns
    -------
    MultiIndex

    Examples
    --------
    >>> i = pd.MultiIndex.from_product([range(2), list('ab')])
    >>> i
    MultiIndex(levels=[[0, 1], ['a', 'b']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]])

    >>> i[2:]
    MultiIndex(levels=[[0, 1], ['a', 'b']],
               codes=[[1, 1], [0, 1]])

    The 0 from the first level is not represented and can be removed:

    >>> i[2:].remove_unused_levels()
    MultiIndex(levels=[[1], ['a', 'b']],
               codes=[[0, 0], [0, 1]])
    """
    new_levels = []
    new_labels = []

    changed = False
    for lev, lab in zip(self.levels, self.labels):

        # Since few levels are typically unused, bincount() is more
        # efficient than unique() - however it only accepts positive values
        # (and drops order):
        uniques = np.where(np.bincount(lab + 1) > 0)[0] - 1
        has_na = int(len(uniques) and (uniques[0] == -1))

        if len(uniques) != len(lev) + has_na:
            # We have unused levels
            changed = True

            # Recalculate uniques, now preserving order.
            # Can easily be cythonized by exploiting the already existing
            # "uniques" and stop parsing "lab" when all items are found:
            uniques = algos.unique(lab)
            if has_na:
                na_idx = np.where(uniques == -1)[0]
                # Just ensure that -1 is in first position:
                uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]

            # labels get mapped from uniques to 0:len(uniques)
            # -1 (if present) is mapped to last position
            label_mapping = np.zeros(len(lev) + has_na)
            # ... and reassigned value -1:
            label_mapping[uniques] = np.arange(len(uniques)) - has_na

            lab = label_mapping[lab]

            # new levels are simple
            lev = lev.take(uniques[has_na:])

        new_levels.append(lev)
        new_labels.append(lab)

    result = self._shallow_copy()

    if changed:
        result._reset_identity()
        result._set_levels(new_levels, validate=False)
        result._set_labels(new_labels, validate=False)

    return result
8f07a2b943278d5d5ae7d78ab2c10e96acd349e4
3,655,751
def _transform_playlist(playlist): """Transform result into a format that more closely matches our unified API. """ transformed_playlist = dict([ ('source_type', 'spotify'), ('source_id', playlist['id']), ('name', playlist['name']), ('tracks', playlist['tracks']['total']), ]) return transformed_playlist
62c19c132cbb9438c7a4b993e1d79111b79b86fd
3,655,752
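# Example call for _transform_playlist with a dict shaped like a Spotify Web
# API playlist object; the id, name and track count are made-up values.
playlist = {
    'id': '37i9dQZF1DXcBWIGoYBM5M',
    'name': "Today's Top Hits",
    'tracks': {'total': 50},
}
print(_transform_playlist(playlist))
# {'source_type': 'spotify', 'source_id': '37i9dQZF1DXcBWIGoYBM5M',
#  'name': "Today's Top Hits", 'tracks': 50}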
from typing import Dict from typing import Hashable from typing import Any def decode_map_states(beliefs: Dict[Hashable, Any]) -> Any: """Function to decode MAP states given the calculated beliefs. Args: beliefs: An array or a PyTree container containing beliefs for different variables. Returns: An array or a PyTree container containing the MAP states for different variables. """ return jax.tree_util.tree_map(lambda x: jnp.argmax(x, axis=-1), beliefs)
3d8b9feecb3d612a4ff361f710ef1841cd016239
3,655,753
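# Small sketch of decode_map_states on a hand-built beliefs PyTree, assuming
# the function above (and jax) is in scope. The variable names and belief
# values are arbitrary.
import jax.numpy as jnp

beliefs = {
    "x": jnp.array([[0.1, 2.0, -1.0],
                    [0.3, 0.2, 0.9]]),
    "y": jnp.array([[5.0, 1.0]]),
}
map_states = decode_map_states(beliefs)
print(map_states["x"])  # [1 2]
print(map_states["y"])  # [0]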
import matplotlib.pyplot as plt
import scarlet.display


def plot_stretch_Q(datas, stretches=[0.01,0.1,0.5,1], Qs=[1,10,5,100]):
    """
    Plots different normalizations of your image using the stretch, Q parameters.

    Parameters
    ----------
    datas : array
        Multi-band image data to display, shaped (bands, height, width).

    stretches : array
        List of stretch params you want to permute through to find optimal
        image normalization.
        Default is [0.01, 0.1, 0.5, 1]

    Qs : array
        List of Q params you want to permute through to find optimal
        image normalization.
        Default is [1, 10, 5, 100]

    Code adapted from:
        https://pmelchior.github.io/scarlet/tutorials/display.html

    Returns
    -------
    fig : Figure object
    """
    fig, ax = plt.subplots(len(stretches), len(Qs), figsize=(9,9))
    for i, stretch in enumerate(stretches):
        for j, Q in enumerate(Qs):
            asinh = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q)
            # Scale the RGB channels for the image
            img_rgb = scarlet.display.img_to_rgb(datas, norm=asinh)
            ax[i][j].imshow(img_rgb)
            ax[i][j].set_title("Stretch {}, Q {}".format(stretch, Q))
            ax[i][j].axis('off')

    return fig
d4dc4d52019aac10fc15dd96fd29c3abf6563446
3,655,754
def _isSpecialGenerateOption(target, optName): """ Returns ``True`` if the given option has a special generation function, ``False`` otherwise. """ return _getSpecialFunction(target, optName, '_generateSpecial') is not None
387fcb96d0d13e45b38a645ee61f20441905a0f8
3,655,755
def count_active_days(enable_date, disable_date): """Return the number of days the segment has been active. :param enable_date: The date the segment was enabled :type enable_date: timezone.datetime :param disable_date: The date the segment was disabled :type disable_date: timezone.datetime :returns: The amount of days a segment is/has been active :rtype: int """ if enable_date is not None: if disable_date is None or disable_date <= enable_date: # There is no disable date, or it is not relevant. delta = timezone.now() - enable_date return delta.days if disable_date > enable_date: # There is a disable date and it is relevant. delta = disable_date - enable_date return delta.days return 0
070a520c328dbe69491fc6eb991c816c9f4fccd8
3,655,756
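# Usage sketch for count_active_days, assuming the function above is
# importable. django.utils.timezone needs Django settings, so a minimal
# configure() call is included; the day offsets are illustrative.
from datetime import timedelta

from django.conf import settings

settings.configure(USE_TZ=True)

from django.utils import timezone

enabled = timezone.now() - timedelta(days=10)
disabled = timezone.now() - timedelta(days=3)
print(count_active_days(enabled, disabled))  # 7  (enabled for a week, then disabled)
print(count_active_days(enabled, None))      # 10 (still active today)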
def numpy_to_python_type(value): """ Convert to Python type from numpy with .item(). """ try: return value.item() except AttributeError: return value
f1d3a8ad77932342c182d7be76037fee3c869afe
3,655,757
def threshold_abs(image, threshold): """Return thresholded image from an absolute cutoff.""" return image > threshold
5032f632371af37e81c3ebcc587475422d5ff2bf
3,655,760
import cv2
import numpy as np


def warp_images(img1_loc, img2_loc, h_loc):
    """
    Warp img2_loc onto img1_loc using the homography h_loc and stitch the two
    images into a single output image.

    Parameters
    ----------
    img1_loc : numpy.ndarray
        Reference image (kept unwarped).
    img2_loc : numpy.ndarray
        Image to be warped by the homography.
    h_loc : numpy.ndarray
        3x3 homography matrix mapping img2_loc into img1_loc's frame.

    Returns
    -------
    numpy.ndarray
        The stitched output image containing both inputs.
    """
    rows1, cols1 = img1_loc.shape[:2]
    rows2, cols2 = img2_loc.shape[:2]

    list_of_points_1 = np.array(
        [[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]], np.float32).reshape(-1, 1, 2)
    temp_points = np.array(
        [[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]], np.float32).reshape(-1, 1, 2)

    # Project the corners of img2 into img1's frame and find the bounding box
    # that contains both images.
    list_of_points_2 = cv2.perspectiveTransform(temp_points, h_loc)
    list_of_points = np.concatenate(
        (list_of_points_1, list_of_points_2), axis=0)

    [x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)

    # Translate so that all coordinates are positive, then warp img2.
    translation_dist = [-x_min, -y_min]
    h_translation = np.array(
        [[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])

    output_img = cv2.warpPerspective(
        img2_loc, h_translation.dot(h_loc), (x_max - x_min, y_max - y_min))
    # Paste the reference image on top of the warped image.
    output_img[translation_dist[1]:rows1+translation_dist[1],
               translation_dist[0]:cols1+translation_dist[0]] = img1_loc

    return output_img
ab5b364ded7647efb13c686a32acc8ee6c6487ba
3,655,761
def ValidaCpf(msg='Cadastro de Pessoa Física (CPF): ', pont=True):
    """
    -> Function to validate a CPF (Brazilian individual taxpayer number).

    :param msg: Message shown to the user before reading the CPF.
    :param pont: If True, returns the CPF with punctuation (e.g. xxx.xxx.xxx-xx).
                 If False, returns the CPF without punctuation (e.g. xxxxxxxxxxx).
    :return: Returns a valid CPF.
    """
    while True:
        cpf = str(input(msg))
        if not pont and ('.' in cpf or '-' in cpf):
            # Strip the punctuation when an unformatted CPF was requested
            cpf = cpf.replace('.', '').replace('-', '')
        contDig = 0
        for dig in cpf:
            if dig.isnumeric():
                contDig += 1  # Count the number of digits in the CPF
        if contDig != 11:  # If the CPF does not have exactly 11 digits, show an error message
            print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
            continue  # Go back to the top of the loop
        if '.' in cpf:  # Check whether the CPF has dots and whether their count is correct (2)
            if cpf.count('.') != 2:
                print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
                continue
        else:  # If there are no dots and pont=True, add the punctuation
            if pont:
                cpf = list(cpf)
                cpf.insert(3, '.')
                cpf.insert(7, '.')
        if '-' in cpf:  # Check whether the CPF has a hyphen and whether its count is correct (1)
            if cpf.count('-') != 1:
                print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
                continue
        else:  # If there is no hyphen and pont=True, add the punctuation
            if pont:
                cpf = list(cpf)
                cpf.insert(11, '-')
        cpf = ''.join(cpf)  # Join the characters back into a string
        break
    return cpf
3bdc298f7a2a3a4c16919a9caba21b71bbaf8539
3,655,762
from lxml import etree


def get_xml_path(xml, path=None, func=None):
    """
    Return the content from the passed xml xpath, or return the result
    of a passed function (receives xpathContext as its only arg)
    """
    doc = etree.fromstring(xml)
    result = None

    if path:
        ret = doc.xpath(path)
        if ret is not None:
            if type(ret) == list:
                if len(ret) >= 1:
                    result = ret[0].text
            else:
                result = ret
    elif func:
        result = func(doc)
    else:
        raise ValueError("'path' or 'func' is required.")

    return result
81bcce1806f11217a04fbc401226d727e0150735
3,655,763
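# Example call for get_xml_path, assuming `etree` is lxml.etree as in the
# snippet above; the XML document is a made-up libvirt-style fragment.
xml = "<domain><name>test-vm</name><memory unit='KiB'>1048576</memory></domain>"
print(get_xml_path(xml, path="/domain/name"))                        # 'test-vm'
print(get_xml_path(xml, func=lambda doc: doc.find("memory").text))   # '1048576'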
def readXYdYData(filename, comment_character='#'): """ Read in a file containing 3 columns of x, y, dy Lines beginning with commentCharacter are ignored """ return read_columnar_data(filename, number_columns=3, comment_character=comment_character)
92fd9253e0b50688034e3d85d6f4589a589be066
3,655,764
def hexlen(x): """ Returns the string length of 'x' in hex format. """ return len(hex(x))+2
404ec4c3656bb35b87df6ae147db93922f2da059
3,655,765
def get_db():
    """ Get the database connection, creating it if it does not exist yet. """
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = connect_db()
    return g.sqlite_db
ba3d474ba854d9dea8e8f0056ebbfd81fc86b91a
3,655,766
def list_manipulation(lst, command, location, value=None): """Mutate lst to add/remove from beginning or end. - lst: list of values - command: command, either "remove" or "add" - location: location to remove/add, either "beginning" or "end" - value: when adding, value to add remove: remove item at beginning or end, and return item removed >>> lst = [1, 2, 3] >>> list_manipulation(lst, 'remove', 'end') 3 >>> list_manipulation(lst, 'remove', 'beginning') 1 >>> lst [2] add: add item at beginning/end, and return list >>> lst = [1, 2, 3] >>> list_manipulation(lst, 'add', 'beginning', 20) [20, 1, 2, 3] >>> list_manipulation(lst, 'add', 'end', 30) [20, 1, 2, 3, 30] >>> lst [20, 1, 2, 3, 30] Invalid commands or locations should return None: >>> list_manipulation(lst, 'foo', 'end') is None True >>> list_manipulation(lst, 'add', 'dunno') is None True """ if command == "remove": if location == "end": return lst.pop() elif location == "beginning": return lst.pop(0) elif command == "add": if location == "beginning": lst.insert(0,value) return lst elif location == "end": lst.append(value) return lst
c847257ea5508f60b84282c3ac8237b43cd3825a
3,655,767
import six def solve( problem, comm=_NoArgumentGiven, dispatcher_rank=0, log_filename=None, results_filename=None, **kwds ): """Solves a branch-and-bound problem and returns the solution. Note ---- This function also collects and summarizes runtime workload statistics, which may introduce additional overhead. This overhead can be avoided by directly instantiating a :class:`Solver` object and calling the :func:`Solver.solve` method. Parameters ---------- problem : :class:`pybnb.Problem <pybnb.problem.Problem>` An object that defines a branch-and-bound problem comm : ``mpi4py.MPI.Comm``, optional The MPI communicator to use. If unset, the mpi4py.MPI.COMM_WORLD communicator will be used. Setting this keyword to None will disable the use of MPI and avoid an attempted import of mpi4py.MPI (which avoids triggering a call to `MPI_Init()`). dispatcher_rank : int, optional The process with this rank will be designated the dispatcher process. If MPI functionality is disabled (by setting comm=None, or when comm.size==1), this keyword must be left at 0. (default: 0) log_filename : string, optional A filename where solver output should be sent in addition to console. This keyword will be ignored if the `log` keyword is set. (default: None) results_filename : string, optional Saves the solver results into a YAML-formatted file with the given name. (default: None) **kwds Additional keywords to be passed to :func:`Solver.solve`. See that method for additional keyword documentation. Returns ------- results : :class:`SolverResults <pybnb.solver_results.SolverResults>` An object storing information about the solve. """ opt = Solver(comm=comm, dispatcher_rank=dispatcher_rank) if (opt.is_dispatcher) and ("log" not in kwds) and (log_filename is not None): kwds["log"] = get_simple_logger(filename=log_filename) results = opt.solve(problem, **kwds) stats = opt.collect_worker_statistics() if opt.is_dispatcher: tmp = six.StringIO() summarize_worker_statistics(stats, stream=tmp) opt._disp.log_info(tmp.getvalue()) if opt.is_dispatcher and (results_filename is not None): results.write(results_filename) return results
9c57c0748db0185fae1e731044e904d0f732b5de
3,655,768
def solution(lst): """Given a non-empty list of integers, return the sum of all of the odd elements that are in even positions. Examples solution([5, 8, 7, 1]) ==> 12 solution([3, 3, 3, 3, 3]) ==> 9 solution([30, 13, 24, 321]) ==>0 """ #[SOLUTION] return sum([x for idx, x in enumerate(lst) if idx%2==0 and x%2==1])
f98482cad7061d725389442c9811e33539df4fdc
3,655,769
def summarize_traffic_mix(l_d_flow_records, d_filters={}): """ Filter the traffic flow data and execute the processing analysis logic for network behavior metrics. """ o_tcp_src_analysis = TopProtocolAnalysis() o_tcp_dst_analysis = TopProtocolAnalysis() o_upd_src_analysis = TopProtocolAnalysis() o_upd_dst_analysis = TopProtocolAnalysis() for flow in l_d_flow_records: # print "Flow:", str(flow) if matches_desired_flows(op_src_asn_to_filter, op_dst_asn_to_filter, op_ingress_asn_to_filter, flow, d_filters): # get srcIP and dstIP int_flow_sa = flow['sa'] # get bytes and packets flow_bytes = fputil.record_to_numeric(flow['ibyt']) flow_packets = fputil.record_to_numeric(flow['ipkt']) # get ports and protocol flow_sp = fputil.record_to_numeric(flow['sp']) flow_dp = fputil.record_to_numeric(flow['dp']) str_flow_pr = fputil.proto_int_to_str(flow['pr']) # process and save traffic information per selected L7 protocols and group other using -1 port number if str_flow_pr == "TCP": if flow_sp in cons.d_proto_l7_int_str.keys(): o_tcp_src_analysis.update_port_sum(flow_sp, flow_bytes, flow_packets) o_tcp_src_analysis.update_port_ips_sum(flow_sp, int_flow_sa) else: o_tcp_src_analysis.update_port_sum(-1, flow_bytes, flow_packets) o_tcp_src_analysis.update_port_ips_sum(-1, int_flow_sa) if flow_dp in cons.d_proto_l7_int_str.keys(): o_tcp_dst_analysis.update_port_sum(flow_dp, flow_bytes, flow_packets) else: o_tcp_dst_analysis.update_port_sum(-1, flow_bytes, flow_packets) if str_flow_pr == "UDP": if flow_sp in cons.d_proto_l7_int_str.keys(): o_upd_src_analysis.update_port_sum(flow_sp, flow_bytes, flow_packets) o_upd_src_analysis.update_port_ips_sum(flow_sp, int_flow_sa) else: o_upd_src_analysis.update_port_sum(-1, flow_bytes, flow_packets) o_upd_src_analysis.update_port_ips_sum(-1, int_flow_sa) if flow_dp in cons.d_proto_l7_int_str.keys(): o_upd_dst_analysis.update_port_sum(flow_dp, flow_bytes, flow_packets) else: o_upd_dst_analysis.update_port_sum(-1, flow_bytes, flow_packets) return [o_tcp_src_analysis, o_tcp_dst_analysis, o_upd_src_analysis, o_upd_dst_analysis]
48b04cf0e1e4f8b50850a775994012af4a784728
3,655,770
from skimage import filters
from skimage.measure import label
from skimage.morphology import closing, square
from skimage.restoration import denoise_wavelet
from skimage.segmentation import clear_border
from skimage.transform import rescale


def segment(X, upscale=1.0, denoise=False):
    """
    Segment an image into labelled foreground regions using Otsu thresholding.

    :param X: 2-D grayscale image to segment.
    :param upscale: optional rescaling factor applied before thresholding.
    :param denoise: if True, apply wavelet denoising before thresholding.
    :return: label image of the segmented, border-cleared regions.
    """
    if upscale > 1.0:
        X = rescale(X, upscale)
    if denoise:
        X = denoise_wavelet(X)

    thresh = filters.threshold_otsu(X)
    bw = closing(X > thresh, square(3))
    cleared = clear_border(bw)
    cleared = rescale(cleared, 1.0 / upscale)
    return label(cleared)
e81be87bdb27b7cf1cf1de434997a87ecea0cae4
3,655,771
def get_image_info(doc): """Create dictionary with key->id, values->image information """ id_img = dict() #add image information for img_infor in doc['images']: filename = img_infor['file_name'] width = img_infor['width'] height = img_infor['height'] id_img[img_infor['id']] = [filename, width, height] return id_img
b8c91e67572e5863f773db579ce26fa86530f32e
3,655,772
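# Example call for get_image_info with a COCO-style annotation dict; the file
# names and sizes are invented for illustration.
doc = {
    "images": [
        {"id": 1, "file_name": "img_001.jpg", "width": 640, "height": 480},
        {"id": 2, "file_name": "img_002.jpg", "width": 1280, "height": 720},
    ]
}
print(get_image_info(doc))
# {1: ['img_001.jpg', 640, 480], 2: ['img_002.jpg', 1280, 720]}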
import numpy as np


def __check_partial(detected, approx, width, height):
    """
    Check whether the shape is a partial shape.

    A shape is partial if its contour touches the image's edges.

    Parameters
    ----------
    detected : Shape
        The detected shape
    approx : numpy.ndarray
        Approximated polygonal curve (contour points).
    width : int
        Image's width
    height : int
        Image's height

    Returns
    -------
    detected : Shape
        The detected shape
    """
    # Check the x,y positions of the contour points.
    # The shape is on the image's edges if a point is less than 1 or more than width-1.
    result = np.where((approx <= 1) | (approx >= width-1))
    if(len(result[0]) > 0):  # result[0] contains the positions found by np.where.
        detected = Shape.Shape.PARTIAL.value
    else:
        # Check if there is a point (X or Y) equal to height or height-1.
        result = np.where((approx == height) | (approx == height-1))
        result = np.where(result[2] == 1)  # Check if this point is Y.
        if(len(result[0]) > 0):
            detected = Shape.Shape.PARTIAL.value
        else:
            detected = None
    return detected
7808dd156de97fa467b7b471b77fa4abdeaede95
3,655,773
import re


def get_svg_size(filename):
    """return width and height of a svg"""
    with open(filename) as f:
        lines = f.read().split('\n')

    width, height = None, None
    for l in lines:
        res = re.findall(r'<svg.*width="(\d+)pt".*height="(\d+)pt"', l)
        if len(res) > 0:
            # need to scale up, maybe due to omni-graffle
            scale = 2
            width = round(scale*float(res[0][0]))
            height = round(scale*float(res[0][1]))

        res = re.findall(r'width="([.\d]+)', l)
        if len(res) > 0:
            width = round(float(res[0]))
        res = re.findall(r'height="([.\d]+)', l)
        if len(res) > 0:
            height = round(float(res[0]))

        if width is not None and height is not None:
            return width, height

    assert False, 'cannot find height and width for ' + filename
7732df636657950b050be409ef2439c975d6940d
3,655,774
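# Quick check of get_svg_size on a throwaway SVG file; the dimensions are
# arbitrary. The function rounds to the nearest integer.
svg = '<svg xmlns="http://www.w3.org/2000/svg" width="640.2" height="480">\n</svg>\n'
with open('example.svg', 'w') as f:
    f.write(svg)

print(get_svg_size('example.svg'))  # (640, 480)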
def index(): """Video streaming home page which makes use of /mjpeg.""" return render_template('index.html')
2fcc16af5bfc160a71f5eb74d1854b3c7e22587f
3,655,776
def tex_quoted_no_underscore (s) : """Same as tex_quoted but does NOT quote underscores. """ if isinstance (s, pyk.string_types) : s = _tex_pi_symbols.sub (_tex_subs_pi_symbols, s) s = _tex_to_quote.sub (_tex_subs_to_quote, s) s = _tex_tt_symbols.sub (_tex_subs_tt_symbols, s) s = _tex_diacritics.sub (_tex_subs_diacritics, s) return s
96eca3b927e6c7cc84d721222ceb9e9405eb8763
3,655,777
import json def load_from_json_file(filename): """ function that creates an Object from a “JSON file” """ with open(filename, 'r') as f: return json.loads(f.read())
ed46cf62548cfb7e1eb3683b688d18246b34be23
3,655,778
def _variable_to_field(v): """Transform a FuzzyVariable into a restx field""" if isinstance(v.domain, FloatDomain): a, b = v.domain.min, v.domain.max f = fields.Float(description=v.name, required=True, min=a, max=b, example=(a + b) / 2) elif isinstance(v.domain, CategoricalDomain): raise NotImplementedError else: raise ValueError("Unknown domain for variable %s" % v) return v.name, f
c97b25ff0abecedc6f44210d2672422d9c3eefd2
3,655,779
def abs_ang_mom(u, lat=None, radius=RAD_EARTH, rot_rate=ROT_RATE_EARTH, lat_str=LAT_STR): """Absolute angular momentum.""" if lat is None: lat = u[lat_str] coslat = cosdeg(lat) return radius*coslat*(rot_rate*radius*coslat + u)
57525fa5ed995208eced76b74e4263c695340575
3,655,780
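# Worked example of the absolute angular momentum formula used in abs_ang_mom,
# M = a*cos(lat) * (Omega*a*cos(lat) + u), with Earth-like constants standing
# in for RAD_EARTH and ROT_RATE_EARTH and an illustrative zonal wind.
import numpy as np

a = 6.371e6          # planetary radius [m]
omega = 7.292e-5     # rotation rate [rad/s]
lat, u = 30.0, 20.0  # latitude [deg], zonal wind [m/s]

coslat = np.cos(np.deg2rad(lat))
m = a * coslat * (omega * a * coslat + u)
print(f"{m:.3e}")    # ~2.33e+09 m^2/s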
def main(): """ Simple pyvmomi (vSphere SDK for Python) script that generates ESXi support bundles running from VCSA using vCenter Alarm """ # Logger for storing vCenter Alarm logs vcAlarmLog = logging.getLogger('vcenter_alarms') vcAlarmLog.setLevel(logging.INFO) vcAlarmLogFile = os.path.join('/var/log', 'vcenter_alarms.log') formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s","%Y-%m-%d %H:%M:%S") vcAlarmLogHandler = logging.FileHandler(vcAlarmLogFile) vcAlarmLogHandler.setFormatter(formatter) vcAlarmLog.addHandler(vcAlarmLogHandler) vcAlarmLog.propagate = False args = get_args() try: si = None try: si = connect.SmartConnect(host=args.host, user=args.user, pwd=args.password, port=int(args.port)) except IOError, e: pass if not si: vcAlarmLog.info("Could not connect to the specified host using specified username and password") print "Could not connect to the specified host using specified username and password" return -1 atexit.register(connect.Disconnect, si) content = si.RetrieveContent() # Get Diag Manager which is used to generate support bundles in VC diagManager = content.diagnosticManager # Extract the vSphere Cluster generated from vCenter Server Alarm cluster = os.environ['VMWARE_ALARM_EVENT_COMPUTERESOURCE'] #cluster = "Non-VSAN-Cluster" if cluster == None: vcAlarmLog.info("Unable to extract vSphere Cluster from VMWARE_ALARM_EVENT_COMPUTERESOURCE") print "Unable to extract vSphere Cluster from VMWARE_ALARM_EVENT_COMPUTERESOURCE" return -1 vcAlarmLog.info("Cluster passed from VC Alarm: " + cluster) # Retrieve all vSphere Clusters container = content.viewManager.CreateContainerView(content.rootFolder, [vim.ClusterComputeResource], True) # Return vSphere Cluster that matches name specified for c in container.view: if c.name == cluster: cluster_view = c break container.Destroy() # Retrieve all ESXi hosts in the vSphere Cluster # to generate log bundles for hosts_to_generate_logs = [] hosts = cluster_view.host for h in hosts: hosts_to_generate_logs.append(h) # Generate log bundle excluding VC logs vcAlarmLog.info("Generating support bundle") print "Generating support bundle" task = diagManager.GenerateLogBundles_Task(includeDefault=False,host=hosts_to_generate_logs) task_done = False result = None while not task_done: if task.info.state == "success": result = task.info.result task_done = True if task.info.state == "error": vcAlarmLog.error("An error occured while generating support logs") print "An error occured while generating support logs" vcAlarmLog.error(task.info) print task.info return -1 task_done = True if task.info.state == "running": time.sleep(60) # Path to which logs will be stored (automatically creating /esxi-support-logs dir) dir = args.filepath + "/esxi-support-logs" try: os.stat(dir) except: vcAlarmLog.info("Creating directory " + dir + " to store support bundle") os.mkdir(dir) # Loop through the result to get the download URL for each # ESXi support bundle and save it to VCSA filesystem for file in result: download_url = file.url download_file = dir + "/vmsupport-" + file.system.name + ".tgz" vcAlarmLog.info("Downloading " + download_url + " to " + download_file) print "Downloading " + download_url + " to " + download_file urllib.urlretrieve(download_url,download_file) except vmodl.MethodFault, e: vcAlarmLog.error("Caught vmodl fault : " + e.msg) print "Caught vmodl fault : " + e.msg return -1 except Exception, e: vcAlarmLog.error("Caught exception : " + str(e)) print "Caught exception : " + str(e) return -1 return 0
2c874bc06072896bb35f0288dd2ef4b5f69fe07f
3,655,781
import collections def _get_ngrams(segment, max_order): """Extracts all n-grams upto a given maximum order from an input segment. Args: segment: text segment from which n-grams will be extracted. max_order: maximum length in tokens of the n-grams returned by this methods. Returns: The Counter containing all n-grams upto max_order in segment with a count of how many times each n-gram occurred. """ ngram_counts = collections.Counter() for order in range(1, max_order + 1): for i in range(0, len(segment) - order + 1): ngram = tuple(segment[i:i + order]) ngram_counts[ngram] += 1 return ngram_counts
561dfe8c18810ce40ce4c0ff391d6838816de116
3,655,782
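# Example of _get_ngrams on a tokenized sentence: with max_order=2 it counts
# every unigram and bigram in the segment.
tokens = "the cat sat on the mat".split()
counts = _get_ngrams(tokens, 2)
print(counts[("the",)])        # 2
print(counts[("the", "cat")])  # 1
print(len(counts))             # 10 distinct n-grams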
def print_donors_list(): """ print a list of existing donors """ print(mr.list_donors()) return False
997860c036cac95f73242174198092a1d7d3ea9b
3,655,783
def collimate(S, r, phasevec, print_values = False): """Collimate r phase vectors into a new phase vector on [S]. Output: the collimated phase vector ([b(0),b(1),...,b(L'-1)], L') on [S]. Parameters: S: output phase vectors has all multipliers on [S] r: arity, the number of phase vectors that is collimated phasevec: list of phasevectors to be collimated To be improved: -add scaled interval collimation with modulo measurement """ [b, L] = summate(r, phasevec) # calculate the values of b'(j^vec) in b q = np.floor_divide(b,S) # calculate values of q = floor(b'(j^vec)/S) q_meas = choice(q) # measured value is q_meas # take values of b with q equals the measured value q_meas b_new = np.ma.masked_where(q != q_meas, b).compressed() L_new = len(b_new) b_new = (b_new-b_new[0]) % S # modulo and substract first value to ignore global phase # another equivalent option: b_new = b_new - S*q if print_values: #print("b =", b) #print("q =", q) #print("Measured value q =", q_meas) print(phasevec[0][0], " and ", phasevec[1][0], " collimated into ", b_new) return [b_new, L_new]
1e1eb6c55cd1b51e7303613d581fda97ad14bdb0
3,655,785
import io def parse_and_load(gw, subj, primitive, cgexpr, g): """ Parse the conceptual grammar expression for the supplied subject and, if successful, add it to graph g. :param gw: parser gateway :param subj: subject of expression :param primitive: true means subClassOf, false means equivalentClass :param cgexpr: expression to parse :param g: graph to add the result to :return: true means success, false error """ ttlresult = gw.parse(subj, primitive, cgexpr) if ttlresult: ttlresult = owlbasere.sub(r'\1>', ttlresult) g.parse(io.StringIO(ttlresult), format='n3') return bool(ttlresult)
cd5b1b27b5922fb6c0e377532192a6985a0a5783
3,655,786
def pushed(property_name, **kwargs) -> Signal: """ Returns the `pushed` Signal for the given property. This signal is emitted, when a new child property is added to it. From the perspective of a state, this can be achieved with the `ContextWrapper.push(...)` function.<br> __Hint:__ All key-word arguments of #constraint.s(...) (`min_age`, `max_age`, `detached`) are supported. """ return s(f"{property_name}:pushed", **kwargs)
999e6b20a92648d5042c075400af45c809f08a32
3,655,787
def _is_dask_series(ddf):
    """
    Will determine if the given arg is a dask Series.
    Returns False if dask is not installed.
    """
    try:
        import dask.dataframe as dd

        return isinstance(ddf, dd.Series)
    except Exception:
        return False
5166928c0bd54bfc69a3d7862fadc41c3a0b6d19
3,655,789
import numpy as np
import scipy.signal


def square(t, A=1, f=1, D=0):
    """
    t: time
    A: the amplitude, the peak deviation of the function from zero.
    f: the ordinary frequency, the number of oscillations (cycles)
       that occur each second of time.
    D: non-zero center amplitude
    """
    square_ = A*scipy.signal.square(2 * np.pi * f * t) + D
    return square_
8e1899891d5f0df6c171404c401e94f729233147
3,655,790
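# Usage sketch for the square-wave helper above, assuming it is in scope:
# one second sampled at 500 Hz, a 5 Hz wave with amplitude 2 centered on 1,
# so the output alternates between 3 and -1.
import numpy as np

t = np.linspace(0, 1, 500, endpoint=False)
y = square(t, A=2, f=5, D=1)
print(y.min(), y.max())  # -1.0 3.0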
def self_distance_array(reference, box=None, result=None, backend="serial"): """Calculate all possible distances within a configuration `reference`. If the optional argument `box` is supplied, the minimum image convention is applied when calculating distances. Either orthogonal or triclinic boxes are supported. If a 1D numpy array of dtype ``numpy.float64`` with the shape ``(n*(n-1)/2,)`` is provided in `result`, then this preallocated array is filled. This can speed up calculations. Parameters ---------- reference : numpy.ndarray Reference coordinate array of shape ``(3,)`` or ``(n, 3)`` (dtype is arbitrary, will be converted to ``numpy.float32`` internally). box : array_like, optional The unitcell dimensions of the system, which can be orthogonal or triclinic and must be provided in the same format as returned by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:\n ``[lx, ly, lz, alpha, beta, gamma]``. result : numpy.ndarray, optional Preallocated result array which must have the shape ``(n*(n-1)/2,)`` and dtype ``numpy.float64``. Avoids creating the array which saves time when the function is called repeatedly. backend : {'serial', 'OpenMP'}, optional Keyword selecting the type of acceleration. Returns ------- d : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n*(n-1)/2,)``) Array containing the distances ``dist[i,j]`` between reference coordinates ``i`` and ``j`` at position ``d[k]``. Loop through ``d``: .. code-block:: python for i in range(n): for j in range(i + 1, n): k += 1 dist[i, j] = d[k] .. versionchanged:: 0.13.0 Added *backend* keyword. .. versionchanged:: 0.19.0 Internal dtype conversion of input coordinates to ``numpy.float32``. """ refnum = reference.shape[0] distnum = refnum * (refnum - 1) // 2 distances = _check_result_array(result, (distnum,)) if len(distances) == 0: return distances if box is not None: boxtype, box = check_box(box) if boxtype == 'ortho': _run("calc_self_distance_array_ortho", args=(reference, box, distances), backend=backend) else: _run("calc_self_distance_array_triclinic", args=(reference, box, distances), backend=backend) else: _run("calc_self_distance_array", args=(reference, distances), backend=backend) return distances
71ee400ad48f719316a0c3f3c101f432067e2387
3,655,791
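# Usage sketch: the snippet above mirrors MDAnalysis.lib.distances'
# self_distance_array, so (assuming MDAnalysis is installed) the public
# function can be called the same way. The coordinates are three arbitrary
# points.
import numpy as np
from MDAnalysis.lib.distances import self_distance_array

coords = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 2.0, 0.0]], dtype=np.float32)
d = self_distance_array(coords)
# Condensed order: d(0,1), d(0,2), d(1,2)
print(d)  # [1.  2.  2.236...]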
import numpy def mainRecursivePartitioningLoop(A, B, n_cutoff): """ """ # Initialize storage objects n = A.shape[0] groups = numpy.zeros((n,), dtype=int) groups_history = [] counts = {'twoway-single' : 0, 'twoway-pair' : 0, 'threeway-pair' : 0} to_split = {0 : True} # Recursively partition network while numpy.any([v for v in to_split.values()]): for gn in [g for g,v in to_split.items() if v]: # Initialize group info indx = numpy.where(groups==gn)[0] ni = len(indx) #c = numpy.zeros((1,3)) if ni > n_cutoff: # Calc and sort eigenvecs, eigenvalues BtoEigs = LinearOperator((ni, ni), matvec = lambda x: B(x, A, indx), dtype=float) try: if ni > 2: vals, vecs = eigsh(BtoEigs, k=3, which='BE') sort_inds = numpy.argsort(-vals) vals = vals[sort_inds] vecs = vecs[:,sort_inds] else: vals, vecs = eigsh(BtoEigs, k=2, which='LA') sort_inds = numpy.argsort(-vals) vals = vals[sort_inds] vecs = vecs[:,sort_inds] vals = numpy.array([vals[0], vals[1], min(0, vals[1] - 1)]) except ArpackNoConvergence: to_split[gn] = False # Initialize temporary score and groups holders temp_Q = {} temp_C = {} # Leading eignevec 2-way temp_C['twoway-single'] = twoway1(vecs, B, A, indx) temp_Q['twoway-single'] = modularity(temp_C['twoway-single'], B, A, indx) # Convert eigenvecs to vertex vectors mod_factor = numpy.sqrt(vals[:2] - vals[2]) vecs = vecs[:,0:2] * mod_factor # Leading two eigenvec 2-way temp_C['twoway-pair'] = twoway2(vecs, B, A, indx) temp_Q['twoway-pair'] = modularity(temp_C['twoway-pair'], B, A, indx) # # Leading two eigenvec 3-way # temp_C['threeway-pair'] = threewayCoarse(vecs, B, A, indx, 24) # temp_Q['threeway-pair'] = modularity(temp_C['threeway-pair'], # B, A, indx) # # Determine best Score, Grouping best_split_ind = [k for k in temp_Q.keys()]\ [numpy.where(list(temp_Q.values())==max(temp_Q.values()))[0][0]] best_Q = temp_Q[best_split_ind] best_C = temp_C[best_split_ind] # Update master group store, info regarding availalbe splitting if (best_Q > 0) and (max(best_C) - min(best_C) > 0): counts[best_split_ind] += 1 g0 = numpy.array(best_C)==0 g1 = numpy.array(best_C)==1 g2 = numpy.array(best_C)==2 max_gn = max(groups) groups[indx[g1]] = max_gn + 1 groups[indx[g2]] = max_gn + 2 to_split[gn] = sum(g0) > 2 to_split[max_gn + 1] = sum(g1) > 2 to_split[max_gn + 2] = sum(g2) > 2 groups_history.append(groups.copy()) else: to_split[gn] = False else: to_split[gn] = False groups_history = numpy.array(groups_history).T return(groups, counts, groups_history)
e2983585825f068ce1bdcc26dfd91dd85be2e060
3,655,792
def corrSmatFunc(df, metric='pearson-signed', simFunc=None, minN=None): """Compute a pairwise correlation matrix and return as a similarity matrix. Parameters ---------- df : pd.DataFrame (n_instances, n_features) metric : str Method for correlation similarity: pearson or spearman, optionally "signed" (e.g. pearson-signed) A "signed" similarity means that anti-correlated instances will have low similarity. simFunc : function Optionally supply an arbitrary distance function. Function takes two instances and returns their distance. minN : int Minimum number of non-NA values in order for correlation to be non-NA. Returns ------- smatDf : pd.DataFrame (n_instances, n_instances)""" if minN is None: minN = df.shape[0] if simFunc is None: if metric in ['spearman', 'pearson']: """Anti-correlations are also considered as high similarity and will cluster together""" smat = df.corr(method=metric, min_periods=minN).values**2 smat[np.isnan(smat)] = 0 elif metric in ['spearman-signed', 'pearson-signed']: """Anti-correlations are considered as dissimilar and will NOT cluster together""" smat = df.corr(method=metric.replace('-signed', ''), min_periods=minN).values smat = (smat**2 * np.sign(smat) + 1)/2 smat[np.isnan(smat)] = 0 else: raise NameError('metric name not recognized') else: ncols = df.shape[1] smat = np.zeros((ncols, ncols)) for i in range(ncols): for j in range(ncols): """Assume distance is symetric""" if i <= j: tmpdf = df.iloc[:, [i, j]] tmpdf = tmpdf.dropna() if tmpdf.shape[0] >= minN: d = simFunc(df.iloc[:, i], df.iloc[:, j]) else: d = np.nan smat[i, j] = d smat[j, i] = d return pd.DataFrame(smat, columns=df.columns, index=df.columns)
3d8d3ad9c992f1f1518c8fc7699058e76f616c95
3,655,793
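# Small demonstration of corrSmatFunc (assumed in scope) with synthetic data:
# 'a' and 'b' are strongly correlated, 'c' is anti-correlated with both.
# Column names and values are arbitrary.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
x = rng.normal(size=100)
df = pd.DataFrame({'a': x, 'b': x + 0.1 * rng.normal(size=100), 'c': -x})

print(corrSmatFunc(df, metric='pearson').round(2))
# anti-correlation still gives high similarity (r**2 near 1 for 'a' vs 'c')
print(corrSmatFunc(df, metric='pearson-signed').round(2))
# anti-correlation now maps to low similarity (near 0 for 'a' vs 'c')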
def rank(values, axis=0, method='average', na_option='keep', ascending=True, pct=False): """ """ if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) ranks = f(values, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) elif values.ndim == 2: f, values = _get_data_algo(values, _rank2d_functions) ranks = f(values, axis=axis, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) return ranks
0dbdb923281f7dbf592cd7bd41615b235b0e0868
3,655,794
def _cmpopts(x, y):
    """Compare two option names.

    The options can be of 2 forms: option_name or group/option_name. Options
    without a group always come first. Options are sorted alphabetically
    inside a group.
    """
    if '/' in x and '/' in y:
        prex = x[:x.find('/')]
        prey = y[:y.find('/')]
        if prex != prey:
            return cmp(prex, prey)
        return cmp(x, y)
    elif '/' in x:
        return 1
    elif '/' in y:
        return -1
    else:
        return cmp(x, y)
9da8f8f5666b2ea3f32eb092c6a3568947655400
3,655,795
def ask(question, choices): """Prompt user for a choice from a list. Return the choice.""" choices_lc = [x.lower() for x in choices] user_choice = "" match = False while not match: print question user_choice = raw_input("[" + "/".join(choices) + "] ? ").strip().lower() for choice in choices_lc: if user_choice.startswith(choice): match = True break return user_choice
8a1f6019554dbb9e1ed6649b1a68040f99960fbe
3,655,796
def get_and_validate_user(username, password): """ Check if user with username/email exists and specified password matchs well with existing user password. if user is valid, user is returned else, corresponding exception is raised. """ user_model = apps.get_model("users", "User") qs = user_model.objects.filter(Q(username=username) | Q(email=username)) if len(qs) == 0: raise WrongArguments("Username or password does not matches user.") user = qs[0] if not user.check_password(password): raise WrongArguments("Username or password does not matches user.") return user
05b6675c12446e961d85b8c39b0437d51a7c40b8
3,655,797