column | dtype | range
hexsha | string | length 40
size | int64 | 5 to 2.06M
ext | string | 10 distinct values
lang | string | 1 distinct value
max_stars_repo_path | string | length 3 to 248
max_stars_repo_name | string | length 5 to 125
max_stars_repo_head_hexsha | string | length 40 to 78
max_stars_repo_licenses | sequence | length 1 to 10
max_stars_count | int64 | 1 to 191k
max_stars_repo_stars_event_min_datetime | string | length 24
max_stars_repo_stars_event_max_datetime | string | length 24
max_issues_repo_path | string | length 3 to 248
max_issues_repo_name | string | length 5 to 125
max_issues_repo_head_hexsha | string | length 40 to 78
max_issues_repo_licenses | sequence | length 1 to 10
max_issues_count | int64 | 1 to 67k
max_issues_repo_issues_event_min_datetime | string | length 24
max_issues_repo_issues_event_max_datetime | string | length 24
max_forks_repo_path | string | length 3 to 248
max_forks_repo_name | string | length 5 to 125
max_forks_repo_head_hexsha | string | length 40 to 78
max_forks_repo_licenses | sequence | length 1 to 10
max_forks_count | int64 | 1 to 105k
max_forks_repo_forks_event_min_datetime | string | length 24
max_forks_repo_forks_event_max_datetime | string | length 24
content | string | length 5 to 2.06M
avg_line_length | float64 | 1 to 1.02M
max_line_length | int64 | 3 to 1.03M
alphanum_fraction | float64 | 0 to 1
count_classes | int64 | 0 to 1.6M
score_classes | float64 | 0 to 1
count_generators | int64 | 0 to 651k
score_generators | float64 | 0 to 1
count_decorators | int64 | 0 to 990k
score_decorators | float64 | 0 to 1
count_async_functions | int64 | 0 to 235k
score_async_functions | float64 | 0 to 1
count_documentation | int64 | 0 to 1.04M
score_documentation | float64 | 0 to 1
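The rows that follow use exactly this column order. As a minimal sketch of how such a table can be consumed (the shard filename below is a placeholder, not part of this listing), the columns can be loaded and filtered with pandas:

```python
# Minimal sketch: load one shard of the dataset and keep well-documented,
# class-heavy Python files. "data-00000.parquet" is a hypothetical shard name.
import pandas as pd

df = pd.read_parquet("data-00000.parquet")

# The score_* columns are normalized to [0, 1]; the count_* columns are raw
# counts, so filter on the scores when comparing files of different sizes.
subset = df[(df["lang"] == "Python")
            & (df["score_documentation"] > 0.3)
            & (df["score_classes"] > 0.5)]

print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```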
hexsha b9a5362ea01805df4bb2ad83d0b9f037b0c75078 | size 481 | ext py | lang Python
max_stars: lib/fmdplugins/list_records.py @ GonzaloAlvarez/py-ga-sysadmin (fbbbbcad36df9f1b3e40328ff48c22bad13a56f4) | licenses ["MIT"] | count 2 | events 2018-01-05T15:32:06.000Z to 2021-06-02T13:15:05.000Z
max_issues: lib/fmdplugins/list_records.py @ GonzaloAlvarez/devops-tools (fbbbbcad36df9f1b3e40328ff48c22bad13a56f4) | licenses ["MIT"] | count 67 | events 2017-01-09T19:39:19.000Z to 2018-02-28T05:33:40.000Z
max_forks: lib/fmdplugins/list_records.py @ GonzaloAlvarez/devops-tools (fbbbbcad36df9f1b3e40328ff48c22bad13a56f4) | licenses ["MIT"] | count null | events null
content:
from lib.fmd.namedentity import NamedEntity
from lib.fmd.decorators import Action, ListStage, GetStage
from lib.exceptions.workflow import EntryException


@Action(ListStage.DATAGATHERING)
def list_records(context, output):
    output = []
    if hasattr(context, 'filter'):
        context.log.debug('Using filter [%s]' % context.filter)
        entries = context.ddb.list(context.filter)
    else:
        entries = context.ddb.list()
    return NamedEntity('records', entries)
avg_line_length 30.0625 | max_line_length 63 | alphanum_fraction 0.719335
classes: count 0, score 0 | generators: count 0, score 0 | decorators: count 325, score 0.675676 | async_functions: count 0, score 0 | documentation: count 36, score 0.074844
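For the row above, the score_* fields are consistent with the corresponding count_* values divided by size, and avg_line_length times the line count recovers size. This is an observation about the rows shown here, not a documented definition of the columns; a quick check:

```python
# Consistency check for the first row (all numbers copied from the row above).
size = 481                  # bytes of content
count_decorators = 325
count_documentation = 36

print(round(count_decorators / size, 6))     # 0.675676, matches score_decorators
print(round(count_documentation / size, 6))  # 0.074844, matches score_documentation
print(size / 30.0625)                        # 16.0, avg_line_length is consistent with a 16-line file
```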
hexsha b9a5aa9a635301ab37ae92c6395e50231bd81a4b | size 6,033 | ext py | lang Python
max_stars: pysoa/server/action/switched.py @ zetahernandez/pysoa (006e55ba877196a42c64f2ff453583d366082d55) | licenses ["Apache-2.0"] | count null | events null
max_issues: pysoa/server/action/switched.py @ zetahernandez/pysoa (006e55ba877196a42c64f2ff453583d366082d55) | licenses ["Apache-2.0"] | count null | events null
max_forks: pysoa/server/action/switched.py @ zetahernandez/pysoa (006e55ba877196a42c64f2ff453583d366082d55) | licenses ["Apache-2.0"] | count null | events null
content:
from __future__ import ( absolute_import, unicode_literals, ) import abc import six from pysoa.server.internal.types import is_switch __all__ = ( 'SwitchedAction', ) def _len(item): # Safe length that won't raise an error on values that don't support length return getattr(item, '__len__', lambda *_: -1)() class _DefaultAction(object): def __int__(self): d = id(self) return d if d < 0 else -d def __eq__(self, other): return getattr(other, '__class__', None) == _DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs, name, bases, body): """ Validate the switch_to_action_map when the class is created, instead of doing it every time the class is instantiated. This identifies problems earlier (on import) and improves performance by not performing this validation every time the action is called. """ cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body) # noinspection PyUnresolvedReferences if bases[0] is not object and ( not cls.switch_to_action_map or not hasattr(cls.switch_to_action_map, '__iter__') or _len(cls.switch_to_action_map) < 2 or any( True for i in cls.switch_to_action_map if not hasattr(i, '__getitem__') or _len(i) != 2 or not is_switch(i[0]) or not callable(i[1]) ) ): raise ValueError( 'Class attribute switch_to_action_map must be an iterable of at least two indexable items, each ' 'with exactly two indexes, where the first element is a switch and the second element is an action ' '(callable).' ) return cls @six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object): """ A specialized action that defers to other, concrete actions based on request switches. Subclasses must not override any methods and must override `switch_to_action_map`. `switch_to_action_map` should be some iterable object that provides `__len__` (such as a tuple [recommended] or list). Its items must be indexable objects that provide `__len__` (such as a tuple [recommended] or list) and have exactly two elements. For each item in `switch_to_action_map`, the first element must be a switch that provides `__int__` (such as an actual integer) or a switch that provides an attribute `value` which, itself, provides `__int__` (or is an int). The second element must be an action, such as an action class (e.g. one that extends `Action`) or any callable that accepts a server settings object and returns a new callable that, itself, accepts an `ActionRequest` object and returns an `ActionResponse` object or raises an `ActionError`. `switch_to_action_map` must have at least two items in it. `SwitchedAction` will iterate over that list, checking the first element (switch) of each item to see if it is enabled in the request. If it is, the second element (the action) of that item will be deferred to. If it finds no items whose switches are enabled, it will use the very last action in `switch_to_action_map`. As such, you can treat the last item as a default, and its switch could simply be `SwitchedAction.DEFAULT_ACTION` (although, this is not required: it could also be a valid switch, and it would still be treated as the default in the case that no other items matched). Example usage: .. code-block:: python class UserActionV1(Action): ... class UserActionV2(Action): ... class UserTransitionAction(SwitchedAction): switch_to_action_map = ( (USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1), ) """ DEFAULT_ACTION = _DefaultAction() switch_to_action_map = () def __init__(self, settings=None): """ Construct a new action. 
Concrete classes should not override this. :param settings: The server settings object :type settings: dict """ if self.__class__ is SwitchedAction: raise TypeError('Cannot instantiate abstract SwitchedAction') self.settings = settings def get_uninitialized_action(self, action_request): """ Get the raw action (such as the action class or the base action callable) without instantiating/calling it, based on the switches in the action request, or the default raw action if no switches were present or no switches matched. :param action_request: The request object :type action_request: EnrichedActionRequest :return: The action :rtype: callable """ last_action = None matched_action = None default_action = None for switch, action in self.switch_to_action_map: if switch == self.DEFAULT_ACTION: default_action = action elif switch and action_request.switches.is_active(switch): matched_action = action break else: last_action = action return matched_action or default_action or last_action def __call__(self, action_request): """ Main entry point for actions from the `Server` (or potentially from tests). Finds the appropriate real action to invoke based on the switches enabled in the request, initializes the action with the server settings, and then calls the action with the request object, returning its response directly. :param action_request: The request object :type action_request: EnrichedActionRequest :return: The response object :rtype: ActionResponse :raise: ActionError, ResponseValidationError """ return self.get_uninitialized_action(action_request)(self.settings)(action_request)
avg_line_length 38.673077 | max_line_length 117 | alphanum_fraction 0.673463
classes: count 5,645, score 0.935687 | generators: count 0, score 0 | decorators: count 4,236, score 0.702138 | async_functions: count 0, score 0 | documentation: count 3,914, score 0.648765
hexsha b9a6e1263697c6f30d94bde78d6313fed9c57e76 | size 542 | ext py | lang Python
max_stars: Seeder/settings/tests.py @ WebarchivCZ/Seeder (1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8) | licenses ["MIT"] | count 8 | events 2017-08-16T19:18:57.000Z to 2022-01-24T10:08:19.000Z
max_issues: Seeder/settings/tests.py @ WebarchivCZ/Seeder (1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8) | licenses ["MIT"] | count 242 | events 2017-02-03T19:15:52.000Z to 2022-03-25T08:02:52.000Z
max_forks: Seeder/settings/tests.py @ WebarchivCZ/Seeder (1958c5d3f6bdcbbdb2c81dcb6abc7f689125b6a8) | licenses ["MIT"] | count 2 | events 2019-03-06T12:36:29.000Z to 2019-07-08T12:52:20.000Z
content:
from .base import *

SECRET_KEY = 'test'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

ALLOWED_HOSTS = ['127.0.0.1']

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'sqlite3.db',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
    },
}

EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
}
avg_line_length 19.357143 | max_line_length 66 | alphanum_fraction 0.605166
classes: count 0, score 0 | generators: count 0, score 0 | decorators: count 0, score 0 | async_functions: count 0, score 0 | documentation: count 285, score 0.52583
hexsha b9a767c55418efb8b98d12205d59e512ca419081 | size 1,860 | ext py | lang Python
max_stars: blobStore.py @ odeke-em/resty (838934033e7eeca521e8c6d8cb2e99778beaa4b9) | licenses ["Apache-2.0"] | count null | events null
max_issues: blobStore.py @ odeke-em/resty (838934033e7eeca521e8c6d8cb2e99778beaa4b9) | licenses ["Apache-2.0"] | count null | events null
max_forks: blobStore.py @ odeke-em/resty (838934033e7eeca521e8c6d8cb2e99778beaa4b9) | licenses ["Apache-2.0"] | count null | events null
content:
#!/usr/bin/env python3
# Author: Emmanuel Odeke <[email protected]>

# This example steps you through using resty & restAssured to save pickled/serialized
# data as a blob and then later re-using it in after deserialization.
# Sample usage might be in collaborative computing ie publish results from an expensive
# computation on one machine so that other machines can load it as live data.

def testSerializer():
    import Serializer
    bs = Serializer.BinarySerializer()
    js = Serializer.JSONSerializer()
    data = dict((i, i) for i in range(10))
    bserial = bs.serialize(data)
    jserial = js.serialize(data)
    bdserial = bs.deserialize(bserial)
    jdserial = js.deserialize(jserial)
    print('bdserial', bdserial)
    ioS = bs.ioStream(bserial)
    ioR = ioS.read()
    print('ioS data from the stream', ioR)

def testCloudPassagePickledVersion():
    from entrails.cloudPassage import CloudPassageHandler
    cc = CloudPassageHandler()
    data = dict((i, i*10) for i in range(9))
    title = 'Dict of items 0-8999, keys i*10'
    res = cc.push(data, title=title, asPickle=True)
    pulledObj = cc.pull(metaData='pickle')
    print('PulledObj', pulledObj, data)
    assert(pulledObj == data)
    rmTry = cc.removeTrace(data, asPickle=True)
    print(rmTry)

def testCloudPassageJSONVersion():
    from entrails.cloudPassage import CloudPassageHandler
    cc = CloudPassageHandler()
    data = dict((str(i), i*10) for i in range(9))
    title = 'Dict of items 0-8999, keys i*10'
    res = cc.push(data, title=title, asPickle=False)
    pulledObj = cc.pull(metaData='json')
    print('PulledObj', pulledObj, data)
    assert(pulledObj == data)
    rmTry = cc.removeTrace(data)
    print(rmTry)

def main():
    testSerializer()
    testCloudPassageJSONVersion()
    testCloudPassagePickledVersion()

if __name__ == '__main__':
    main()
avg_line_length 31 | max_line_length 87 | alphanum_fraction 0.7
classes: count 0, score 0 | generators: count 0, score 0 | decorators: count 0, score 0 | async_functions: count 0, score 0 | documentation: count 532, score 0.286022
hexsha b9a7d3f5b98af28c51ffb55578408fad9a1d3f99 | size 3,066 | ext py | lang Python
max_stars: venv/Lib/site-packages/dataframe/_dataframe_column_set.py @ kavanAdeshara/Expense_Tracker (b3e4810e858a7786e05cda6b91ba674b73b87981) | licenses ["Apache-2.0"] | count null | events null
max_issues: venv/Lib/site-packages/dataframe/_dataframe_column_set.py @ kavanAdeshara/Expense_Tracker (b3e4810e858a7786e05cda6b91ba674b73b87981) | licenses ["Apache-2.0"] | count null | events null
max_forks: venv/Lib/site-packages/dataframe/_dataframe_column_set.py @ kavanAdeshara/Expense_Tracker (b3e4810e858a7786e05cda6b91ba674b73b87981) | licenses ["Apache-2.0"] | count null | events null
content:
# dataframe: a data-frame implementation using method piping # # Copyright (C) 2016 Simon Dirmeier # # This file is part of dataframe. # # dataframe is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # dataframe is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with dataframe. If not, see <http://www.gnu.org/licenses/>. # # # @author = 'Simon Dirmeier' # @email = '[email protected]' from itertools import chain import tabulate from ._dataframe_column import DataFrameColumn from ._dataframe_row import DataFrameRow class DataFrameColumnSet: def __init__(self, **kwargs): self.__data_columns = [] self.__nrow = -1 self.cbind(**kwargs) def __getitem__(self, item): if isinstance(item, int): return self.__data_columns[item] raise ValueError("Item should be integer!") def __iter__(self): for col in self.__data_columns: yield col def __str__(self): stri = "\nA dataframe" ta = [] for col in self.__data_columns: vals = col.values if len(vals) > 10: vals = list(chain(vals[:3], "...", vals[-3:])) ta.append(vals) ta = tabulate.tabulate(zip(*ta), headers=self.colnames) return stri + "\n\n" + ta.__str__() @property def nrow(self): return self.__nrow @property def ncol(self): return len(self.colnames) @property def colnames(self): return [x.colname for x in self.__data_columns] def rows(self, idxs): return [self.row(i) for i in idxs] def row(self, idx): """ Returns DataFrameRow of the DataFrame given its index. :param idx: the index of the row in the DataFrame. :return: returns a DataFrameRow """ return DataFrameRow(idx, [x[idx] for x in self], self.colnames) def which_colnames(self, *args): idx = [] for i in range(len(self.__data_columns)): if self.colnames[i] in args: idx.append(i) return idx def cbind(self, **columns): keys = sorted([x for x in columns.keys()]) for k in keys: self.__cbind(DataFrameColumn(str(k), columns.get(k))) def __cbind(self, column): if column.colname in self.colnames: ValueError("Appending duplicate col-name!") self.__data_columns.append(column) self.__nrow = self.__data_columns[-1].size() for col in self.__data_columns: if col.size() != self.__nrow: raise ValueError("Columns do not have equal lengths!")
avg_line_length 30.356436 | max_line_length 71 | alphanum_fraction 0.63242
classes: count 2,098, score 0.684279 | generators: count 81, score 0.026419 | decorators: count 208, score 0.067841 | async_functions: count 0, score 0 | documentation: count 1,105, score 0.360404
hexsha b9a7d44b00e1b419e797c8637498d8abc23d4def | size 13,322 | ext bzl | lang Python
max_stars: java/image.bzl @ Springworks/rules_docker (b943cd1fe3bf1c6c5fdac1889e952408599cffff) | licenses ["Apache-2.0"] | count null | events null
max_issues: java/image.bzl @ Springworks/rules_docker (b943cd1fe3bf1c6c5fdac1889e952408599cffff) | licenses ["Apache-2.0"] | count null | events null
max_forks: java/image.bzl @ Springworks/rules_docker (b943cd1fe3bf1c6c5fdac1889e952408599cffff) | licenses ["Apache-2.0"] | count null | events null
content:
# Copyright 2017 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A rule for creating a Java container image. The signature of java_image is compatible with java_binary. The signature of war_image is compatible with java_library. """ load( "//container:container.bzl", "container_pull", _repositories = "repositories", ) # Load the resolved digests. load( ":java.bzl", _JAVA_DIGESTS = "DIGESTS", ) load( ":jetty.bzl", _JETTY_DIGESTS = "DIGESTS", ) def repositories(): # Call the core "repositories" function to reduce boilerplate. # This is idempotent if folks call it themselves. _repositories() excludes = native.existing_rules().keys() if "java_image_base" not in excludes: container_pull( name = "java_image_base", registry = "gcr.io", repository = "distroless/java", digest = _JAVA_DIGESTS["latest"], ) if "java_debug_image_base" not in excludes: container_pull( name = "java_debug_image_base", registry = "gcr.io", repository = "distroless/java", digest = _JAVA_DIGESTS["debug"], ) if "jetty_image_base" not in excludes: container_pull( name = "jetty_image_base", registry = "gcr.io", repository = "distroless/java/jetty", digest = _JETTY_DIGESTS["latest"], ) if "jetty_debug_image_base" not in excludes: container_pull( name = "jetty_debug_image_base", registry = "gcr.io", repository = "distroless/java/jetty", digest = _JETTY_DIGESTS["debug"], ) if "servlet_api" not in excludes: native.maven_jar( name = "javax_servlet_api", artifact = "javax.servlet:javax.servlet-api:3.0.1", ) DEFAULT_JAVA_BASE = select({ "@io_bazel_rules_docker//:fastbuild": "@java_image_base//image", "@io_bazel_rules_docker//:debug": "@java_debug_image_base//image", "@io_bazel_rules_docker//:optimized": "@java_image_base//image", "//conditions:default": "@java_image_base//image", }) DEFAULT_JETTY_BASE = select({ "@io_bazel_rules_docker//:fastbuild": "@jetty_image_base//image", "@io_bazel_rules_docker//:debug": "@jetty_debug_image_base//image", "@io_bazel_rules_docker//:optimized": "@jetty_image_base//image", "//conditions:default": "@jetty_image_base//image", }) load( "//container:container.bzl", _container = "container", ) def java_files(f): files = [] if java_common.provider in f: java_provider = f[java_common.provider] files += list(java_provider.transitive_runtime_jars) if hasattr(f, "files"): # a jar file files += list(f.files) return files load( "//lang:image.bzl", "dep_layer_impl", "layer_file_path", ) def _jar_dep_layer_impl(ctx): """Appends a layer for a single dependency's runfiles.""" return dep_layer_impl(ctx, runfiles = java_files) jar_dep_layer = rule( attrs = dict(_container.image.attrs.items() + { # The base image on which to overlay the dependency layers. "base": attr.label(mandatory = True), # The dependency whose runfiles we're appending. "dep": attr.label(mandatory = True), # Whether to lay out each dependency in a manner that is agnostic # of the binary in which it is participating. 
This can increase # sharing of the dependency's layer across images, but requires a # symlink forest in the app layers. "agnostic_dep_layout": attr.bool(default = True), # Override the defaults. "directory": attr.string(default = "/app"), # https://github.com/bazelbuild/bazel/issues/2176 "data_path": attr.string(default = "."), }.items()), executable = True, outputs = _container.image.outputs, implementation = _jar_dep_layer_impl, ) def _jar_app_layer_impl(ctx): """Appends the app layer with all remaining runfiles.""" available = depset() for jar in ctx.attr.jar_layers: available += java_files(jar) # We compute the set of unavailable stuff by walking deps # in the same way, adding in our binary and then subtracting # out what it available. unavailable = depset() for jar in ctx.attr.deps + ctx.attr.runtime_deps: unavailable += java_files(jar) unavailable += java_files(ctx.attr.binary) unavailable = [x for x in unavailable if x not in available] classpath = ":".join([ layer_file_path(ctx, x) for x in available + unavailable ]) # Classpaths can grow long and there is a limit on the length of a # command line, so mitigate this by always writing the classpath out # to a file instead. classpath_file = ctx.new_file(ctx.attr.name + ".classpath") ctx.actions.write(classpath_file, classpath) binary_path = layer_file_path(ctx, ctx.files.binary[0]) classpath_path = layer_file_path(ctx, classpath_file) entrypoint = [ "/usr/bin/java", "-cp", # Support optionally passing the classpath as a file. "@" + classpath_path if ctx.attr._classpath_as_file else classpath, ] + ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args file_map = { layer_file_path(ctx, f): f for f in unavailable + [classpath_file] } return _container.image.implementation( ctx, # We use all absolute paths. directory = "/", file_map = file_map, entrypoint = entrypoint, ) jar_app_layer = rule( attrs = dict(_container.image.attrs.items() + { # The binary target for which we are synthesizing an image. "binary": attr.label(mandatory = True), # The full list of dependencies that have their own layers # factored into our base. "jar_layers": attr.label_list(), # The rest of the dependencies. "deps": attr.label_list(), "runtime_deps": attr.label_list(), "jvm_flags": attr.string_list(), # The base image on which to overlay the dependency layers. "base": attr.label(mandatory = True), # The main class to invoke on startup. "main_class": attr.string(mandatory = True), # Whether to lay out each dependency in a manner that is agnostic # of the binary in which it is participating. This can increase # sharing of the dependency's layer across images, but requires a # symlink forest in the app layers. "agnostic_dep_layout": attr.bool(default = True), # Whether the classpath should be passed as a file. "_classpath_as_file": attr.bool(default = False), # Override the defaults. "directory": attr.string(default = "/app"), # https://github.com/bazelbuild/bazel/issues/2176 "data_path": attr.string(default = "."), "legacy_run_behavior": attr.bool(default = False), }.items()), executable = True, outputs = _container.image.outputs, implementation = _jar_app_layer_impl, ) def java_image( name, base = None, main_class = None, deps = [], runtime_deps = [], layers = [], jvm_flags = [], **kwargs): """Builds a container image overlaying the java_binary. Args: layers: Augments "deps" with dependencies that should be put into their own layers. **kwargs: See java_binary. 
""" binary_name = name + ".binary" native.java_binary( name = binary_name, main_class = main_class, # If the rule is turning a JAR built with java_library into # a binary, then it will appear in runtime_deps. We are # not allowed to pass deps (even []) if there is no srcs # kwarg. deps = (deps + layers) or None, runtime_deps = runtime_deps, jvm_flags = jvm_flags, **kwargs ) base = base or DEFAULT_JAVA_BASE for index, dep in enumerate(layers): this_name = "%s.%d" % (name, index) jar_dep_layer(name = this_name, base = base, dep = dep) base = this_name visibility = kwargs.get("visibility", None) jar_app_layer( name = name, base = base, binary = binary_name, main_class = main_class, jvm_flags = jvm_flags, deps = deps, runtime_deps = runtime_deps, jar_layers = layers, visibility = visibility, args = kwargs.get("args"), ) def _war_dep_layer_impl(ctx): """Appends a layer for a single dependency's runfiles.""" # TODO(mattmoor): Today we run the risk of filenames colliding when # they get flattened. Instead of just flattening and using basename # we should use a file_map based scheme. return _container.image.implementation( ctx, files = java_files(ctx.attr.dep), ) _war_dep_layer = rule( attrs = dict(_container.image.attrs.items() + { # The base image on which to overlay the dependency layers. "base": attr.label(mandatory = True), # The dependency whose runfiles we're appending. "dep": attr.label(mandatory = True), # Whether to lay out each dependency in a manner that is agnostic # of the binary in which it is participating. This can increase # sharing of the dependency's layer across images, but requires a # symlink forest in the app layers. "agnostic_dep_layout": attr.bool(default = True), # Override the defaults. "directory": attr.string(default = "/jetty/webapps/ROOT/WEB-INF/lib"), # WE WANT PATHS FLATTENED # "data_path": attr.string(default = "."), }.items()), executable = True, outputs = _container.image.outputs, implementation = _war_dep_layer_impl, ) def _war_app_layer_impl(ctx): """Appends the app layer with all remaining runfiles.""" available = depset() for jar in ctx.attr.jar_layers: available += java_files(jar) # This is based on rules_appengine's WAR rules. transitive_deps = depset() transitive_deps += java_files(ctx.attr.library) # TODO(mattmoor): Handle data files. # If we start putting libs in servlet-agnostic paths, # then consider adding symlinks here. files = [d for d in transitive_deps if d not in available] return _container.image.implementation(ctx, files = files) _war_app_layer = rule( attrs = dict(_container.image.attrs.items() + { # The library target for which we are synthesizing an image. "library": attr.label(mandatory = True), # The full list of dependencies that have their own layers # factored into our base. "jar_layers": attr.label_list(), # The base image on which to overlay the dependency layers. "base": attr.label(mandatory = True), "entrypoint": attr.string_list(default = []), # Whether to lay out each dependency in a manner that is agnostic # of the binary in which it is participating. This can increase # sharing of the dependency's layer across images, but requires a # symlink forest in the app layers. "agnostic_dep_layout": attr.bool(default = True), # Override the defaults. 
"directory": attr.string(default = "/jetty/webapps/ROOT/WEB-INF/lib"), # WE WANT PATHS FLATTENED # "data_path": attr.string(default = "."), "legacy_run_behavior": attr.bool(default = False), }.items()), executable = True, outputs = _container.image.outputs, implementation = _war_app_layer_impl, ) def war_image(name, base = None, deps = [], layers = [], **kwargs): """Builds a container image overlaying the java_library as an exploded WAR. TODO(mattmoor): For `bazel run` of this to be useful, we need to be able to ctrl-C it and have the container actually terminate. More information: https://github.com/bazelbuild/bazel/issues/3519 Args: layers: Augments "deps" with dependencies that should be put into their own layers. **kwargs: See java_library. """ library_name = name + ".library" native.java_library(name = library_name, deps = deps + layers, **kwargs) base = base or DEFAULT_JETTY_BASE for index, dep in enumerate(layers): this_name = "%s.%d" % (name, index) _war_dep_layer(name = this_name, base = base, dep = dep) base = this_name visibility = kwargs.get("visibility", None) tags = kwargs.get("tags", None) _war_app_layer( name = name, base = base, library = library_name, jar_layers = layers, visibility = visibility, tags = tags, )
avg_line_length 34.246787 | max_line_length 79 | alphanum_fraction 0.642546
classes: count 0, score 0 | generators: count 0, score 0 | decorators: count 0, score 0 | async_functions: count 0, score 0 | documentation: count 6,215, score 0.466522
hexsha b9a831ae9aec7e87ced37e12721727df9e75bb48 | size 17,427 | ext py | lang Python
max_stars: cupyx/jit/_builtin_funcs.py @ khushi-411/cupy (b5221a478c800c5e60eef65545467de9eb00c0d9) | licenses ["MIT"] | count null | events null
max_issues: cupyx/jit/_builtin_funcs.py @ khushi-411/cupy (b5221a478c800c5e60eef65545467de9eb00c0d9) | licenses ["MIT"] | count null | events null
max_forks: cupyx/jit/_builtin_funcs.py @ khushi-411/cupy (b5221a478c800c5e60eef65545467de9eb00c0d9) | licenses ["MIT"] | count null | events null
content:
import warnings import cupy from cupy_backends.cuda.api import runtime from cupy.cuda import device from cupyx.jit import _cuda_types from cupyx.jit._internal_types import BuiltinFunc from cupyx.jit._internal_types import Data from cupyx.jit._internal_types import Constant from cupyx.jit._internal_types import Range from cupyx.jit import _compile from functools import reduce class RangeFunc(BuiltinFunc): def __call__(self, *args, unroll=None): """Range with loop unrolling support. Args: start (int): Same as that of built-in :obj:`range`. stop (int): Same as that of built-in :obj:`range`. step (int): Same as that of built-in :obj:`range`. unroll (int or bool or None): - If `True`, add ``#pragma unroll`` directive before the loop. - If `False`, add ``#pragma unroll(1)`` directive before the loop to disable unrolling. - If an `int`, add ``#pragma unroll(n)`` directive before the loop, where the integer ``n`` means the number of iterations to unroll. - If `None` (default), leave the control of loop unrolling to the compiler (no ``#pragma``). .. seealso:: `#pragma unroll`_ .. _#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll """ super().__call__() def call(self, env, *args, unroll=None): if len(args) == 0: raise TypeError('range expected at least 1 argument, got 0') elif len(args) == 1: start, stop, step = Constant(0), args[0], Constant(1) elif len(args) == 2: start, stop, step = args[0], args[1], Constant(1) elif len(args) == 3: start, stop, step = args else: raise TypeError( f'range expected at most 3 argument, got {len(args)}') if unroll is not None: if not all(isinstance(x, Constant) for x in (start, stop, step, unroll)): raise TypeError( 'loop unrolling requires constant start, stop, step and ' 'unroll value') unroll = unroll.obj if not (isinstance(unroll, int) or isinstance(unroll, bool)): raise TypeError( 'unroll value expected to be of type int, ' f'got {type(unroll).__name__}') if unroll is False: unroll = 1 if not (unroll is True or 0 < unroll < 1 << 31): warnings.warn( 'loop unrolling is ignored as the unroll value is ' 'non-positive or greater than INT_MAX') if isinstance(step, Constant): step_is_positive = step.obj >= 0 elif step.ctype.dtype.kind == 'u': step_is_positive = True else: step_is_positive = None stop = Data.init(stop, env) start = Data.init(start, env) step = Data.init(step, env) if start.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for integer type.') if stop.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for integer type.') if step.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for integer type.') if env.mode == 'numpy': ctype = _cuda_types.Scalar(int) elif env.mode == 'cuda': ctype = stop.ctype else: assert False return Range(start, stop, step, ctype, step_is_positive, unroll=unroll) class LenFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) != 1: raise TypeError(f'len() expects only 1 argument, got {len(args)}') if kwds: raise TypeError('keyword arguments are not supported') arg = args[0] if not isinstance(arg.ctype, _cuda_types.CArray): raise TypeError('len() supports only array type') if not arg.ctype.ndim: raise TypeError('len() of unsized array') return Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class MinFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) < 2: raise TypeError( f'min() expects at least 2 arguments, got {len(args)}') if kwds: raise TypeError('keyword arguments are not 
supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.minimum, (a, b), None, env), args) class MaxFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) < 2: raise TypeError( f'max() expects at least 2 arguments, got {len(args)}') if kwds: raise TypeError('keyword arguments are not supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.maximum, (a, b), None, env), args) class SyncThreads(BuiltinFunc): def __call__(self): """Calls ``__syncthreads()``. .. seealso:: `Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions """ super().__call__() def call_const(self, env): return Data('__syncthreads()', _cuda_types.void) class SyncWarp(BuiltinFunc): def __call__(self, *, mask=0xffffffff): """Calls ``__syncwarp()``. Args: mask (int): Active threads in a warp. Default is 0xffffffff. .. seealso:: `Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions """ super().__call__() def call(self, env, *, mask=None): if runtime.is_hip: if mask is not None: warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning) mask = None if mask: if isinstance(mask, Constant): if not (0x0 <= mask.obj <= 0xffffffff): raise ValueError('mask is out of range') mask = _compile._astype_scalar( mask, _cuda_types.int32, 'same_kind', env) mask = Data.init(mask, env) code = f'__syncwarp({mask.code})' else: code = '__syncwarp()' return Data(code, _cuda_types.void) class SharedMemory(BuiltinFunc): def __call__(self, dtype, size, alignment=None): """Allocates shared memory and returns it as a 1-D array. Args: dtype (dtype): The dtype of the returned array. size (int or None): If ``int`` type, the size of static shared memory. If ``None``, declares the shared memory with extern specifier. alignment (int or None): Enforce the alignment via __align__(N). """ super().__call__() def call_const(self, env, dtype, size, alignment=None): name = env.get_fresh_variable_name(prefix='_smem') child_type = _cuda_types.Scalar(dtype) while env[name] is not None: name = env.get_fresh_variable_name(prefix='_smem') # retry var = Data(name, _cuda_types.SharedMem(child_type, size, alignment)) env.decls[name] = var env.locals[name] = var return Data(name, _cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc): def __init__(self, op, dtypes): self._op = op self._name = 'atomic' + op self._dtypes = dtypes doc = f"""Calls the ``{self._name}`` function to operate atomically on ``array[index]``. Please refer to `Atomic Functions`_ for detailed explanation. Args: array: A :class:`cupy.ndarray` to index over. index: A valid index such that the address to the corresponding array element ``array[index]`` can be computed. value: Represent the value to use for the specified operation. For the case of :obj:`atomic_cas`, this is the value for ``array[index]`` to compare with. alt_value: Only used in :obj:`atomic_cas` to represent the value to swap to. .. seealso:: `Numba's corresponding atomic functions`_ .. _Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. 
_Numba's corresponding atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations """ self.__doc__ = doc def __call__(self, array, index, value, alt_value=None): super().__call__() def call(self, env, array, index, value, value2=None): name = self._name op = self._op array = Data.init(array, env) if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)): raise TypeError('The first argument must be of array type.') target = _compile._indexing(array, index, env) ctype = target.ctype if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}` does not support {ctype.dtype} input.') # On HIP, 'e' is not supported and we will never reach here if (op == 'Add' and ctype.dtype.char == 'e' and runtime.runtimeGetVersion() < 10000): raise RuntimeError( 'float16 atomic operation is not supported before CUDA 10.0.') value = _compile._astype_scalar(value, ctype, 'same_kind', env) value = Data.init(value, env) if op == 'CAS': assert value2 is not None # On HIP, 'H' is not supported and we will never reach here if ctype.dtype.char == 'H': if runtime.runtimeGetVersion() < 10010: raise RuntimeError( 'uint16 atomic operation is not supported before ' 'CUDA 10.1') if int(device.get_compute_capability()) < 70: raise RuntimeError( 'uint16 atomic operation is not supported before ' 'sm_70') value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env) value2 = Data.init(value2, env) code = f'{name}(&{target.code}, {value.code}, {value2.code})' else: assert value2 is None code = f'{name}(&{target.code}, {value.code})' return Data(code, ctype) class GridFunc(BuiltinFunc): def __init__(self, mode): if mode == 'grid': self._desc = 'Compute the thread index in the grid.' self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x' self._link = 'numba.cuda.grid' self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}' elif mode == 'gridsize': self._desc = 'Compute the grid size.' self._eq = 'jit.blockDim.x * jit.gridDim.x' self._link = 'numba.cuda.gridsize' self._code = 'blockDim.{n} * gridDim.{n}' else: raise ValueError('unsupported function') doc = f""" {self._desc} Computation of the first integer is as follows:: {self._eq} and for the other two integers the ``y`` and ``z`` attributes are used. Args: ndim (int): The dimension of the grid. Only 1, 2, or 3 is allowed. Returns: int or tuple: If ``ndim`` is 1, an integer is returned, otherwise a tuple. .. note:: This function follows the convention of Numba's :func:`{self._link}`. """ self.__doc__ = doc def __call__(self, ndim): super().__call__() def call_const(self, env, ndim): if not isinstance(ndim, int): raise TypeError('ndim must be an integer') # Numba convention: for 1D we return a single variable, # otherwise a tuple if ndim == 1: return Data(self._code.format(n='x'), _cuda_types.uint32) elif ndim == 2: dims = ('x', 'y') elif ndim == 3: dims = ('x', 'y', 'z') else: raise ValueError('Only ndim=1,2,3 are supported') elts_code = ', '.join(self._code.format(n=n) for n in dims) ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})', ctype) class WarpShuffleOp(BuiltinFunc): def __init__(self, op, dtypes): self._op = op self._name = '__shfl_' + (op + '_' if op else '') + 'sync' self._dtypes = dtypes doc = f"""Calls the ``{self._name}`` function. Please refer to `Warp Shuffle Functions`_ for detailed explanation. .. 
_Warp Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions """ self.__doc__ = doc def __call__(self, mask, var, val_id, *, width=32): super().__call__() def call(self, env, mask, var, val_id, *, width=None): name = self._name var = Data.init(var, env) ctype = var.ctype if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}` does not support {ctype.dtype} input.') try: mask = mask.obj except Exception: raise TypeError('mask must be an integer') if runtime.is_hip: warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning) elif not (0x0 <= mask <= 0xffffffff): raise ValueError('mask is out of range') # val_id refers to "delta" for shfl_{up, down}, "srcLane" for shfl, and # "laneMask" for shfl_xor if self._op in ('up', 'down'): val_id_t = _cuda_types.uint32 else: val_id_t = _cuda_types.int32 val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env) val_id = Data.init(val_id, env) if width: if isinstance(width, Constant): if width.obj not in (2, 4, 8, 16, 32): raise ValueError('width needs to be power of 2') else: width = Constant(64) if runtime.is_hip else Constant(32) width = _compile._astype_scalar( width, _cuda_types.int32, 'same_kind', env) width = Data.init(width, env) code = f'{name}({hex(mask)}, {var.code}, {val_id.code}' code += f', {width.code})' return Data(code, ctype) class LaneID(BuiltinFunc): def __call__(self): """Returns the lane ID of the calling thread, ranging in ``[0, jit.warpsize)``. .. note:: Unlike :obj:`numba.cuda.laneid`, this is a callable function instead of a property. """ super().__call__() def _get_preamble(self): preamble = '__device__ __forceinline__ unsigned int LaneId() {' if not runtime.is_hip: # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble += """ unsigned int ret; asm ("mov.u32 %0, %%laneid;" : "=r"(ret) ); return ret; } """ else: # defined in hip/hcc_detail/device_functions.h preamble += """ return __lane_id(); } """ return preamble def call_const(self, env): env.generated.add_code(self._get_preamble()) return Data('LaneId()', _cuda_types.uint32) builtin_functions_dict = { range: RangeFunc(), len: LenFunc(), min: MinFunc(), max: MaxFunc(), } range_ = RangeFunc() syncthreads = SyncThreads() syncwarp = SyncWarp() shared_memory = SharedMemory() grid = GridFunc('grid') gridsize = GridFunc('gridsize') laneid = LaneID() # atomic functions atomic_add = AtomicOp( 'Add', ('int32', 'uint32', 'uint64', 'float32', 'float64') + (() if runtime.is_hip else ('float16',))) atomic_sub = AtomicOp( 'Sub', ('int32', 'uint32')) atomic_exch = AtomicOp( 'Exch', ('int32', 'uint32', 'uint64', 'float32')) atomic_min = AtomicOp( 'Min', ('int32', 'uint32', 'uint64')) atomic_max = AtomicOp( 'Max', ('int32', 'uint32', 'uint64')) atomic_inc = AtomicOp( 'Inc', ('uint32',)) atomic_dec = AtomicOp( 'Dec', ('uint32',)) atomic_cas = AtomicOp( 'CAS', ('int32', 'uint32', 'uint64') + (() if runtime.is_hip else ('uint16',))) atomic_and = AtomicOp( 'And', ('int32', 'uint32', 'uint64')) atomic_or = AtomicOp( 'Or', ('int32', 'uint32', 'uint64')) atomic_xor = AtomicOp( 'Xor', ('int32', 'uint32', 'uint64')) # warp-shuffle functions _shfl_dtypes = ( ('int32', 'uint32', 'int64', 'float32', 'float64') + (() if runtime.is_hip else ('uint64', 'float16'))) shfl_sync = WarpShuffleOp('', _shfl_dtypes) shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes) shfl_xor_sync = WarpShuffleOp('xor', _shfl_dtypes)
avg_line_length 35.206061 | max_line_length 115 | alphanum_fraction 0.569748
classes: count 15,546, score 0.892064 | generators: count 0, score 0 | decorators: count 0, score 0 | async_functions: count 0, score 0 | documentation: count 7,265, score 0.416882
hexsha b9aa6c3c1ecb2355209fd4db4f58118befbffa8a | size 225 | ext py | lang Python
max_stars: python-basic-grammer/python-basic/02-python-variables-and-string/string_strip_demo.py @ jinrunheng/base-of-python (595bdbc8bfaf2136d8f1f9ea82c03b84aeaf0a39) | licenses ["Apache-2.0"] | count null | events null
max_issues: python-basic-grammer/python-basic/02-python-variables-and-string/string_strip_demo.py @ jinrunheng/base-of-python (595bdbc8bfaf2136d8f1f9ea82c03b84aeaf0a39) | licenses ["Apache-2.0"] | count null | events null
max_forks: python-basic-grammer/python-basic/02-python-variables-and-string/string_strip_demo.py @ jinrunheng/base-of-python (595bdbc8bfaf2136d8f1f9ea82c03b84aeaf0a39) | licenses ["Apache-2.0"] | count null | events null
content:
# 字符串删除空白 (strip whitespace from a string)
str1 = " hello "
print(str1)
print(len(str1))
# 去除两端的空格 (remove spaces from both ends)
print(str1.strip())
print(len(str1.strip()))
# 去除左侧的空格 (remove spaces from the left)
print(str1.lstrip())
print(len(str1.lstrip()))
# 去除右侧的空格 (remove spaces from the right)
print(str1.rstrip())
print(len(str1.rstrip()))
avg_line_length 16.071429 | max_line_length 25 | alphanum_fraction 0.688889
classes: count 0, score 0 | generators: count 0, score 0 | decorators: count 0, score 0 | async_functions: count 0, score 0 | documentation: count 101, score 0.359431
hexsha b9aaf0198d21a1cb3a68b8836041445460cf7efd | size 379 | ext py | lang Python
max_stars: bruges/util/__init__.py @ hyperiongeo/bruges (6d9a3aae86aaa53107caaa20e9aafa390358b0f8) | licenses ["Apache-2.0"] | count null | events null
max_issues: bruges/util/__init__.py @ hyperiongeo/bruges (6d9a3aae86aaa53107caaa20e9aafa390358b0f8) | licenses ["Apache-2.0"] | count null | events null
max_forks: bruges/util/__init__.py @ hyperiongeo/bruges (6d9a3aae86aaa53107caaa20e9aafa390358b0f8) | licenses ["Apache-2.0"] | count null | events null
content:
# -*- coding: utf-8 -*-
from .util import rms
from .util import moving_average
from .util import moving_avg_conv
from .util import moving_avg_fft
from .util import normalize
from .util import next_pow2
from .util import top_and_tail
from .util import extrapolate
from .util import nearest
from .util import deprecated
from .util import apply_along_axis
from .util import sigmoid
avg_line_length 27.071429 | max_line_length 34 | alphanum_fraction 0.804749
classes: count 0, score 0 | generators: count 0, score 0 | decorators: count 0, score 0 | async_functions: count 0, score 0 | documentation: count 23, score 0.060686
hexsha b9acae3f6c9a11754c72065d93acff3857609af2 | size 5,423 | ext py | lang Python
max_stars: toontown/estate/DistributedHouseDoor.py @ CrankySupertoon01/Toontown-2 (60893d104528a8e7eb4aced5d0015f22e203466d) | licenses ["MIT"] | count 1 | events 2021-02-13T22:40:50.000Z to 2021-02-13T22:40:50.000Z
max_issues: toontown/estate/DistributedHouseDoor.py @ CrankySupertoonArchive/Toontown-2 (60893d104528a8e7eb4aced5d0015f22e203466d) | licenses ["MIT"] | count 1 | events 2018-07-28T20:07:04.000Z to 2018-07-30T18:28:34.000Z
max_forks: toontown/estate/DistributedHouseDoor.py @ CrankySupertoonArchive/Toontown-2 (60893d104528a8e7eb4aced5d0015f22e203466d) | licenses ["MIT"] | count 2 | events 2019-12-02T01:39:10.000Z to 2021-02-13T22:41:00.000Z
content:
from toontown.toonbase.ToonBaseGlobal import * from panda3d.core import * from direct.interval.IntervalGlobal import * from direct.distributed.ClockDelta import * from direct.distributed import DistributedObject from toontown.toonbase import ToontownGlobals from direct.directnotify import DirectNotifyGlobal from direct.showbase.MessengerGlobal import messenger from direct.fsm import ClassicFSM from toontown.building import DistributedDoor from toontown.hood import ZoneUtil from toontown.suit import Suit from toontown.building import FADoorCodes from toontown.building import DoorTypes from toontown.estate.DistributedHouse import DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self, cr): DistributedDoor.DistributedDoor.__init__(self, cr) def disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def setZoneIdAndBlock(self, zoneId, block): self.houseId = block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block) def getTriggerName(self): return 'door_trigger_' + str(self.houseId) def hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except: pass def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if self.doorType == DoorTypes.EXT_STANDARD: house = base.cr.doId2do.get(self.houseId) if not isinstance(house, DistributedHouse): self.notify.error('tried to use {0} as house'.format(house.__class__.__name__)) if house and house.house_loaded: self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse) elif self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse) else: self.__gotRelatedHouse() def __gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts() building = self.getBuilding() doorTrigger = building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading = 0 def getBuilding(self, allowEmpty = False): if 'building' not in self.__dict__: if self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') self.building = door.getParent() elif self.doorType == DoorTypes.EXT_STANDARD: if self.houseId: self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if allowEmpty: return self.building return self.building def isInterior(self): if self.doorType == DoorTypes.INT_STANDARD: return 1 return 0 def getDoorNodePath(self): if self.doorType == DoorTypes.INT_STANDARD: otherNP = render.find('**/door_origin') elif self.doorType == DoorTypes.EXT_STANDARD: building = self.getBuilding() otherNP = building.find('**/door') if otherNP.isEmpty(): otherNP = building.find('**/door_origin') else: self.notify.error('No such door type as ' + str(self.doorType)) return otherNP def enterClosing(self, ts): doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did not find doorFrameHoleRight') return rightDoor = self.findDoorNode('rightDoor') if rightDoor.isEmpty(): self.notify.warning('enterClosing(): did not find rightDoor') return otherNP = self.getDoorNodePath() trackName = 'doorClose-%d' % self.doId if self.rightSwing: h = 
100 else: h = -100 self.finishDoorTrack() self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName) self.doorTrack.start(ts) if hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId = self.otherZoneId if self.doorType == DoorTypes.EXT_STANDARD: whereTo = 'house' else: whereTo = 'estate' request = {'loader': 'safeZoneLoader', 'where': whereTo, 'how': 'doorIn', 'hoodId': ToontownGlobals.MyEstate, 'zoneId': zoneId, 'shardId': None, 'avId': -1, 'allowRedirect': 0, 'doorDoId': self.otherDoId} messenger.send('doorDoneEvent', [request]) return
avg_line_length 40.17037 | max_line_length 299 | alphanum_fraction 0.638761
classes: count 4,768, score 0.879218 | generators: count 0, score 0 | decorators: count 0, score 0 | async_functions: count 0, score 0 | documentation: count 559, score 0.103079
hexsha b9ad055e162f0001e288ab22dec6a5a4746fd51d | size 2,786 | ext py | lang Python
max_stars: Neuro-Cognitive Models/Runs/Nonhier_run/res_nonhier.py @ AGhaderi/spatial_attenNCM (1f7edf17f55d804d2ae3360d23623c9ab5035518) | licenses ["MIT"] | count null | events null
max_issues: Neuro-Cognitive Models/Runs/Nonhier_run/res_nonhier.py @ AGhaderi/spatial_attenNCM (1f7edf17f55d804d2ae3360d23623c9ab5035518) | licenses ["MIT"] | count null | events null
max_forks: Neuro-Cognitive Models/Runs/Nonhier_run/res_nonhier.py @ AGhaderi/spatial_attenNCM (1f7edf17f55d804d2ae3360d23623c9ab5035518) | licenses ["MIT"] | count null | events null
content:
#!/home/a.ghaderi/.conda/envs/envjm/bin/python
# Model 2
import pystan
import pandas as pd
import numpy as np
import sys
sys.path.append('../../')
import utils

parts = 1

data = utils.get_data()  # loading dateset
data = data[data['participant'] == parts]

mis = np.where((data['n200lat'] < .101) | (data['n200lat'] > .248))[0]  # missing data for n200lat
obs = np.where((data['n200lat'] > .101) & (data['n200lat'] < .248))[0]  # observation and missing data for n200lat
N_mis = mis.shape[0]  # number of missing data
N_obs = obs.shape[0]  # number of observed data

modelfile = '../../stans/res_nonhier.stan'  # reading the model span
f = open(modelfile, 'r')
model_wiener = f.read()
sm = pystan.StanModel(model_code=model_wiener)  # Compile the model stan

ncohers = 2  # Number of coherence conditions
nspats = 2   # Number of spatial conditions
nconds = 4   # Number of conditions

y = data['y'].to_numpy()
cond_coher = data['cond_coher'].to_numpy()
cond_spat = data['cond_spat'].to_numpy()
conds = data['conds'].to_numpy()
n200lat = data['n200lat'].to_numpy()

# set inistial data for molde span
data_winner = {'N_obs': N_obs,      # Number of trial-level observations
               'N_mis': N_mis,      # Number of trial-level mising data
               'ncohers': ncohers,  # Number of coherence conditions
               'nspats': nspats,    # Number of spatial conditions
               'nconds': nconds,    # Number of conditions
               'y': np.concatenate([y[obs], y[mis]]),                          # acc*rt in seconds for obervation and missing data
               'cond_coher': np.concatenate([cond_coher[obs], cond_coher[mis]]),  # Coherence index for each trial
               'cond_spat': np.concatenate([cond_spat[obs], cond_spat[mis]]),     # sptial index for each trial
               'conds': np.concatenate([conds[obs], conds[mis]]),                 # sptial index for each trial
               'n200lat_obs': n200lat[obs]}  # n200 latency for each trial observation

# setting MCMC arguments
niter = 10000
nwarmup = 4000
nchains = 1
thin = 1
initials = []  # initial sampling
for c in range(0, nchains):
    chaininit = {
        'delta': np.random.uniform(1, 3, size=ncohers),
        'alpha': np.random.uniform(.5, 1.),
        'eta': np.random.uniform(.01, .2),
        'res': np.random.uniform(.01, .02, size=nspats),
        'n200sub': np.random.uniform(.11, .2, size=nconds),
        'lambda': np.random.uniform(.01, .02),
        'n200lat_mis': np.random.uniform(.11, .2, size=N_mis)
    }
    initials.append(chaininit)

# Train the model and generate samples
fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials)

utils.to_pickle(stan_model=sm, stan_fit=fit, save_path='../../save/nonhier/' + str(parts) + '_res_nonhier.pkl')
avg_line_length 39.8 | max_line_length 116 | alphanum_fraction 0.648959
classes: count 0, score 0 | generators: count 0, score 0 | decorators: count 0, score 0 | async_functions: count 0, score 0 | documentation: count 1,068, score 0.383345
hexsha b9adc3a3c0f82e03cf53dd13486c80b1bb9dbf85 | size 6,691 | ext py | lang Python
max_stars: rq_dashboard/dashboard.py @ refgenomics/rq-dashboard (cdfadd2b9aa9a66b0594fd5573e3c45fa8643f05) | licenses ["BSD-2-Clause-FreeBSD"] | count null | events null
max_issues: rq_dashboard/dashboard.py @ refgenomics/rq-dashboard (cdfadd2b9aa9a66b0594fd5573e3c45fa8643f05) | licenses ["BSD-2-Clause-FreeBSD"] | count null | events null
max_forks: rq_dashboard/dashboard.py @ refgenomics/rq-dashboard (cdfadd2b9aa9a66b0594fd5573e3c45fa8643f05) | licenses ["BSD-2-Clause-FreeBSD"] | count null | events null
content:
from redis import Redis from redis import from_url from rq import push_connection, pop_connection from rq.job import Job from functools import wraps import times from flask import Blueprint from flask import current_app, url_for, abort from flask import render_template from rq import Queue, Worker from rq import cancel_job, requeue_job from rq import get_failed_queue from math import ceil dashboard = Blueprint('rq_dashboard', __name__, template_folder='templates', static_folder='static', ) @dashboard.before_request def authentication_hook(): """ Allow the parent app to authenticate user's access to the dashboard with it's own auth_handler method that must return True or False """ auth_handler = current_app.extensions['rq-dashboard'].auth_handler if auth_handler and not auth_handler(): abort(401) @dashboard.before_app_first_request def setup_rq_connection(): if current_app.config.get('REDIS_URL'): current_app.redis_conn = from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request def push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request def pop_rq_connection(exception=None): pop_connection() def jsonify(f): @wraps(f) def _wrapped(*args, **kwargs): from flask import jsonify as flask_jsonify try: result_dict = f(*args, **kwargs) except Exception as e: result_dict = dict(status='error') if current_app.config['DEBUG']: result_dict['reason'] = str(e) from traceback import format_exc result_dict['exc_info'] = format_exc() return flask_jsonify(**result_dict) return _wrapped def serialize_queues(queues): return [dict(name=q.name, count=q.count, url=url_for('.overview', queue_name=q.name)) for q in queues] def serialize_date(dt): if dt is None: return None return times.format(dt, 'UTC') def serialize_job(job): return dict( id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result, exc_info=job.exc_info, description=job.description) def remove_none_values(input_dict): return dict([ (k,v) for k,v in input_dict.items() if v is not None ]) def pagination_window(total_items, cur_page, per_page=5, window_size=10): all_pages = range(1, int(ceil(total_items / float(per_page))) + 1) results = all_pages if (window_size >= 1): pages_window_start = int(max(0, min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size / 2.0)))) pages_window_end = int(pages_window_start + window_size) result = all_pages[pages_window_start:pages_window_end] return result @dashboard.route('/', defaults={'queue_name': None, 'page': '1'}) @dashboard.route('/<queue_name>', defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>') def overview(queue_name, page): if queue_name is None: # Show the failed queue by default if it contains any jobs failed = Queue('failed') if not failed.is_empty(): queue = failed else: queue = Queue() else: queue = Queue(queue_name) return render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue, page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify def cancel_job_view(job_id): rq_job = Job.fetch(job_id) if rq_job.status == "failed": rq_job.delete() else: rq_job.cancel() return 
dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify def requeue_job_view(job_id): requeue_job(job_id) return dict(status='OK') @dashboard.route('/requeue-all', methods=['GET', 'POST']) @jsonify def requeue_all(): fq = get_failed_queue() job_ids = fq.job_ids count = len(job_ids) for job_id in job_ids: requeue_job(job_id) return dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify def empty_queue(queue_name): q = Queue(queue_name) q.empty() return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify def compact_queue(queue_name): q = Queue(queue_name) q.compact() return dict(status='OK') @dashboard.route('/queues.json') @jsonify def list_queues(): queues = serialize_queues(sorted(Queue.all())) return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def list_jobs(queue_name, page): current_page = int(page) queue = Queue(queue_name) per_page = 5 total_items = queue.count pages_numbers_in_window = pagination_window(total_items, current_page, per_page) pages_in_window = [ dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p)) for p in pages_numbers_in_window ] last_page = int(ceil(total_items / float(per_page))) prev_page = None if current_page > 1: prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1))) next_page = None if current_page < last_page: next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1))) pagination = remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page)) offset = (current_page - 1) * per_page jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)] return dict(name=queue.name, jobs=jobs, pagination=pagination) @dashboard.route('/workers.json') @jsonify def list_workers(): def serialize_queue_names(worker): return [q.name for q in worker.queues] workers = [dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state()) for worker in Worker.all()] return dict(workers=workers) @dashboard.context_processor def inject_interval(): interval = current_app.config.get('RQ_POLL_INTERVAL', 2500) return dict(poll_interval=interval)
avg_line_length 29.606195 | max_line_length 115 | alphanum_fraction 0.67658
classes: count 0, score 0 | generators: count 0, score 0 | decorators: count 4,930, score 0.736811 | async_functions: count 0, score 0 | documentation: count 796, score 0.118966
hexsha b9ae95988166d56c353c46926e3d21a79d84f88c | size 769 | ext py | lang Python
max_stars: layers/layer1_python3/0300_acquisition/acquisition/__init__.py @ moas/mfdata (ca9460c3783ddfd6ad022c96a0a8bf0e65fa36b2) | licenses ["BSD-3-Clause"] | count null | events null
max_issues: layers/layer1_python3/0300_acquisition/acquisition/__init__.py @ moas/mfdata (ca9460c3783ddfd6ad022c96a0a8bf0e65fa36b2) | licenses ["BSD-3-Clause"] | count null | events null
max_forks: layers/layer1_python3/0300_acquisition/acquisition/__init__.py @ moas/mfdata (ca9460c3783ddfd6ad022c96a0a8bf0e65fa36b2) | licenses ["BSD-3-Clause"] | count null | events null
content:
from acquisition.step import AcquisitionStep from acquisition.stats import AcquisitionStatsDClient from acquisition.move_step import AcquisitionMoveStep from acquisition.delete_step import AcquisitionDeleteStep from acquisition.batch_step import AcquisitionBatchStep from acquisition.reinject_step import AcquisitionReinjectStep from acquisition.fork_step import AcquisitionForkStep from acquisition.archive_step import AcquisitionArchiveStep from acquisition.listener import AcquisitionListener __all__ = ['AcquisitionStep', 'AcquisitionBatchStep', 'AcquisitionMoveStep', 'AcquisitionDeleteStep', 'AcquisitionReinjectStep', 'AcquisitionForkStep', 'AcquisitionArchiveStep', 'AcquisitionStatsDClient', 'AcquisitionListener']
48.0625
63
0.830949
0
0
0
0
0
0
0
0
199
0.258778
b9b2dd8fc97fddaaa64ec64957043ee8e8088e39
615
py
Python
frappe-bench/apps/erpnext/erpnext/non_profit/doctype/member/member.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
null
null
null
frappe-bench/apps/erpnext/erpnext/non_profit/doctype/member/member.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
null
null
null
frappe-bench/apps/erpnext/erpnext/non_profit/doctype/member/member.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors # For license information, please see license.txt from __future__ import unicode_literals from frappe.model.document import Document from frappe.contacts.address_and_contact import load_address_and_contact class Member(Document): def onload(self): """Load address and contacts in `__onload`""" load_address_and_contact(self) def validate(self): self.validate_email_type(self.email) def validate_email_type(self, email): from frappe.utils import validate_email_add validate_email_add(email.strip(), True)
29.285714
72
0.786992
313
0.508943
0
0
0
0
0
0
185
0.300813
b9b3501a4a1a7bee83abdc50e1932071f97c2394
12,427
py
Python
networks/networks.py
ayyuriss/TRHPO
56a06d3593504647b75589ab87b5c96bdab74c9f
[ "MIT" ]
null
null
null
networks/networks.py
ayyuriss/TRHPO
56a06d3593504647b75589ab87b5c96bdab74c9f
[ "MIT" ]
null
null
null
networks/networks.py
ayyuriss/TRHPO
56a06d3593504647b75589ab87b5c96bdab74c9f
[ "MIT" ]
null
null
null
from torch import nn import numpy as np import base.basenetwork as BaseN from networks.cholesky import CholeskyBlock class FCNet(BaseN.BaseNetwork): name ="FCNet" def __init__(self,input_shape,output_shape,owner_name=""): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork): name ="FCSpectralNet" def __init__(self,input_shape,output_shape,owner_name=""): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name ="FCSpectralMNet" def __init__(self,input_shape,output_shape,owner_name=""): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork): name ="FCNetQ" def __init__(self,input_shape,output_shape,owner_name=""): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork): name="ConvNet" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork): name="ConvNetBias" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork): name="FCConvNet" def __init__(self,input_shape,output_shape,owner_name=""): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork): name="FCConvNetBias" def __init__(self,input_shape,output_shape,owner_name=""): 
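        # Same layout as the other *Bias variants in this file: a small convolutional
        # front-end feeding a fully connected trunk, with an EigenLayer head
        # (bias=False) sized to the requested output shape.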
super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork): name="ConvNet2" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork): name="ConvNetBig" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name="ConvNetBigBias" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name="ConvNetBigAtari" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork): name="ConvNetBigS" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name = "ConvNetMNIST" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape self.conv = [BaseN.ResNetBlock(1,32), 
BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork): name="ConvNetSimple" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork): name ="FCNetSimple" def __init__(self,input_shape,output_shape,owner_name=""): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile()
50.51626
91
0.508168
12,250
0.985757
0
0
0
0
0
0
234
0.01883
b9b4c8dedee4c99c1bae0987e46ca8c83899e4d8
1,435
py
Python
icenews/api_important_words.py
sverrirab/icenews
10a5e13d4dcd5e95f746c4fec9821b4b48fa440e
[ "Apache-2.0" ]
4
2019-04-25T21:09:39.000Z
2020-07-26T08:57:00.000Z
icenews/api_important_words.py
sverrirab/icenews
10a5e13d4dcd5e95f746c4fec9821b4b48fa440e
[ "Apache-2.0" ]
1
2019-08-11T00:27:18.000Z
2019-08-12T17:36:42.000Z
icenews/api_important_words.py
sverrirab/icenews
10a5e13d4dcd5e95f746c4fec9821b4b48fa440e
[ "Apache-2.0" ]
null
null
null
import logging from pydantic import BaseModel, Field from typing import List from .similar import important_words from .server import app _MAX_LENGTH = 2000 logger = logging.getLogger(__name__) class ImportantWordsResponse(BaseModel): important_words: List[str] = Field(..., description="List of lemmas") class ImportantWordsRequest(BaseModel): input_string: str = Field( ..., description="Icelandic text for analysis.", min_length=1, max_length=_MAX_LENGTH, ) # Strange things happen with error handling when using alias - splitting up into two input models class ParseInputDeprecated(BaseModel): input_string: str = Field( ..., description="Icelandic text for analysis.", min_length=1, max_length=_MAX_LENGTH, alias="in", ) @app.post( "/v1/important_words", description="Find lemmas of important words", response_model=ImportantWordsResponse, ) def v1_important_words(*, data: ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( "/v1/parse", description="Find lemmas of important words", response_model=ImportantWordsResponse, deprecated=True, ) def v1_parse(*, data: ParseInputDeprecated): logger.info(f"parse: {repr(data.input_string)}") return ImportantWordsResponse(important_words=important_words(data.input_string))
25.625
97
0.724042
523
0.36446
0
0
601
0.418815
0
0
308
0.214634
b9b51a38157d5d825f921ab19fba044fe3545044
206
py
Python
try-except.py
kmarcini/Learn-Python---Full-Course-for-Beginners-Tutorial-
8ea4ef004d86fdf393980fd356edcf5b769bfeac
[ "BSD-3-Clause" ]
null
null
null
try-except.py
kmarcini/Learn-Python---Full-Course-for-Beginners-Tutorial-
8ea4ef004d86fdf393980fd356edcf5b769bfeac
[ "BSD-3-Clause" ]
null
null
null
try-except.py
kmarcini/Learn-Python---Full-Course-for-Beginners-Tutorial-
8ea4ef004d86fdf393980fd356edcf5b769bfeac
[ "BSD-3-Clause" ]
null
null
null
try: # num = 10 / 0 number = int(input("Enter a number: ")) print(number) # catch specific errors except ZeroDivisionError as err: print(err) except ValueError: print("Invalid input")
17.166667
43
0.650485
0
0
0
0
0
0
0
0
70
0.339806
b9b691941c62b002880bb1f21ca60b0e932e41c1
3,574
py
Python
peaksampl.py
Gattocrucco/sipmfilter
74215d6c53b998808fc6c677b46030234d996bdf
[ "CC-BY-4.0", "MIT" ]
null
null
null
peaksampl.py
Gattocrucco/sipmfilter
74215d6c53b998808fc6c677b46030234d996bdf
[ "CC-BY-4.0", "MIT" ]
null
null
null
peaksampl.py
Gattocrucco/sipmfilter
74215d6c53b998808fc6c677b46030234d996bdf
[ "CC-BY-4.0", "MIT" ]
null
null
null
import numpy as np def _adddims(a, b): n = max(a.ndim, b.ndim) a = np.expand_dims(a, tuple(range(n - a.ndim))) b = np.expand_dims(b, tuple(range(n - b.ndim))) return a, b def _yz(y, z, t, yout): """ Shared implementation of peaksampl and sumpeaks. """ y = np.asarray(y) z = np.asarray(z) t = np.asarray(t) y = np.pad(y, [(0, 0)] * (y.ndim - 1) + [(1, 1)], constant_values=yout) offset = np.argmax(np.abs(y), axis=-1) ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1) ampl = np.squeeze(ampl, -1) indices = t[..., :, None] - t[..., None, :] + offset[..., None, None] indices = np.minimum(indices, y.shape[-1] - 1) indices = np.maximum(indices, 0) N = t.shape[-1] indices = indices.reshape(indices.shape[:-2] + (N * N,)) n = max(y.ndim, indices.ndim) y, indices = _adddims(y, indices) y = np.take_along_axis(y, indices, -1) eps = np.finfo(float).eps * N * N * ampl y[..., ::N + 1] += np.expand_dims(eps, -1) y = y.reshape(y.shape[:-1] + (N, N)) z = z[..., None] y, z = _adddims(y, z) return y, z def peaksampl(y, z, t, yout=0): """ Get peak amplitudes given their sum. This assumes that the position of the signals is given by peaks positions even when they are summed. Parameters ---------- y : array (..., M,) The single signal shape. z : array (..., N,) The peak height in the sum of the signals for each peak. t : int array (..., N,) The indices of the peaks in the sum. yout : scalar The value of the signal outside the provided values, default 0. Return ------ a : array (..., N), The amplitudes such that z_i = sum_j a_j * y[t_i - t_j]. Broadcasted along non-last axis. """ y, z = _yz(y, z, t, yout) a = np.linalg.solve(y, z) return np.squeeze(a, -1) def sumpeaks(y, a, t, yout=0): """ Compute the peak heights of a sum of signals. This assumes that the position of the peaks is given by the signal positions even when they are summed. Parameters ---------- y : array (..., M,) The single signal shape. a : array (..., N,) The amplitudes of the signals (`y` is multiplied by `a`). t : int array (..., N,) The indices of the position of the signals. yout : scalar The value of the signal outside the provided values, default 0. Return ------ z : array (..., N,) The peak height in the sum of the signals for each signal. Broadcasted along non-last axis. """ y, a = _yz(y, a, t, yout) z = np.matmul(y, a) return np.squeeze(z, axis=-1) if __name__ == '__main__': from matplotlib import pyplot as plt from scipy import signal y = np.exp(-np.linspace(0, 10, 1000) / 10) i = np.arange(1, 1000) t0 = np.array([10, 340, 523]) a0 = np.array([3, 2, 1]) indices = i - t0[:, None] z = np.take(y, indices, mode='clip') * a0[:, None] z = np.where((indices < 0) | (indices >= len(y)), 0, z) z = np.sum(z, axis=0) t, = signal.argrelmax(z) assert len(t) == len(t0) a = peaksampl(y, z[t], t) h = sumpeaks(y, a, t) fig, ax = plt.subplots(num='peaksampl', clear=True) ax.plot(z, color='#f55') ax.vlines(t0, 0, a0, color='gray', zorder=3) ax.vlines(t, 0, a, linestyle='--', zorder=3) ax.plot(t, h, 'ok') ax.grid('major', linestyle='--') fig.tight_layout() fig.show()
28.822581
78
0.546726
0
0
0
0
0
0
0
0
1,478
0.413542
b9b6f990f4ce20ca5842a3bb309cb667f69ccc3e
453
py
Python
arachne/hdl/xilinx/ps8/resources/pmu.py
shrine-maiden-heavy-industries/arachne
1d0320bf6e77653656f8ce1874900743452dbac4
[ "BSD-3-Clause" ]
3
2021-09-13T20:23:42.000Z
2022-01-19T13:12:32.000Z
arachne/hdl/xilinx/ps8/resources/pmu.py
shrine-maiden-heavy-industries/arachne
1d0320bf6e77653656f8ce1874900743452dbac4
[ "BSD-3-Clause" ]
null
null
null
arachne/hdl/xilinx/ps8/resources/pmu.py
shrine-maiden-heavy-industries/arachne
1d0320bf6e77653656f8ce1874900743452dbac4
[ "BSD-3-Clause" ]
null
null
null
# SPDX-License-Identifier: BSD-3-Clause from amaranth import * from amaranth.build import * from .common import PS8Resource, MIOSet __all__ = ( 'PMUResource', ) class PMUResource(PS8Resource): name = 'pmu' claimable_mio = [ ] def __init__(self): super().__init__(0, 0, None, False) def used_mio(self, **kwargs): raise NotImplementedError # :nocov: def generate_mapping(self, **kwargs): raise NotImplementedError # :nocov:
19.695652
46
0.699779
274
0.604857
0
0
0
0
0
0
75
0.165563
b9b9340675c6ceead7ff166bf8fe4d65fa580b58
4,597
py
Python
backend/Washlist/tests.py
henrikhorluck/tdt4140-washlists
a75c3bc38a3f915eb48cf3e9ecba848f46a2bcaa
[ "MIT" ]
null
null
null
backend/Washlist/tests.py
henrikhorluck/tdt4140-washlists
a75c3bc38a3f915eb48cf3e9ecba848f46a2bcaa
[ "MIT" ]
2
2020-05-02T18:17:44.000Z
2020-05-02T18:18:02.000Z
backend/Washlist/tests.py
henrikhorluck/tdt4140-washlists
a75c3bc38a3f915eb48cf3e9ecba848f46a2bcaa
[ "MIT" ]
null
null
null
from django.test import TestCase from django.urls import reverse from rest_framework import status from Dormroom.models import Dormroom from SIFUser.mixins import AuthTestMixin from StudentVillage.models import StudentVillage from Washlist.jobs import reset_washlists from Washlist.models.Templates import TemplateListItem, TemplateWashList from Washlist.models.WashLists import ListItem from Washlist.serializer import TemplateWashListSerializer class WashListTemplateTest(TestCase): room = None def setUp(self): village = StudentVillage.objects.create(name="Moholt") self.room = Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title="Moholt") village.templateWashList = temp_list village.save() def test_add_to_template_adds_to_each_list(self): desc = "Vask badet" temp_list = TemplateWashList.objects.get(title="Moholt") TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase): def setUp(self): """ Create a Washlist item that is completed the method also sets up a village and a room to relate the Washlist item to satisfy the db constraints """ village = StudentVillage.objects.create(name="Moholt") self.room = Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title="Moholt") village.templateWashList = temp_list village.save() self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc="Vask badet", completed=True ) self.item.save() def test_job_resets_items(self): """ Test that job to reset Washlist items when run manually actually rests the databases Washlist items """ reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin): def setUp(self): super().setUp() self.temp_list = TemplateWashList.objects.create(title="Moholt") village = StudentVillage.objects.create( name="Moholt", templateWashList=self.temp_list ) self.room = Dormroom.objects.create(number=1, village=village) self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc="Vask badet", completed=True ) def test_get_template_list(self): url = reverse("templatewashlist-list") response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title="Moholt") ).data, ) def test_get_detail_template_list(self): url = reverse("templatewashlist-detail", args=[1]) response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title="Moholt") ).data, ) def test_add_template_washlist(self): url = reverse("templatewashlist-list") response = self.client.post( url, {"title": "Tyholt", "village": 1}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title="Tyholt") ).data, ) def test_partial_update(self): url = reverse("templatewashlist-detail", args=[1]) response = self.client.patch( url, {"title": "Berg"}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title="Berg")).data, ) def 
test_destroy(self): url = reverse("templatewashlist-detail", args=[1]) response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(TemplateWashList.objects.count(), 0)
35.091603
92
0.67022
4,139
0.90037
0
0
0
0
0
0
594
0.129215
b9ba39e57d52ad0baaeb81fbe95a03b7bb17d4ad
3,792
py
Python
torchvision/prototype/models/mobilenetv3.py
piyush01123/vision
c6722307e6860057b4855483d237fe00a213dcf6
[ "BSD-3-Clause" ]
null
null
null
torchvision/prototype/models/mobilenetv3.py
piyush01123/vision
c6722307e6860057b4855483d237fe00a213dcf6
[ "BSD-3-Clause" ]
null
null
null
torchvision/prototype/models/mobilenetv3.py
piyush01123/vision
c6722307e6860057b4855483d237fe00a213dcf6
[ "BSD-3-Clause" ]
null
null
null
from functools import partial from typing import Any, Optional, List from torchvision.prototype.transforms import ImageNetEval from torchvision.transforms.functional import InterpolationMode from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig from ._api import WeightsEnum, Weights from ._meta import _IMAGENET_CATEGORIES from ._utils import handle_legacy_interface, _ovewrite_named_param __all__ = [ "MobileNetV3", "MobileNet_V3_Large_Weights", "MobileNet_V3_Small_Weights", "mobilenet_v3_large", "mobilenet_v3_small", ] def _mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig], last_channel: int, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any, ) -> MobileNetV3: if weights is not None: _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"])) model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META = { "task": "image_classification", "architecture": "MobileNetV3", "publication_year": 2019, "size": (224, 224), "min_size": (1, 1), "categories": _IMAGENET_CATEGORIES, "interpolation": InterpolationMode.BILINEAR, } class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, "num_params": 5483032, "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small", "acc@1": 74.042, "acc@5": 91.340, }, ) ImageNet1K_V2 = Weights( url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth", transforms=partial(ImageNetEval, crop_size=224, resize_size=232), meta={ **_COMMON_META, "num_params": 5483032, "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning", "acc@1": 75.274, "acc@5": 92.566, }, ) default = ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, "num_params": 2542856, "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small", "acc@1": 67.668, "acc@5": 87.402, }, ) default = ImageNet1K_V1 @handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large( *, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any ) -> MobileNetV3: weights = MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs) @handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small( *, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any ) -> MobileNetV3: weights = MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
34.472727
119
0.704114
1,469
0.387395
0
0
960
0.253165
0
0
889
0.234441
b9bb675bdbf31f94537da2d2380efe251bd20dd2
1,036
py
Python
rest_auth/registration/urls.py
soul4code/django-rest-auth
b7a2e06e7736865b18f6aab79dcd42210e06c28b
[ "MIT" ]
null
null
null
rest_auth/registration/urls.py
soul4code/django-rest-auth
b7a2e06e7736865b18f6aab79dcd42210e06c28b
[ "MIT" ]
null
null
null
rest_auth/registration/urls.py
soul4code/django-rest-auth
b7a2e06e7736865b18f6aab79dcd42210e06c28b
[ "MIT" ]
null
null
null
from django.urls import re_path from django.views.generic import TemplateView from .views import RegisterView, VerifyEmailView urlpatterns = [ re_path(r'^$', RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'), # This url is used by django-allauth and empty TemplateView is # defined just to allow reverse() call inside app, for example when email # with verification link is being sent, then it's required to render email # content. # account_confirm_email - You should override this view to handle it in # your API client somehow and then, send post to /verify-email/ endpoint # with proper key. # If you don't want to use API on that step, then just use ConfirmEmailView # view from: # django-allauth https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py re_path(r'^account-confirm-email/(?P<key>[-:\w]+)/$', TemplateView.as_view(), name='account_confirm_email'), ]
41.44
100
0.721042
0
0
0
0
0
0
0
0
687
0.663127
b9bb907819b5835937644fde4b8d08e5dd987580
1,036
py
Python
crawler/tests.py
mental689/paddict
493268b62531c698687d42416edf61c602250133
[ "MIT" ]
1
2019-06-22T10:28:21.000Z
2019-06-22T10:28:21.000Z
crawler/tests.py
mental689/paddict
493268b62531c698687d42416edf61c602250133
[ "MIT" ]
4
2020-09-05T01:48:18.000Z
2022-03-02T04:29:25.000Z
crawler/tests.py
mental689/paddict
493268b62531c698687d42416edf61c602250133
[ "MIT" ]
null
null
null
from django.test import TestCase # Create your tests here. from crawler.download import * from crawler.models import * class AnimalDownloadTestCase(TestCase): def setUp(self): self.stopWords = ["CVPR 2019", "Computer Vision Foundation."] self.url = "/Users/tuannguyenanh/Desktop/cvpr2019.html"#"http://openaccess.thecvf.com/CVPR2019.py" self.root = "http://openaccess.thecvf.com/" self.event = Event.objects.filter(shortname='CVPR2019').first() if self.event is None: self.event = Event(shortname='CVPR2019') self.event.save() def test_animal_can_download(self): #print(get_html(self.url)) f = open(self.url) soup = parse_html(f.read()) f.close() f = open('cvpr2019.bib', 'w') print(soup.title) bibtexs = soup.find_all("div", attrs={"class": "bibref"}) #print(bibtexs) for bib in bibtexs: print(bib.text) f.write(bib.text.replace('<br>', '\n')) f.close()
32.375
106
0.608108
912
0.880309
0
0
0
0
0
0
291
0.280888
b9bfcc9ca3f71d3591d1b453eea9313adf491d9f
452
py
Python
test_scripts/xml_example.py
petervdb/testrep1
76b6eb3de2deb9596c055f252191e28587d5520c
[ "MIT" ]
1
2015-11-17T21:35:44.000Z
2015-11-17T21:35:44.000Z
test_scripts/xml_example.py
petervdb/testrep1
76b6eb3de2deb9596c055f252191e28587d5520c
[ "MIT" ]
null
null
null
test_scripts/xml_example.py
petervdb/testrep1
76b6eb3de2deb9596c055f252191e28587d5520c
[ "MIT" ]
null
null
null
#!/usr/bin/python3 from urllib.request import urlopen from xml.etree.ElementTree import parse # Download the RSS feed and parse it u = urlopen('http://planet.python.org/rss20.xml') doc = parse(u) # Extract and output tags of interest for item in doc.iterfind('channel/item'): title = item.findtext('title') date = item.findtext('pubDate') link = item.findtext('link') print(title) print(date) print(link) print() print("Program executed.")
20.545455
49
0.725664
0
0
0
0
0
0
0
0
182
0.402655
b9c06414f6de5d6df932f87abe0ac2addfe2d410
1,489
py
Python
contacts/urls.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
1
2019-04-21T18:57:57.000Z
2019-04-21T18:57:57.000Z
contacts/urls.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
null
null
null
contacts/urls.py
anthowen/duplify
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
[ "MIT" ]
null
null
null
"""dedupper_app URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from contacts import views admin.autodiscover() urlpatterns = [ path('', views.index, name='contact_index'), path('', views.index, name='lead_index'), path('contacts/', views.contacts, name='contacts'), path('leads/', views.leads, name='leads'), path('table/', views.table, name='table'), path('plotly/', views.plotly, name='plotly'), # url(r'^keys', views.upload, name='keys'), # path('key-gen/', views.key_gen, name='key-gen'), # path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'), # path('run/', views.run, name='run'), # path('sorted/<id>', views.merge, name='merge'), # path('sorted/export/<type>', views.download, name='export'), # path('sorted/report/<type>', views.download_times, name='report'), ]
38.179487
85
0.672263
0
0
0
0
0
0
0
0
1,153
0.774345
b9c1739f7de89fe26aec7c763524b35992652ab0
92
py
Python
pydm/PyQt/uic.py
klauer/pydm
e26aad58a7a0eb6f7321c61aa1dace646ff652bd
[ "BSD-3-Clause-LBNL" ]
null
null
null
pydm/PyQt/uic.py
klauer/pydm
e26aad58a7a0eb6f7321c61aa1dace646ff652bd
[ "BSD-3-Clause-LBNL" ]
null
null
null
pydm/PyQt/uic.py
klauer/pydm
e26aad58a7a0eb6f7321c61aa1dace646ff652bd
[ "BSD-3-Clause-LBNL" ]
null
null
null
from . import qtlib QT_LIB = qtlib.QT_LIB if QT_LIB == 'PyQt5': from PyQt5.uic import *
18.4
27
0.684783
0
0
0
0
0
0
0
0
7
0.076087
b9c1d738b7414d020a32d72c8b5b4b39a4b6d1d4
2,667
py
Python
CPB100/lab2b/scheduled/ingestapp.py
pranaynanda/training-data-analyst
f10ab778589129239fd5b277cfdefb41638eded5
[ "Apache-2.0" ]
null
null
null
CPB100/lab2b/scheduled/ingestapp.py
pranaynanda/training-data-analyst
f10ab778589129239fd5b277cfdefb41638eded5
[ "Apache-2.0" ]
null
null
null
CPB100/lab2b/scheduled/ingestapp.py
pranaynanda/training-data-analyst
f10ab778589129239fd5b277cfdefb41638eded5
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # [START app] import os import logging import transform import flask import google.cloud.storage as gcs # [start config] app = flask.Flask(__name__) # Configure this environment variable via app.yaml CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) # [end config] @app.route('/') def welcome(): return '<html><a href="ingest">ingest last week</a> earthquake data</html>' @app.route('/ingest') def ingest_last_week(): try: # verify that this is a cron job request is_cron = flask.request.headers['X-Appengine-Cron'] logging.info('Received cron request {}'.format(is_cron)) # create png url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile = 'earthquakes.png' status = 'scheduled ingest of {} to {}'.format(url, outfile) logging.info(status) transform.create_png(url, outfile) # upload to cloud storage client = gcs.Client() bucket = client.get_bucket(CLOUD_STORAGE_BUCKET) blob = gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile) # change permissions blob.make_public() status = 'uploaded {} to {}'.format(outfile, blob.name) logging.info(status) except KeyError as e: status = '<html>Sorry, this capability is accessible only by the Cron service, but I got a KeyError for {} -- try invoking it from <a href="{}"> the GCP console / AppEngine / taskqueues </a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron request') return status @app.errorhandler(500) def server_error(e): logging.exception('An error occurred during a request.') return """ An internal error occurred: <pre>{}</pre> See logs for full stacktrace. """.format(e), 500 if __name__ == '__main__': app.run(host='0.0.0.0', port=8080, debug=True) # [END app]
32.13253
214
0.683915
0
0
0
0
1,614
0.605174
0
0
1,551
0.581552
b9c3b62c39c9c9979a0f73775051d1aecd742c75
73
py
Python
index.py
StarSky1/microsoft-python-study
7fdc1ad87ac0eeb497013d7792f499416aac32d9
[ "MIT" ]
null
null
null
index.py
StarSky1/microsoft-python-study
7fdc1ad87ac0eeb497013d7792f499416aac32d9
[ "MIT" ]
null
null
null
index.py
StarSky1/microsoft-python-study
7fdc1ad87ac0eeb497013d7792f499416aac32d9
[ "MIT" ]
null
null
null
name = input('input your name:')
print('hello')
print(name.capitalize())
24.333333
31
0.69863
0
0
0
0
0
0
0
0
25
0.342466
b9c4867749ea67a9a267a9e62cea575f210f9260
2,526
py
Python
credentials.py
Machel54/-pass-locker-
8ddf14cf755924ca903919177f9f878f65a08042
[ "MIT" ]
null
null
null
credentials.py
Machel54/-pass-locker-
8ddf14cf755924ca903919177f9f878f65a08042
[ "MIT" ]
null
null
null
credentials.py
Machel54/-pass-locker-
8ddf14cf755924ca903919177f9f878f65a08042
[ "MIT" ]
1
2020-01-29T02:05:49.000Z
2020-01-29T02:05:49.000Z
import pyperclip import random import string class Credential: ''' class that generates new credentials ''' credential_list = [] def __init__(self,username,sitename,password): self.username = username self.password = password self.sitename = sitename def save_credential(self): ''' save_cred method saves the user objects into creds_list ''' Credential.credential_list.append(self) @classmethod def display_credential(cls, user_name): ''' Class method to show the list of credentials saved ''' users_credential_list = [] for credential in cls.credential_list: if credential.username == user_name: users_credential_list.append(credential) return users_credential_list def delete_credential(self): ''' delete_contact method deletes a saved credential from the credential_list ''' Credential.credential_list.remove(self) def generate_password(self): ''' Function to generate a password where a user can generate a password based on their length of choice ''' chars = "abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|" password = "" print("Use Char list = %s \n" % chars) length = int(input("[*] Input Password Length: ")) while len(password) != length: password = password + random.choice(chars) if len(password) == length: print("Password: %s" % password) return password @classmethod def find_by_sitename(cls, sitename): ''' Class method that takes a site name and returns the credential that matches that site ''' for credential in cls.credential_list: if credential.sitename == sitename: return credential @classmethod def copy_credential(cls, sitename): ''' Class method that copies a credentials details after the credentials sitename has been entered ''' find_credential = Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password) @classmethod def credential_exist(cls, sitename): ''' Method that checks if user exists from the credential list. Returns: Boolean: True or false depending if the credential exits ''' the_credential = "" for credential in Credential.credential_list: if (credential.sitename == sitename): the_credential = sitename return the_credential
30.071429
111
0.665479
2,474
0.979414
0
0
1,288
0.509897
0
0
932
0.368963
b9c5365f366487d350d0993e89760939da233546
80
py
Python
tests/test_dice.py
mehulsatardekar/dice-on-demand
fa1ce1214975ba70c5d61390408a4de2418cf997
[ "MIT" ]
1
2020-12-03T14:27:20.000Z
2020-12-03T14:27:20.000Z
tests/test_dice.py
mehulsatardekar/dice-on-demand
fa1ce1214975ba70c5d61390408a4de2418cf997
[ "MIT" ]
11
2020-10-21T17:51:12.000Z
2020-11-09T12:02:52.000Z
tests/test_dice.py
mehulsatardekar/dice-on-demand
fa1ce1214975ba70c5d61390408a4de2418cf997
[ "MIT" ]
27
2021-09-09T22:53:21.000Z
2021-11-20T22:46:16.000Z
import unittest import app def test_test(): assert app.test() == "Works!"
11.428571
33
0.6625
0
0
0
0
0
0
0
0
8
0.1
b9c62ba9c79d4ffcb00ede68fc940fc877d45118
5,614
py
Python
annotations/rip_annotated_junctions.py
ChristopherWilks/snaptron
82ea3c5c5f2fbb726bba6d8c2bd0f7713291833a
[ "MIT" ]
25
2016-01-08T02:02:36.000Z
2021-12-29T14:00:58.000Z
annotations/rip_annotated_junctions.py
ChristopherWilks/snaptron
82ea3c5c5f2fbb726bba6d8c2bd0f7713291833a
[ "MIT" ]
11
2016-02-25T01:44:46.000Z
2021-07-02T05:52:55.000Z
annotations/rip_annotated_junctions.py
ChristopherWilks/snaptron
82ea3c5c5f2fbb726bba6d8c2bd0f7713291833a
[ "MIT" ]
7
2016-02-13T01:45:15.000Z
2021-11-22T11:04:12.000Z
#!/usr/bin/env python """ rip_annotated_junctions.py Non-reference/species verson of this script, no lift-over Rips junctions from annotation files contained in jan_24_2016_annotations.tar.gz, as described in annotation_definition.md. Junctions are dumped to stdout, which we record as annotated_junctions.tsv.gz in runs/sra (same directory as this file). annotated_junctions.tsv.gz is required by tables.py. The format of annotated_junctions.tsv.gz is (tab-separated fields), one per junction 1. Chromosome 2. Start position (1-based, inclusive) 3. End position (1-based, inclusive) 4. Strand (+ or -) 5. anno source (abbreviation) Must have Stats are written to stderr From the runs/sra/v2 directory, we ran pypy rip_annotated_junctions.py --hisat2-dir /path/to/hisat2-2.0.1-beta --annotations path/to/jan_24_2016_annotations.tar.gz | sort -k1,1 -k2,2n -k3,3n | gzip >annotated_junctions.tsv.gz """ import subprocess import tarfile import argparse import tempfile import atexit import shutil import glob import os import gzip import sys #file2source = {"hg19/gencode.v19.annotation.gtf.gz":"gC19","hg19/refGene.txt.gz":"rG19","hg19/acembly.txt.gz":"aC19","hg19/ccdsGene.txt.gz":"cG19","hg19/vegaGene.txt.gz":"vG19","hg19/knownGene.txt.gz":"kG19","hg19/mgcGenes.txt.gz":"mG19","hg19/lincRNAsTranscripts.txt.gz":"lR19","hg19/sibGene.txt.gz":"sG19","hg38/refGene.txt.gz":"rG38","hg38/ccdsGene.txt.gz":"cG38","hg38/gencode.v24.annotation.gtf.gz":"gC38","hg38/knownGene.txt.gz":"kG38","hg38/mgcGenes.txt.gz":"mG38","hg38/lincRNAsTranscripts.txt.gz":"lR38","hg38/sibGene.txt.gz":"sG38"} #file2source = {"mm10/mouse10_ucsc_genes.gtf.gz":"kG10","mm10/mouse10_gencodevm11_comp.gtf.gz":"gC11","mm10/mouse10_gencodevm09_comp.gtf.gz":"gC09","mm10/mouse10_refseq_refgene.gtf.gz":"rG10"} file2source = {"mouse10_ucsc_genes.gtf.gz":"kG10","mouse10_gencodevm11_comp.gtf.gz":"gC11","mouse10_gencodevm09_comp.gtf.gz":"gC09","mouse10_refseq_refgene.gtf.gz":"rG10"} if __name__ == '__main__': # Print file's docstring if -h is invoked parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) # Add command-line arguments parser.add_argument('--extract-script-dir', type=str, required=True, help=('path to directory containing extract_splice_sites.py script (from HISAT2)') ) parser.add_argument('--annotations', type=str, required=True, help=('full path to directory that has the annotation GTF(s) in gzipped format') ) args = parser.parse_args() extract_destination = tempfile.mkdtemp() atexit.register(shutil.rmtree, extract_destination) #with tarfile.open(args.annotations, 'r:gz') as tar: # tar.extractall(path=extract_destination) extract_splice_sites_path = os.path.join(args.extract_script_dir, 'extract_splice_sites.py') containing_dir = os.path.dirname(os.path.realpath(__file__)) annotated_junctions_ = set() for junction_file in glob.glob( os.path.join(args.annotations, '*') ): label = os.path.basename(junction_file) datasource_code = file2source[label] unique_junctions = set() #extract_splice_sites_path prints 0-based, exon coords around junctions #hence the +2 for the start here extract_process = subprocess.Popen(' '.join([ sys.executable, extract_splice_sites_path, '<(gzip -cd %s)' % junction_file ]), shell=True, executable='/bin/bash', stdout=subprocess.PIPE ) for line in extract_process.stdout: tokens = line.strip().split('\t') tokens[1] = int(tokens[1]) + 2 tokens[2] = int(tokens[2]) if tokens[2] < tokens[1]: print >>sys.stderr, ( 'Invalid junction ({}, {}, {}) from file {}. ' 'Skipping.' 
).format( tokens[0], tokens[1], tokens[2], junction_file ) continue tokens.append(datasource_code) junction_to_add = tuple(tokens) annotated_junctions_.add(junction_to_add) unique_junctions.add(junction_to_add) extract_process.stdout.close() exit_code = extract_process.wait() if exit_code != 0: raise RuntimeError( 'extract_splice_sites.py had nonzero exit code {}.'.format( exit_code ) ) print >>sys.stderr, 'Junctions in {}: {}'.format( label, len(unique_junctions) ) junc2datasource = {} for junction in annotated_junctions_: if junction[:4] not in junc2datasource: junc2datasource[junction[:4]]=set() junc2datasource[junction[:4]].add(junction[4]) seen = set() for junction in annotated_junctions_: if junction[:4] not in seen: sources = ",".join(sorted(junc2datasource[junction[:4]])) print "%s\t%s" % ('\t'.join(map(str, junction[:4])),sources) seen.add(junction[:4])
44.555556
543
0.603669
0
0
0
0
0
0
0
0
2,471
0.44015
b9c731695680778a55c685fcfc15ab5e3eccf437
5,438
py
Python
dramkit/_tmp/VMD.py
Genlovy-Hoo/dramkit
fa3d2f35ebe9effea88a19e49d876b43d3c5c4c7
[ "MIT" ]
null
null
null
dramkit/_tmp/VMD.py
Genlovy-Hoo/dramkit
fa3d2f35ebe9effea88a19e49d876b43d3c5c4c7
[ "MIT" ]
null
null
null
dramkit/_tmp/VMD.py
Genlovy-Hoo/dramkit
fa3d2f35ebe9effea88a19e49d876b43d3c5c4c7
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

import numpy as np

def vmd(signal, alpha, tau, K, DC, init, tol):
    '''
    To use VMD, simply feed in the signal to be decomposed; the decomposition
    itself does not depend on the sampling frequency, and the VMD input
    parameters do not include a sampling frequency either.
    The decomposed modes are returned in the output u, independent of the
    signal length or its sampling frequency.
    The per-iteration center frequencies of the modes are returned in omega;
    the physical center frequencies can be recovered as 2*pi/fs*omega, but
    note that they change during the iterations.

    Input and Parameters:
    signal  - the time domain signal (1D) to be decomposed
    alpha   - the balancing parameter of the data-fidelity constraint
    tau     - time-step of the dual ascent ( pick 0 for noise-slack )
    K       - the number of modes to be recovered
    DC      - true if the first mode is put and kept at DC (0-freq)
    init    - 0 = all omegas start at 0
              1 = all omegas start uniformly distributed
              2 = all omegas initialized randomly
    tol     - tolerance of convergence criterion; typically around 1e-6
    Output:
    u       - the collection of decomposed modes
    u_hat   - spectra of the modes
    omega   - estimated mode center-frequencies
    '''
    # Period and sampling frequency of input signal
    # The sampling time and frequency are normalized inside the algorithm: the
    # signal is assumed to span 1 s, which fixes the corresponding sampling
    # frequency. Sampling interval: 1/length(signal); frequency: length(signal).
    save_T = len(signal)
    fs = 1 / save_T

    # extend the signal by mirroring (mirror extension)
    T = save_T
    f_mirror = []
    temp = signal[0:T//2]
    f_mirror.extend(temp[::-1])  # temp[::-1] reverses the slice
    f_mirror.extend(signal)
    temp = signal[T//2:T]
    f_mirror.extend(temp[::-1])
    f = f_mirror

    # Time Domain 0 to T (of mirrored signal)
    T = len(f)
    t = [(i + 1) / T for i in range(T)]  # the list starts at 1

    # Spectral Domain discretization
    # freqs is shifted because the FFT produces symmetric positive and negative
    # frequencies; only the positive half is analysed, which is why the plotted
    # spectrum shows no negative frequencies.
    freqs = np.array( [i - 0.5 - 1 / T for i in t] )

    # Maximum number of iterations (if not converged yet, then it won't anyway)
    N = 500

    # For future generalizations: individual alpha for each mode
    Alpha = alpha * np.ones(K)

    # Construct and center f_hat
    transformed = np.fft.fft(f)  # fast Fourier transform of the signal
    f_hat = np.fft.fftshift(transformed)  # shift the zero frequency to the center of the spectrum
    f_hat_plus = f_hat
    f_hat_plus[0:T // 2] = 0

    # f_hat_plus[0:T] = 1 #????????????????????????????////////////

    # matrix keeping track of every iterant // could be discarded for mem
    u_hat_plus = [np.zeros((N, len(freqs)), dtype=complex) for i in range(K)]

    # Initialization of omega_k
    omega_plus = np.zeros((N, K))

    if init == 1:
        for i in range(K):
            omega_plus[0, i] = (0.5 / K) * i
    elif init == 2:
        omega_plus[0, :] = np.sort(np.exp(np.log(fs) + (np.log(0.5) - np.log(fs)) * np.random.rand(K)))
    else:
        omega_plus[0, :] = 0

    # if DC mode imposed, set its omega to 0
    if DC:
        omega_plus[0, 0] = 0

    # start with empty dual variables
    lambda_hat = np.zeros( (N, len(freqs)), dtype=complex)

    # other inits
    eps = 2.2204e-16  # Python has no built-in eps constant, so use the float64 machine epsilon
    uDiff = tol + eps  # update step
    n = 1  # loop counter
    sum_uk = 0  # accumulator

    #----------- Main loop for iterative updates----------

    while (uDiff > tol and n < N ):  # not converged and below iterations limit

        # update first mode accumulator
        k = 0
        sum_uk = u_hat_plus[K-1][n-1,:] + sum_uk - u_hat_plus[0][n-1,:]  # does sum_uk always stay 0 here (shape (1, 2000))?
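        # The three updates that follow are the standard VMD ADMM iteration
        # (Dragomiretskiy & Zosso, 2014), which this code implements:
        #   u_hat_k <- Wiener-filtered residual centered at omega_k:
        #              (f_hat - sum of other modes - lambda/2) / (1 + Alpha_k*(freqs - omega_k)^2)
        #   omega_k <- |u_hat_k|^2-weighted mean frequency over the positive half-spectrum
        #   lambda  <- lambda + tau*(sum of all modes - f_hat)   (dual ascent)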
#update spectrum of first mode through Wiener filter of residuals u_hat_plus[k][n,:] = (f_hat_plus - sum_uk - lambda_hat[n-1,:]/2)/(1+Alpha[k]*(freqs - omega_plus[n-1,k])**2) #update first omega if not held at 0 if not DC: omega_plus[n,k] = (freqs[T//2:T]*np.mat(np.abs(u_hat_plus[k][n, T//2:T])**2).H)/np.sum(np.abs(u_hat_plus[k][n,T//2:T])**2) #update of any other mode for k in range(K-1): #accumulator sum_uk = u_hat_plus[k][n,:] + sum_uk - u_hat_plus[k+1][n-1,:] #mode spectrum u_hat_plus[k+1][n,:] = (f_hat_plus - sum_uk - lambda_hat[n-1,:]/2)/(1+Alpha[k+1]*(freqs - omega_plus[n-1,k+1])**2) #center frequencies omega_plus[n,k+1] = (freqs[T//2:T]*np.mat(np.abs(u_hat_plus[k+1][n, T//2:T])**2).H)/np.sum(np.abs(u_hat_plus[k+1][n,T//2:T])**2) #Dual ascent lambda_hat[n,:] = lambda_hat[n-1,:] + tau*(np.sum([ u_hat_plus[i][n,:] for i in range(K)],0) - f_hat_plus) #loop counter n = n+1 #converged yet? uDiff = eps for i in range(K): uDiff = uDiff + 1/T*(u_hat_plus[i][n-1,:]-u_hat_plus[i][n-2,:])*np.mat((u_hat_plus[i][n-1,:]-u_hat_plus[i][n-2,:]).conjugate()).H uDiff = np.abs(uDiff) # ------ Postprocessing and cleanup------- #discard empty space if converged early N = min(N,n) omega = omega_plus[0:N,:] #Signal reconstruction u_hat = np.zeros((T, K), dtype=complex) temp = [u_hat_plus[i][N-1,T//2:T] for i in range(K) ] u_hat[T//2:T,:] = np.squeeze(temp).T temp = np.squeeze(np.mat(temp).conjugate()) u_hat[1:(T//2+1),:] = temp.T[::-1] u_hat[0,:] = (u_hat[-1,:]).conjugate() u = np.zeros((K,len(t))) for k in range(K): u[k,:]=np.real(np.fft.ifft(np.fft.ifftshift(u_hat[:,k]))) #remove mirror part u = u[:,T//4:3*T//4] #recompute spectrum u_hat = np.zeros((T//2, K), dtype=complex) for k in range(K): u_hat[:,k]= np.squeeze( np.mat( np.fft.fftshift(np.fft.fft(u[k,:])) ).H) return u, u_hat, omega
37.503448
141
0.580912
0
0
0
0
0
0
0
0
2,877
0.478862
b9c7d11a999150072e268ba745c49eb08ce5081f
358
py
Python
src/PyDS/Queue/Deque.py
AoWangPhilly/PyDS
d79f92d0d2e7c005ebb8fa9f631d5f01e590625e
[ "MIT" ]
null
null
null
src/PyDS/Queue/Deque.py
AoWangPhilly/PyDS
d79f92d0d2e7c005ebb8fa9f631d5f01e590625e
[ "MIT" ]
null
null
null
src/PyDS/Queue/Deque.py
AoWangPhilly/PyDS
d79f92d0d2e7c005ebb8fa9f631d5f01e590625e
[ "MIT" ]
null
null
null
class Deque: def add_first(self, value): ... def add_last(self, value): ... def delete_first(self): ... def delete_last(self): ... def first(self): ... def last(self): ... def is_empty(self): ... def __len__(self): ... def __str__(self): ...
12.785714
31
0.430168
357
0.997207
0
0
0
0
0
0
0
0
b9c81413c2bd63d72d0731352d31911ef52240f6
480
py
Python
forum/main.py
asmaasalih/my_project
89183d7a2578fa302e94ea29570ab527e9ca47b5
[ "MIT" ]
1
2018-03-21T07:51:36.000Z
2018-03-21T07:51:36.000Z
forum/main.py
asmaasalih/my_project
89183d7a2578fa302e94ea29570ab527e9ca47b5
[ "MIT" ]
null
null
null
forum/main.py
asmaasalih/my_project
89183d7a2578fa302e94ea29570ab527e9ca47b5
[ "MIT" ]
null
null
null
import models import stores member1 =models.Member("ahmed",33) member2 =models.Member("mohamed",30) post1=models.Post("Post1", "Content1") post2= models.Post("Post2", "Content2") post3= models.Post("Post3", "Content3") #member store member_store=stores.MemberStore() member_store.add(member1) member_store.add(member2) print (member_store.get_all()) post_store=stores.PostStore() post_store.add(post1) post_store.add(post2) post_store.add(post3) print (post_store.get_all())
20.869565
39
0.772917
0
0
0
0
0
0
0
0
80
0.166667
b9c89d9ad7d4587730637df2e5c8576e03c43ad8
3,115
py
Python
shellfind.py
bhavyanshu/Shell-Finder
308b3ba7f1a53b8a6cc738d69c01f4b7108d0860
[ "Apache-2.0" ]
4
2016-06-15T22:08:29.000Z
2019-10-16T13:12:51.000Z
shellfind.py
kaitolegion/Shell-Finder
308b3ba7f1a53b8a6cc738d69c01f4b7108d0860
[ "Apache-2.0" ]
null
null
null
shellfind.py
kaitolegion/Shell-Finder
308b3ba7f1a53b8a6cc738d69c01f4b7108d0860
[ "Apache-2.0" ]
7
2015-07-08T22:21:52.000Z
2021-05-31T14:05:47.000Z
#!/usr/bin/env python
'''
Author : Bhavyanshu Parasher
Email : [email protected]
Description : shellfind.py is a Python command line utility which lets you look for shells on a site that the hacker must have uploaded. It considers all the shells available and tries all possibilities via dictionary match.
'''
import socket
import sys
import httplib
from urlparse import urlparse
import time as t
import urllib2
from urllib2 import Request, urlopen, URLError

negative = '\033[91m'
positive = '\033[32m'
wait = '\033[95m'
final = '\033[93m'
total_scanned_global = 0
found_scanned_global = 0

def OpenLog(log_file_name):
    try:
        f = open(log_file_name, 'r')
        contents = f.read()
        f.close()
        return contents
    except IOError:
        return "File " + log_file_name + " does not exist."

def main():
    socket.setdefaulttimeout(10)
    print wait+"\n## ------ Welcome to Shell Finder Utility - Developed by Bhavyanshu Parasher (http://bhavyanshu.github.io) | Apache License V2.0 | Project Source (https://github.com/bhavyanshu/Shell-Finder) ------ ##"
    website_url = raw_input("\n\nEnter URL to scan ([eg, http://sitename.com or https://sitename.com/subdir ] | Do not add slash at the end of URL) : ")
    parse_url = urlparse(website_url)
    log_file_name = "LOG/"+parse_url.netloc+".log"
    global total_scanned_global
    global found_scanned_global
    try:
        try:
            create = open(log_file_name, "w")
        except IOError:
            print negative+"\nError generating log file. Please check directory access permissions."
        print wait+"\nCreating a persistent connection to site "+website_url
        conn = urllib2.Request(website_url)
        urllib2.urlopen(website_url)
        print positive+"Connected! Beginning to scan for shells.."
    except (urllib2.HTTPError, URLError):
        print negative+"\nEither the server is down or you are not connected to the internet."
        exit()
    try:
        dictionary = open("dictionary", "r")
    except IOError:
        print negative+"Dictionary file not found. Please download the latest dictionary from the GitHub link."
        exit()
    keywords = dictionary.readlines()
    for keys in keywords:
        keys = keys.replace("\n", "")  # strip the trailing newline
        New_URL = website_url+"/"+keys
        print wait+">>>> "+New_URL
        req = Request(New_URL)
        try:
            response = urlopen(req)
        except URLError, e:
            if hasattr(e, 'reason'):
                print negative+"Not found"
                total_scanned_global = total_scanned_global+1
            elif hasattr(e, 'code'):
                print negative+"Not found "
                total_scanned_global = total_scanned_global+1
        else:
            try:
                log_file = open(log_file_name, "a+")  # append to the log
            except IOError:
                print negative+"Failed to create log file. Check dir permissions."
            found_scanned_url = New_URL
            print positive+"Possible shell found at ", found_scanned_url
            log_file.writelines(found_scanned_url+"\n")
            found_scanned_global = found_scanned_global+1
            total_scanned_global = total_scanned_global+1
            log_file.close()
    print "\nTotal tries : ", total_scanned_global
    print positive+"\nPossible shells: ", found_scanned_global
    print final+"\nFollowing are the links to possible shells "
    print OpenLog(log_file_name)

if __name__ == '__main__':
    main()
35
224
0.742857
0
0
0
0
0
0
0
0
1,351
0.433708
b9c964b752a9622a17123202e7aae50d1718a48a
1,345
py
Python
question3.py
nosisky/algo-solution
a9276f73ba63b1a0965c194885aea6cadfab0e0b
[ "MIT" ]
1
2019-08-14T12:32:49.000Z
2019-08-14T12:32:49.000Z
question3.py
nosisky/algo-solution
a9276f73ba63b1a0965c194885aea6cadfab0e0b
[ "MIT" ]
null
null
null
question3.py
nosisky/algo-solution
a9276f73ba63b1a0965c194885aea6cadfab0e0b
[ "MIT" ]
null
null
null
# A string S consisting of N characters is considered to be properly nested if any of the following conditions is true:
# S is empty;
# S has the form "(U)" or "[U]" or "{U}" where U is a properly nested string; S has the form "VW" where V and W are properly nested strings.
# For example, the string "{[()()]}" is properly nested but "([)()]" is not.
# Write a function:
# int solution(char *S);
# that, given a string S consisting of N characters, returns 1 if S is properly nested and 0 otherwise.
# For example, given S = "{[()()]}", the function should return 1 and given S = "([)()]", the function should return 0, as explained above.
# Assume that:
# N is an integer within the range [0..200,000];
# string S consists only of the following characters: "(", "{", "[", "]", "}" and/or ")". Complexity:
# expected worst-case time complexity is O(N);
# expected worst-case space complexity is O(N) (not counting the storage required for input arguments).

def solution(s):
    sets = dict(zip('({[', ')}]'))
    if not isinstance(s, str):
        return "Invalid input"

    collector = []

    for bracket in s:
        if bracket in sets:
            collector.append(sets[bracket])
        elif bracket not in sets.values():
            return "Invalid input"
        elif not collector or bracket != collector.pop():
            # A closing bracket with no matching opener (or the wrong opener) means S is not properly nested.
            return False

    return not collector


print(solution("()[]{}"))
42.03125
140
0.66171
0
0
0
0
0
0
0
0
993
0.73829
b9ca4ff833bf2ee267f7f1b8ecf69069cd8c4b31
1,996
py
Python
Teil_27_Game_of_Life_3d.py
chrMenzel/A-beautiful-code-in-Python
92ee43c1fb03c299384d4de8bebb590c5ba1b623
[ "MIT" ]
50
2018-12-23T15:46:16.000Z
2022-03-28T15:49:59.000Z
Teil_27_Game_of_Life_3d.py
chrMenzel/A-beautiful-code-in-Python
92ee43c1fb03c299384d4de8bebb590c5ba1b623
[ "MIT" ]
9
2018-12-03T10:31:29.000Z
2022-01-20T14:41:33.000Z
Teil_27_Game_of_Life_3d.py
chrMenzel/A-beautiful-code-in-Python
92ee43c1fb03c299384d4de8bebb590c5ba1b623
[ "MIT" ]
69
2019-02-02T11:59:09.000Z
2022-03-28T15:54:28.000Z
import bpy import random as rnd from collections import Counter import itertools as iter feld_von, feld_bis = -4, 4 spielfeld_von, spielfeld_bis = feld_von-6, feld_bis+6 anz = int((feld_bis-feld_von)**3*.3) spielfeld = {(rnd.randint(feld_von, feld_bis), rnd.randint( feld_von, feld_bis), rnd.randint(feld_von, feld_bis)) for _ in range(anz)} animate_frame = 8 def nachbarn(pos): for x,y,z in iter.product(range(-1,2), repeat = 3): if z == y == x == 0: continue yield pos[0]+x, pos[1]+y, pos[2]+z def nächsteGeneration(spielfeld): nachb = Counter([p for pos in spielfeld for p in nachbarn(pos)]) return {pos for pos, anz in nachb.items() if anz == 6 or (anz in (5, 6, 7, 8) and pos in spielfeld)} def scale_rotate(ob, scale, rot, fr): ob.scale = (scale, scale, scale) ob.rotation_euler.rotate_axis("Z", rot) ob.keyframe_insert(data_path='rotation_euler', frame=fr) ob.keyframe_insert(data_path='scale', frame=fr) bpy.ops.mesh.primitive_cube_add(size=0.001, location=(0, 0, 0)) orig_cube = bpy.context.active_object n = "cube" m = orig_cube.data.copy() cubes = {} for x,y,z in iter.product(range(spielfeld_von,spielfeld_bis), repeat = 3): o = bpy.data.objects.new(n, m) o.location = (x, y, z) cubes[x, y, z] = o bpy.context.collection.objects.link(o) o.select_set(False) for i in range(200): print(f'Durchlauf No. {i}, Anz. Zellen = {len(spielfeld)}') spielfeld2 = nächsteGeneration(spielfeld) dead = spielfeld - spielfeld2 new = spielfeld2 - spielfeld spielfeld = spielfeld2 if not new and not dead: break for zelle in new | dead: if zelle not in cubes: continue ob = cubes[zelle] if zelle in new: scale_rotate(ob, 0.001, -3.141/2, (i-1)*animate_frame) scale_rotate(ob, 750, 3.141/2, i * animate_frame) else: scale_rotate(ob, 750, 3.141/2, (i-1) * animate_frame) scale_rotate(ob, 0.001, -3.141/2, i * animate_frame) if not spielfeld: break bpy.context.scene.frame_current = 1
28.927536
102
0.67986
0
0
145
0.072573
0
0
0
0
84
0.042042
b9ca98991068e30844d7bcc8e336f70de5eef5a9
1,824
py
Python
power_perceiver/xr_batch_processor/reduce_num_pv_systems.py
openclimatefix/power_perceiver
bafcdfaf6abf42fbab09da641479f74709ddd395
[ "MIT" ]
null
null
null
power_perceiver/xr_batch_processor/reduce_num_pv_systems.py
openclimatefix/power_perceiver
bafcdfaf6abf42fbab09da641479f74709ddd395
[ "MIT" ]
33
2022-02-16T07:51:41.000Z
2022-03-31T11:24:11.000Z
power_perceiver/xr_batch_processor/reduce_num_pv_systems.py
openclimatefix/power_perceiver
bafcdfaf6abf42fbab09da641479f74709ddd395
[ "MIT" ]
null
null
null
from dataclasses import dataclass

import numpy as np
import xarray as xr

from power_perceiver.load_prepared_batches.data_sources import PV
from power_perceiver.load_prepared_batches.data_sources.prepared_data_source import XarrayBatch


@dataclass
class ReduceNumPVSystems:
    """Reduce the number of PV systems per example to `requested_num_pv_systems`.

    Randomly select PV systems for each example. If there are less PV systems available
    than requested, then randomly sample with duplicates allowed.

    This is implemented as an xr_batch_processor so it can run after
    SelectPVSystemsNearCenterOfImage.
    """

    requested_num_pv_systems: int

    def __post_init__(self):
        self.rng = np.random.default_rng()  # Seeded by seed_rngs worker_init_function

    def __call__(self, xr_batch: XarrayBatch) -> XarrayBatch:
        pv_batch = xr_batch[PV]
        num_examples = len(pv_batch.example)

        selection = np.zeros(shape=(num_examples, self.requested_num_pv_systems), dtype=np.int32)
        for example_i in range(num_examples):
            pv_mask_for_example = pv_batch.pv_mask.isel(example=example_i).values
            all_indicies = np.nonzero(pv_mask_for_example)[0]
            # Only allow a PV system to be chosen multiple times for this example if there are
            # less available PV systems than requested PV systems.
            replace = len(all_indicies) < self.requested_num_pv_systems
            chosen_indicies = self.rng.choice(
                all_indicies, size=self.requested_num_pv_systems, replace=replace
            )
            selection[example_i] = chosen_indicies

        selection = xr.DataArray(selection, dims=("example", "pv_system"))
        pv_batch = pv_batch.isel(pv_system=selection)

        xr_batch[PV] = pv_batch
        return xr_batch
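The core of this record is the with/without-replacement sampling of PV-system indices. A small standalone sketch of just that selection step, using made-up mask values and sizes rather than the real batch pipeline, might look like this:

# Illustrative stand-in values (not from the original XarrayBatch pipeline).
import numpy as np

rng = np.random.default_rng(42)
requested_num_pv_systems = 8
pv_mask_for_example = np.array([True, True, False, True, False, True])  # 4 systems available

available = np.nonzero(pv_mask_for_example)[0]
# Duplicates are only allowed when fewer systems are available than requested.
replace = len(available) < requested_num_pv_systems
chosen = rng.choice(available, size=requested_num_pv_systems, replace=replace)
print(chosen)  # 8 indices drawn from the 4 available systems, with repeats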
39.652174
97
0.721491
1,573
0.86239
0
0
1,584
0.868421
0
0
546
0.299342
b9cc65aafe29eb9820f902e036880e65947e1e2d
857
py
Python
HelloWorld_python/log/demo_log_3.py
wang153723482/HelloWorld_my
b8642ad9742f95cfebafc61f25b00e917485e50c
[ "Apache-2.0" ]
null
null
null
HelloWorld_python/log/demo_log_3.py
wang153723482/HelloWorld_my
b8642ad9742f95cfebafc61f25b00e917485e50c
[ "Apache-2.0" ]
null
null
null
HelloWorld_python/log/demo_log_3.py
wang153723482/HelloWorld_my
b8642ad9742f95cfebafc61f25b00e917485e50c
[ "Apache-2.0" ]
null
null
null
#encoding=utf8
# Generate a new log file each day
import logging
import time

from logging.handlers import TimedRotatingFileHandler

#----------------------------------------------------------------------
if __name__ == "__main__":
    logFilePath = "timed_test.log"
    logger = logging.getLogger("YouLoggerName")
    logger.setLevel(logging.INFO)

    handler = TimedRotatingFileHandler(logFilePath,
                                       when="d",
                                       interval=1,
                                       backupCount=7)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)

    for i in range(6):
        logger.info("This is a info!")
        logger.debug("This is a debug!")
        # time.sleep(61)
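A common variation on the handler configuration above is to roll the file over at midnight rather than on a fixed interval since startup; the sketch below uses illustrative file names and retention settings that are not part of the original script:

# Rotate at midnight and keep two weeks of dated backups (illustrative settings).
import logging
from logging.handlers import TimedRotatingFileHandler

logger = logging.getLogger("midnight_example")
logger.setLevel(logging.INFO)

handler = TimedRotatingFileHandler("midnight_test.log", when="midnight", backupCount=14)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)

logger.info("rotation happens at midnight; rolled files get a date suffix")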
29.551724
89
0.536756
0
0
0
0
0
0
0
0
258
0.296893
b9cd3a4333e6169069ae770751a00e82db1b741a
736
py
Python
bot_da_os/statemachine/person/person_action.py
Atsocs/bot-da-os
e6d54057f4a3b703f303e9944a39e291ac87c40f
[ "MIT" ]
null
null
null
bot_da_os/statemachine/person/person_action.py
Atsocs/bot-da-os
e6d54057f4a3b703f303e9944a39e291ac87c40f
[ "MIT" ]
null
null
null
bot_da_os/statemachine/person/person_action.py
Atsocs/bot-da-os
e6d54057f4a3b703f303e9944a39e291ac87c40f
[ "MIT" ]
null
null
null
from operator import eq


class PersonAction:
    def __init__(self, action):
        self.action = action

    def __str__(self):
        return self.action

    def __eq__(self, other):
        return eq(self.action, other.action)

    # Necessary when __cmp__ or __eq__ is defined
    # in order to make this class usable as a
    # dictionary key:
    def __hash__(self):
        return hash(self.action)


# Static fields; an enumeration of instances:
PersonAction.compliment = PersonAction("person compliments")
PersonAction.informing = PersonAction("person gives information about the service order")
PersonAction.query = PersonAction("person wants to know about his/her order")
PersonAction.angry = PersonAction("person is pissed off")
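Because __eq__ and __hash__ both delegate to the underlying action string, two independently constructed instances compare equal and can be used interchangeably as dictionary keys. A short sketch (the reply texts are invented for illustration and are not part of the original bot):

# Using the enumerated actions as keys in a transition/reply table.
replies = {
    PersonAction.compliment: "thank the person",
    PersonAction.query: "look up the service order",
    PersonAction.angry: "escalate to a human operator",
}

incoming = PersonAction("person wants to know about his/her order")
print(replies[incoming])  # equal by action string, so the lookup finds PersonAction.query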
29.44
89
0.72962
374
0.508152
0
0
0
0
0
0
282
0.383152
b9cd7df81437d8aa9311f772f8fd75744e9a395a
5,108
py
Python
MyServer.py
bisw1jit/MyServer
cbd7bc4015482ce8f24314894148f7e20ef66b21
[ "MIT" ]
3
2019-11-09T17:29:55.000Z
2021-12-19T21:52:12.000Z
MyServer.py
bisw1jit/MyServer
cbd7bc4015482ce8f24314894148f7e20ef66b21
[ "MIT" ]
null
null
null
MyServer.py
bisw1jit/MyServer
cbd7bc4015482ce8f24314894148f7e20ef66b21
[ "MIT" ]
null
null
null
# Tool Name :- MyServer # Author :- LordReaper # Date :- 13/11/2018 - 9/11/2019 # Powered By :- H1ckPro Software's import sys import os from time import sleep from core.system import * if len(sys.argv)>1: pass else: print ("error : invalid arguments !!") print ("use : myserver --help for more information") sys.exit() if sys.argv[1]=="-s": if len(sys.argv)==2: if system=="ubuntu": os.system("sudo python3 core/s.py "+sys.argv[1]) else: os.system("python3 core/s.py "+sys.argv[1]) elif len(sys.argv)==3: if sys.argv[2]=="apache": if system=="ubuntu": os.system("sudo python3 core/server.py -apa") else: os.system("python3 core/server.py -apa") else: print ("error : invalid arguments !!") print ("use : myserver --help for more information") elif len(sys.argv)==6: if sys.argv[2]=="-php": if system=="ubuntu": os.system("sudo python3 core/server.py -php "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5]) else: os.system("python3 core/server.py -php "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5]) elif sys.argv[2]=="-py": if system=="ubuntu": os.system("sudo python3 core/server.py -py "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5]) else: os.system("python3 core/server.py -py "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5]) elif sys.argv[2]=="-ng": if system=="ubuntu": os.system("sudo python3 core/server.py -ng "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5]) else: os.system("python3 core/server.py -ng "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5]) else: print ("error : invalid arguments !!") print ("use : myserver --help for more information") elif len(sys.argv)==5: if system=="ubuntu": os.system("sudo python3 core/server.py -d "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4]) else: os.system("python3 core/server.py -d "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4]) else: print ("error : invalid arguments !!") print ("use : myserver --help for more information") elif sys.argv[1]=="-h": if len(sys.argv)==2: if system=="ubuntu": os.system("sudo python3 core/s.py "+sys.argv[1]) else: os.system("python3 core/s.py "+sys.argv[1]) elif len(sys.argv)==5: if system=="ubuntu": os.system("sudo python3 core/host.py "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4]) else: os.system("python3 core/host.py "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4]) else: print ("error : invalid arguments") print ("use : myserver --help for more information") elif sys.argv[1]=="-db": if len(sys.argv)==3: if sys.argv[2]=="start": if system=="ubuntu": os.system("sudo python3 core/mysql.py "+sys.argv[2]) else: os.system("python3 core/mysql.py "+sys.argv[2]) elif sys.argv[2]=="stop": if system=="ubuntu": os.system("sudo python3 core/mysql.py "+sys.argv[2]) else: os.system("python3 core/mysql.py "+sys.argv[2]) else: print ("error : invalid arguments !!") print ("use : myserver --help for more information") else: print ("error : invalid arguments !!") print ("use : myserver --help for more information") elif sys.argv[1]=="rm": if len(sys.argv)==3: if sys.argv[2]=="-T" or sys.argv[2]=="-t": if system=="ubuntu": os.system("sudo python3 core/un.py") else: os.system("python3 core/un.py") else: print ("error : invalid arguments") print ("use : myserver --help for more information") else: print ("error : invalid arguments") print ("use : myserver --help for more information") elif sys.argv[1]=="update": if system=="ubuntu": os.system("sudo python3 core/upd.py") else: os.system("python3 core/upd.py") elif sys.argv[1]=="start": if system=="ubuntu": os.system("sudo python3 .MyServer.py") else: os.system("python3 .MyServer.py") elif sys.argv[1]=="--help" or 
sys.argv[1]=="-help" or sys.argv[1]=="help": print ("") print ("Usage: myserver [command]... [arguments]...") print ("") print (" Commands:") print (" -s <hostname> <port> <path> to start default localhost server.") print (" -s -ng <hostname> <port> <path> to start php localhost server.") print (" -s -php <hostname> <port> <path> to start php localhost server.") print (" -s -py <hostname> <port> <path> to start python localhost server.") print (" -h <hostname> <localhost_port> <port> to access localhost server on internet.") print (" -db [start/stop] to start/stop MySQL database server.") print (" -s apache to start apache web server.") print (" update update MyServer.") print (" rm -t uninstall MyServer.") print (" start start MyServer menu.") print ("") else: print ("error : invalid arguments !!") print ("use : myserver --help for more information")
34.748299
98
0.576155
0
0
0
0
0
0
0
0
2,584
0.505873
b9cda5cbb2749647d6a78abf80d9eb5c24205425
341
py
Python
tests/test_gen_epub.py
ffreemt/tmx2epub
55a59cb2a9b7f42031a65f64c29e5c43fdb487ea
[ "MIT" ]
null
null
null
tests/test_gen_epub.py
ffreemt/tmx2epub
55a59cb2a9b7f42031a65f64c29e5c43fdb487ea
[ "MIT" ]
null
null
null
tests/test_gen_epub.py
ffreemt/tmx2epub
55a59cb2a9b7f42031a65f64c29e5c43fdb487ea
[ "MIT" ]
null
null
null
""" test gen_epub. """ from tmx2epub.gen_epub import gen_epub def test_gen_epub2(): """ test_gen_epub2. """ from pathlib import Path infile = r"tests\2.tmx" stem = Path(infile).absolute().stem outfile = f"{Path(infile).absolute().parent / stem}.epub" assert gen_epub(infile, debug=True) == outfile # assert 0
22.733333
61
0.653959
0
0
0
0
0
0
0
0
116
0.340176
b9cde2fbd07898c518510cadb194827f6566c927
716
py
Python
pub_sub/python/http/checkout/app.py
amulyavarote/quickstarts
c21a8f58d515b28eaa8a3680388fa06995c2331b
[ "Apache-2.0" ]
null
null
null
pub_sub/python/http/checkout/app.py
amulyavarote/quickstarts
c21a8f58d515b28eaa8a3680388fa06995c2331b
[ "Apache-2.0" ]
null
null
null
pub_sub/python/http/checkout/app.py
amulyavarote/quickstarts
c21a8f58d515b28eaa8a3680388fa06995c2331b
[ "Apache-2.0" ]
null
null
null
import json
import time
import random
import logging
import requests
import os

logging.basicConfig(level=logging.INFO)

base_url = os.getenv('BASE_URL', 'http://localhost') + ':' + os.getenv(
    'DAPR_HTTP_PORT', '3500')
PUBSUB_NAME = 'order_pub_sub'
TOPIC = 'orders'
logging.info('Publishing to baseURL: %s, Pubsub Name: %s, Topic: %s' % (
    base_url, PUBSUB_NAME, TOPIC))

for i in range(1, 10):
    order = {'orderId': i}

    # Publish an event/message using Dapr PubSub via HTTP Post
    result = requests.post(
        url='%s/v1.0/publish/%s/%s' % (base_url, PUBSUB_NAME, TOPIC),
        json=order
    )

    logging.info('Published data: ' + json.dumps(order))
    time.sleep(1)
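Spelling out a single publish call with the route fully expanded may make the loop above easier to follow; the explicit status check below is an addition for illustration and uses the default local Dapr sidecar address assumed by the script:

# One publish against the Dapr HTTP pub/sub route used above (illustrative order id).
import requests

DAPR_PUBLISH_URL = "http://localhost:3500/v1.0/publish/order_pub_sub/orders"
resp = requests.post(DAPR_PUBLISH_URL, json={"orderId": 99})
resp.raise_for_status()  # Dapr responds with 204 No Content when the event is accepted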
25.571429
72
0.642458
0
0
0
0
0
0
0
0
239
0.333799
b9ce404499c062b33e8623b446d27dfebe6f033f
52,312
py
Python
jj.py
smailedge/pro
f86347d4368bc97aa860b37caa9ba10e84a93738
[ "Unlicense" ]
1
2019-08-14T04:17:06.000Z
2019-08-14T04:17:06.000Z
jj.py
smailedge/pro
f86347d4368bc97aa860b37caa9ba10e84a93738
[ "Unlicense" ]
null
null
null
jj.py
smailedge/pro
f86347d4368bc97aa860b37caa9ba10e84a93738
[ "Unlicense" ]
7
2018-10-27T11:58:45.000Z
2021-02-11T19:45:30.000Z
# -*- coding: utf-8 -*- from linepy import * from datetime import datetime from time import sleep from humanfriendly import format_timespan, format_size, format_number, format_length import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse #==============================================================================# botStart = time.time() cl = LINE() #cl = LINE("TOKEN KAMU") #cl = LINE("Email","Password") cl.log("Auth Token : " + str(cl.authToken)) channelToken = cl.getChannelResult() cl.log("Channel Token : " + str(channelToken)) clMID = cl.profile.mid clProfile = cl.getProfile() lineSettings = cl.getSettings() oepoll = OEPoll(cl) #==============================================================================# readOpen = codecs.open("read.json","r","utf-8") settingsOpen = codecs.open("temp.json","r","utf-8") read = json.load(readOpen) settings = json.load(settingsOpen) myProfile = { "displayName": "", "statusMessage": "", "pictureStatus": "" } msg_dict = {} bl = [""] myProfile["displayName"] = clProfile.displayName myProfile["statusMessage"] = clProfile.statusMessage myProfile["pictureStatus"] = clProfile.pictureStatus #==============================================================================# def restartBot(): print ("[ INFO ] BOT RESETTED") backupData() python = sys.executable os.execl(python, python, *sys.argv) def backupData(): try: backup = settings f = codecs.open('temp.json','w','utf-8') json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False) backup = read f = codecs.open('read.json','w','utf-8') json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False) return True except Exception as error: logError(error) return False def logError(text): cl.log("[ ERROR ] " + str(text)) time_ = datetime.now() with open("errorLog.txt","a") as error: error.write("\n[%s] %s" % (str(time), text)) def sendMessageWithMention(to, mid): try: aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}' text_ = '@x ' cl.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0) except Exception as error: logError(error) def helpmessage(): helpMessage = """╔═════════════ ╠♥ ✿✿✿ 十香の特製Bot ✿✿✿ ♥ ╠SR 設定已讀點 ╠LR 查看誰已讀 ╠Nk @ 標註踢人 ╠Nk 全部再見 ╠══✪〘 其他功能略 〙✪═══ """ return helpMessage wait = { "share":False, "sender" :{}, } admin =['ud5ff1dff426cf9e3030c7ac2a61512f0','ua10c2ad470b4b6e972954e1140ad1891',clMID] owners = ["ua10c2ad470b4b6e972954e1140ad1891","ud5ff1dff426cf9e3030c7ac2a61512f0"] #if clMID not in owners: # python = sys.executable # os.execl(python, python, *sys.argv) #==============================================================================# def lineBot(op): try: if op.type == 0: print ("[ 0 ] END OF OPERATION") return if op.type == 5: print ("[ 5 ] NOTIFIED ADD CONTACT") if settings["autoAdd"] == True: cl.sendMessage(op.param1, "感謝您加入本帳為好友w".format(str(cl.getContact(op.param1).displayName))) if op.type == 13: print ("[ 13 ] NOTIFIED INVITE GROUP") group = cl.getGroup(op.param1) if settings["autoJoin"] == True: cl.acceptGroupInvitation(op.param1) if op.type == 19: if op.param2 not in owners: if op.param2 in owners: pass elif wait["protect"] == True: settings["blacklist"][op.param2] = True cl.kickoutFromGroup(op.param1,[op.param2]) else: cl.sendMessage(op.param1,"") else: cl.sendMessage(op.param1,"") if op.type == 24: print ("[ 24 ] NOTIFIED LEAVE ROOM") if settings["autoLeave"] == True: cl.leaveRoom(op.param1) if op.type == 25 or op.type == 26: K0 = admin msg = op.message if 
wait["share"] == True: K0 = msg._from else: K0 = admin # if op.type == 25: # to = msg.to # receiver = str(to.displayName) # print ("send" + receiver + str(text.lower())) # if op.type == 26: # to = msg._from # sender = str(to.displayName) # print ("receiver" + sender + str(text.lower())) if op.type == 26 or op.type == 25: print ("[ 25 ] SEND MESSAGE") msg = op.message text = msg.text msg_id = msg.id receiver = msg.to sender = msg._from if msg.toType == 0: if sender != cl.profile.mid: to = sender else: to = receiver else: to = receiver if msg.contentType == 0: if text is None: return #==============================================================================# if sender in K0: if text.lower() == 'help': helpMessage = helpmessage() cl.sendMessage(to, str(helpMessage)) cl.sendContact(to,"u0a59c278b1529476ddb210cb5e827ffc") cl.sendContact(to,"ufb30e2203f44bc7b72e28b09a88c9bbd") #==============================================================================# elif text.lower() == 'speed': start = time.time() cl.sendMessage(to, "計算中...") elapsed_time = time.time() - start cl.sendMessage(to,format(str(elapsed_time))) elif text.lower() == 'restart': cl.sendMessage(to, "重新啟動中...") time.sleep(5) cl.sendMessage(to, "重啟成功,請重新登入") restartBot() elif text.lower() == 'runtime': timeNow = time.time() runtime = timeNow - botStart runtime = format_timespan(runtime) cl.sendMessage(to, "系統已運作 {}".format(str(runtime))) elif text.lower() == 'about': try: arr = [] owner = "ua10c2ad470b4b6e972954e1140ad1891" creator = cl.getContact(owner) contact = cl.getContact(clMID) grouplist = cl.getGroupIdsJoined() contactlist = cl.getAllContactIds() blockedlist = cl.getBlockedContactIds() ret_ = "╔══[ 關於使用者 ]" ret_ += "\n╠ 使用者名稱 : {}".format(contact.displayName) ret_ += "\n╠ 群組數 : {}".format(str(len(grouplist))) ret_ += "\n╠ 好友數 : {}".format(str(len(contactlist))) ret_ += "\n╠ 已封鎖 : {}".format(str(len(blockedlist))) ret_ += "\n╠══[ 關於本bot ]" ret_ += "\n╠ 版本 : 最新" ret_ += "\n╠ 製作者 : {}".format(creator.displayName) ret_ += "\n╚══[ 感謝您的使用 ]" cl.sendMessage(to, str(ret_)) except Exception as e: cl.sendMessage(msg.to, str(e)) #==============================================================================# elif text.lower() == 'set': try: ret_ = "╔══[ 狀態 ]" if settings["autoAdd"] == True: ret_ += "\n╠ Auto Add ✅" else: ret_ += "\n╠ Auto Add ❌" if settings["autoJoin"] == True: ret_ += "\n╠ Auto Join ✅" else: ret_ += "\n╠ Auto Join ❌" if settings["autoLeave"] == True: ret_ += "\n╠ Auto Leave ✅" else: ret_ += "\n╠ Auto Leave ❌" if settings["autoRead"] == True: ret_ += "\n╠ Auto Read ✅" else: ret_ += "\n╠ Auto Read ❌" if settings["reread"] ==True: ret_+="\n╠ Reread ✅" else: ret_ += "\n╠ Reread ❌" ret_ += "\n╚══[ Finish ]" cl.sendMessage(to, str(ret_)) except Exception as e: cl.sendMessage(msg.to, str(e)) elif text.lower() == 'autoadd on': settings["autoAdd"] = True cl.sendMessage(to, "Auto Add on success") elif text.lower() == 'autoadd off': settings["autoAdd"] = False cl.sendMessage(to, "Auto Add off success") elif text.lower() == 'autojoin on': settings["autoJoin"] = True cl.sendMessage(to, "Auto Join on success") elif text.lower() == 'autojoin off': settings["autoJoin"] = False cl.sendMessage(to, "Auto Join off success") elif text.lower() == 'autoleave on': settings["autoLeave"] = True cl.sendMessage(to, "Auto Leave on success") elif text.lower() == 'autojoin off': settings["autoLeave"] = False cl.sendMessage(to, "Auto Leave off success") elif text.lower() == 'autoread on': settings["autoRead"] = True cl.sendMessage(to, "Auto Read 
on success") elif text.lower() == 'autoread off': settings["autoRead"] = False cl.sendMessage(to, "Auto Read off success") elif text.lower() == 'checksticker on': settings["checkSticker"] = True cl.sendMessage(to, "Berhasil mengaktifkan Check Details Sticker") elif text.lower() == 'checksticker off': settings["checkSticker"] = False cl.sendMessage(to, "Berhasil menonaktifkan Check Details Sticker") elif text.lower() == 'detectmention on': settings["datectMention"] = True cl.sendMessage(to, "Berhasil mengaktifkan Detect Mention") elif text.lower() == 'detectmention off': settings["datectMention"] = False cl.sendMessage(to, "Berhasil menonaktifkan Detect Mention") elif text.lower() == 'reread on': settings["reread"] = True cl.sendMessage(to,"reread on success") elif text.lower() == 'reread off': settings["reread"] = False cl.sendMessage(to,"reread off success") elif text.lower() == 'protect on': settings["protect"] = True cl.sendMessage(to, "Protect on success") elif text.lower() == 'protect off': settings["protect"] = False cl.sendMessage(to, "Protect off success") elif text.lower() == 'share on': wait["share"] = True cl.sendMessage(to, "已開啟分享") elif text.lower() == 'share off': wait["share"] = False cl.sendMessage(to, "已關閉分享") #==============================================================================# elif text.lower() == 'admin ': MENTION =eval(msg.contentMetadata['MENTION']) inkey =MENTION['MENTIONEES'][0]['M'] admin.append(str(inkey)) cl.sendMessage(to,"已新增權限") elif text.lower() == 'demin ': MENTION =eval(msg.contentMetadata['MENTION']) inkey =MENTION['MENTIONEES'][0]['M'] admin.remove(str(inkey)) cl.sendMessage(to,"已停止權限") elif text.lower() == 'adminlist': if admin == []: cl.sendMessage(to,"無擁有權限者!") else: mc = "╔══[ Admin List ]" for mi_d in admin: mc += "\n╠ "+cl.getContact(mi_d).displayName cl.sendMessage(to,mc + "\n╚══[ Finish ]") #==============================================================================# elif text.lower() == 'me': sendMessageWithMention(to, clMID) cl.sendContact(to, clMID) elif text.lower() == 'mymid': cl.sendMessage(msg.to,"[MID]\n" + clMID) elif text.lower() == 'myname': me = cl.getContact(clMID) cl.sendMessage(msg.to,"[Name]\n" + me.displayName) elif text.lower() == 'mytoken': me = cl.getContact(clMID) cl.sendMessage(msg.to,"[StatusMessage]\n" + me.statusMessage) elif text.lower() == 'mypicture': me = cl.getContact(clMID) cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus) elif text.lower() == 'myvideoprofile': me = cl.getContact(clMID) cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp") elif text.lower() == 'mycover': me = cl.getContact(clMID) cover = cl.getProfileCoverURL(clMID) cl.sendImageWithURL(msg.to, cover) elif msg.text.lower().startswith("contact "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: contact = cl.getContact(ls) mi_d = contact.mid cl.sendContact(msg.to, mi_d) elif msg.text.lower().startswith("mid "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) ret_ = "[ Mid User ]" for ls in lists: ret_ += "\n" + 
ls cl.sendMessage(msg.to, str(ret_)) elif msg.text.lower().startswith("name "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: contact = cl.getContact(ls) cl.sendMessage(msg.to, "[ 名字 ]\n" + contact.displayName) for ls in lists: contact = cl.getContact(ls) cl.sendMessage(msg.to, "[ 個簽 ]\n" + contact.statusMessage) for ls in lists: path = "http://dl.profile.cl.naver.jp/" + cl.getContact(ls).pictureStatus cl.sendImageWithURL(msg.to, str(path)) for ls in lists: path = cl.getProfileCoverURL(ls) pmath = "http://dl.profile.cl.naver.jp/" + cl.getContact(ls).pictureStatus cl.sendImageWithURL(msg.to, path) try: key = eval(msg.contentMetadata["MENTION"]) u = key["MENTIONEES"][0]["M"] cname = cl.getContact(u).displayName cmid = cl.getContact(u).mid cstatus = cl.getContact(u).statusMessage cpic = cl.getContact(u).picturePath cl.sendMessage(receiver, 'Nama : '+cname+'\nMID : '+cmid+'\nStatus Msg : '+cstatus+'\nPicture : http://dl.profile.line.naver.jp'+cpic) cl.sendMessage(receiver, None, contentMetadata={'mid': cmid}, contentType=13) if cl.getContact(u).videoProfile != None: cl.sendVideoWithURL(receiver, 'http://dl.profile.line.naver.jp'+cpic+'/vp.small') else: cl.sendImageWithURL(receiver, 'http://dl.profile.line.naver.jp'+cpic) except Exception as e: cl.sendMessage(receiver, str(e)) if line != None: if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: path = cl.getProfileCoverURL(ls) cl.sendImageWithURL(msg.to, str(path)) elif msg.text.lower().startswith("cloneprofile "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] for mention in mentionees: contact = mention["M"] break try: cl.cloneContactProfile(contact) cl.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah") except: cl.sendMessage(msg.to, "Gagal clone member") elif text.lower() == 'restoreprofile': try: clProfile.displayName = str(myProfile["displayName"]) clProfile.statusMessage = str(myProfile["statusMessage"]) clProfile.pictureStatus = str(myProfile["pictureStatus"]) cl.updateProfileAttribute(8, clProfile.pictureStatus) cl.updateProfile(clProfile) cl.sendMessage(msg.to, "Berhasil restore profile tunggu beberapa saat sampai profile berubah") except: cl.sendMessage(msg.to, "Gagal restore profile") #==============================================================================# elif msg.text.lower().startswith("mimicadd "): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: settings["mimic"]["target"][target] = True cl.sendMessage(msg.to,"已加入模仿名單!") break except: cl.sendMessage(msg.to,"添加失敗 !") break elif msg.text.lower().startswith("mimicdel "): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: del settings["模仿名單"]["target"][target] cl.sendMessage(msg.to,"刪除成功 !") break except: 
cl.sendMessage(msg.to,"刪除失敗 !") break elif text.lower() == 'mimiclist': if settings["mimic"]["target"] == {}: cl.sendMessage(msg.to,"未設定模仿目標") else: mc = "╔══[ Mimic List ]" for mi_d in settings["mimic"]["target"]: mc += "\n╠ "+cl.getContact(mi_d).displayName cl.sendMessage(msg.to,mc + "\n╚══[ Finish ]") elif "mimic" in msg.text.lower(): sep = text.split(" ") mic = text.replace(sep[0] + " ","") if mic == "on": if settings["mimic"]["status"] == False: settings["mimic"]["status"] = True cl.sendMessage(msg.to,"Reply Message on") elif mic == "off": if settings["mimic"]["status"] == True: settings["mimic"]["status"] = False cl.sendMessage(msg.to,"Reply Message off") #==============================================================================# elif text.lower() == 'groupcreator': group = cl.getGroup(to) GS = group.creator.mid cl.sendContact(to, GS) elif text.lower() == 'groupid': gid = cl.getGroup(to) cl.sendMessage(to, "[ID Group : ]\n" + gid.id) elif text.lower() == 'grouppicture': group = cl.getGroup(to) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus cl.sendImageWithURL(to, path) elif text.lower() == 'groupname': gid = cl.getGroup(to) cl.sendMessage(to, "[群組名稱 : ]\n" + gid.name) elif text.lower() == 'grouplink': if msg.toType == 2: group = cl.getGroup(to) if group.preventedJoinByTicket == False: ticket = cl.reissueGroupTicket(to) cl.sendMessage(to, "[ Group Ticket ]\nhttps://cl.me/R/ti/g/{}".format(str(ticket))) else: cl.sendMessage(to, "Grouplink未開啟 {}openlink".format(str(settings["keyCommand"]))) elif text.lower() == 'link off': if msg.toType == 2: group = cl.getGroup(to) if group.preventedJoinByTicket == False: cl.sendMessage(to, "群組網址已關") else: group.preventedJoinByTicket = False cl.updateGroup(group) cl.sendMessage(to, "關閉成功") elif text.lower() == 'link on': if msg.toType == 2: group = cl.getGroup(to) if group.preventedJoinByTicket == True: cl.sendMessage(to, "群組網址已開") else: group.preventedJoinByTicket = True cl.updateGroup(group) cl.sendMessage(to, "開啟成功") elif text.lower() == 'groupinfo': group = cl.getGroup(to) try: gCreator = group.creator.displayName except: gCreator = "不明" if group.invitee is None: gPending = "0" else: gPending = str(len(group.invitee)) if group.preventedJoinByTicket == True: gQr = "關閉" gTicket = "無" else: gQr = "開啟" gTicket = "https://cl.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(group.id))) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus ret_ = "╔══[ Group Info ]" ret_ += "\n╠ 群組名稱 : {}".format(str(group.name)) ret_ += "\n╠ 群組 Id : {}".format(group.id) ret_ += "\n╠ 創建者 : {}".format(str(gCreator)) ret_ += "\n╠ 群組人數 : {}".format(str(len(group.members))) ret_ += "\n╠ 邀請中 : {}".format(gPending) ret_ += "\n╠ 網址狀態 : {}".format(gQr) ret_ += "\n╠ 群組網址 : {}".format(gTicket) ret_ += "\n╚══[ Finish ]" cl.sendMessage(to, str(ret_)) cl.sendImageWithURL(to, path) elif text.lower() == 'groupmemberlist': if msg.toType == 2: group = cl.getGroup(to) ret_ = "╔══[ 成員名單 ]" no = 0 + 1 for mem in group.members: ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName)) no += 1 ret_ += "\n╚══[ 全部成員共 {} 人]".format(str(len(group.members))) cl.sendMessage(to, str(ret_)) elif text.lower() == 'grouplist': groups = cl.groups ret_ = "╔══[ Group List ]" no = 0 + 1 for gid in groups: group = cl.getGroup(gid) ret_ += "\n╠ {}. 
{} | {}".format(str(no), str(group.name), str(len(group.members))) no += 1 ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups))) cl.sendMessage(to, str(ret_)) elif msg.text.lower().startswith("nk "): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: cl.sendMessage(to,"Fuck you") cl.kickoutFromGroup(msg.to,[target]) except: cl.sendMessage(to,"Error") elif msg.text.lower().startswith("ri "): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: cl.sendMessage(to,"來回機票一張ww") cl.kickoutFromGroup(msg.to,[target]) cl.inviteIntoGroup(to,[target]) except: cl.sendMessage(to,"Error") elif text.lower() == 'nk': if msg.toType == 2: print ("[ 19 ] KICK ALL MEMBER") _name = msg.text.replace("Byeall","") gs = cl.getGroup(msg.to) cl.sendMessage(msg.to,"Sorry guys") targets = [] for g in gs.members: if _name in g.displayName: targets.append(g.mid) if targets == []: cl.sendMessage(msg.to,"Not Found") else: for target in targets: try: cl.kickoutFromGroup(msg.to,[target]) print (msg.to,[g.mid]) except: cl.sendMessage(msg.to,"") elif ("Gn " in msg.text): if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Gn ","") cl.updateGroup(X) else: cl.sendMessage(msg.to,"It can't be used besides the group.") elif text.lower() == 'cancel': if msg.toType == 2: group = cl.getGroup(to) gMembMids = [contact.mid for contact in group.invitee] for _mid in gMembMids: cl.cancelGroupInvitation(msg.to,[_mid]) cl.sendMessage(msg.to,"已取消所有邀請!") elif ("Inv " in msg.text): if msg.toType == 2: midd = msg.text.replace("Inv ","") cl.findAndAddContactsByMid(midd) cl.inviteIntoGroup(to,[midd]) #==============================================================================# elif text.lower() == 'tagall': group = cl.getGroup(msg.to) nama = [contact.mid for contact in group.members] k = len(nama)//100 for a in range(k+1): txt = u'' s=0 b=[] for i in group.members[a*100 : (a+1)*100]: b.append({"S":str(s), "E" :str(s+6), "M":i.mid}) s += 7 txt += u'@Alin \n' cl.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0) cl.sendMessage(to, "Total {} Mention".format(str(len(nama)))) elif text.lower() == 'sr': tz = pytz.timezone("Asia/Jakarta") timeNow = datetime.now(tz=tz) day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] hr = timeNow.strftime("%A") bln = timeNow.strftime("%m") for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k-1] readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]" if msg.to in read['readPoint']: try: del read['readPoint'][msg.to] del read['readMember'][msg.to] del read['readTime'][msg.to] except: pass read['readPoint'][msg.to] = msg.id read['readMember'][msg.to] = "" read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S') read['ROM'][msg.to] = {} with open('read.json', 'w') as fp: json.dump(read, fp, sort_keys=True, indent=4) cl.sendMessage(msg.to,"偵測點已設置") else: try: del read['readPoint'][msg.to] del read['readMember'][msg.to] del 
read['readTime'][msg.to] except: pass read['readPoint'][msg.to] = msg.id read['readMember'][msg.to] = "" read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S') read['ROM'][msg.to] = {} with open('read.json', 'w') as fp: json.dump(read, fp, sort_keys=True, indent=4) cl.sendMessage(msg.to, "Set reading point:\n" + readTime) elif text.lower() == 'readcancel': tz = pytz.timezone("Asia/Jakarta") timeNow = datetime.now(tz=tz) day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] hr = timeNow.strftime("%A") bln = timeNow.strftime("%m") for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k-1] readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]" if msg.to not in read['readPoint']: cl.sendMessage(msg.to,"偵測點已取消") else: try: del read['readPoint'][msg.to] del read['readMember'][msg.to] del read['readTime'][msg.to] except: pass cl.sendMessage(msg.to, "Delete reading point:\n" + readTime) elif text.lower() == 'resetread': tz = pytz.timezone("Asia/Jakarta") timeNow = datetime.now(tz=tz) day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] hr = timeNow.strftime("%A") bln = timeNow.strftime("%m") for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k-1] readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]" if msg.to in read["readPoint"]: try: del read["readPoint"][msg.to] del read["readMember"][msg.to] del read["readTime"][msg.to] except: pass cl.sendMessage(msg.to, "Reset reading point:\n" + readTime) else: cl.sendMessage(msg.to, "偵測點未設置?") elif text.lower() == 'lr': tz = pytz.timezone("Asia/Jakarta") timeNow = datetime.now(tz=tz) day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] hr = timeNow.strftime("%A") bln = timeNow.strftime("%m") for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k-1] readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]" if receiver in read['readPoint']: if read["ROM"][receiver].items() == []: cl.sendMessage(receiver,"[ 已讀的人 ]:\nNone") else: chiya = [] for rom in read["ROM"][receiver].items(): chiya.append(rom[1]) cmem = cl.getContacts(chiya) zx = "" zxc = "" zx2 = [] xpesan = '[ 已讀的人 ]:\n' for x in range(len(cmem)): xname = str(cmem[x].displayName) pesan = '' pesan2 = pesan+"@c\n" xlen = str(len(zxc)+len(xpesan)) xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1) zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid} zx2.append(zx) zxc += pesan2 text = xpesan+ zxc + "\n[ 已讀時間 ]: \n" + readTime try: 
cl.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0) except Exception as error: print (error) pass else: cl.sendMessage(receiver,"尚未設置偵測點") #==============================================================================# elif msg.text.lower().startswith("ban "): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: settings["blacklist"][target] = True cl.sendMessage(msg.to,"已加入黑單!") break except: cl.sendMessage(msg.to,"添加失敗 !") break elif msg.text.lower().startswith("unban "): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: del settings["blacklist"][target] cl.sendMessage(msg.to,"刪除成功 !") break except: cl.sendMessage(msg.to,"刪除失敗 !") break elif text.lower() == 'banlist': if settings["blacklist"] == {}: cl.sendMessage(msg.to,"無黑單成員!") else: mc = "╔══[ Black List ]" for mi_d in settings["blacklist"]: mc += "\n╠ "+cl.getContact(mi_d).displayName cl.sendMessage(msg.to,mc + "\n╚══[ Finish ]") elif text.lower() == 'nkban': if msg.toType == 2: group = cl.getGroup(to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in settings["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) if matched_list == []: cl.sendMessage(msg.to,"There was no blacklist user") return for jj in matched_list: cl.kickoutFromGroup(msg.to,[jj]) cl.sendMessage(msg.to,"Blacklist kicked out") elif text.lower() == 'cleanban': settings["blacklist"] == {ok} for mi_d in settings["blacklist"]: try: del settings["blacklist"][mi_d] cl.sendMessage(msg.to,"已清空黑單!") break except: cl.sendMessage(msg.to,"刪除失敗 !") break elif text.lower() == 'banmidlist': if settings["blacklist"] == {}: cl.sendMessage(msg.to,"無黑單成員!") else: mc = "╔══[ Black List ]" for mi_d in settings["blacklist"]: mc += "\n╠ "+mi_d cl.sendMessage(to,mc + "\n╚══[ Finish ]") #==============================================================================# elif "Copy " in msg.text: targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: contact = cl.getContact(target) X = contact.displayName profile = cl.getProfile() profile.displayName = X cl.updateProfile(profile) cl.sendMessage(to, "Success...") Y = contact.statusMessage lol = cl.getProfile() lol.statusMessage = Y cl.updateProfile(lol) P = contact.pictureStatus pic = cl.getProfile() pic.pictureStatus = P cl.updateProfilePicture(P) cl.cloneContactProfile(target) except Exception as e: cl.sendMessage(to, "Failed!") elif text.lower() == 'cc9487': if sender in ['ua10c2ad470b4b6e972954e1140ad1891']: python = sys.executable os.execl(python, python, *sys.argv) else: pass #==============================================================================# elif text.lower() == 'calender': tz = pytz.timezone("Asia/Makassar") timeNow = datetime.now(tz=tz) day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] hr = timeNow.strftime("%A") bln = timeNow.strftime("%m") for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == 
str(k): bln = bulan[k-1] readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]" cl.sendMessage(msg.to, readTime) elif "screenshotwebsite" in msg.text.lower(): sep = text.split(" ") query = text.replace(sep[0] + " ","") with requests.session() as web: r = web.get("http://rahandiapi.herokuapp.com/sswebAPI?key=betakey&link={}".format(urllib.parse.quote(query))) data = r.text data = json.loads(data) cl.sendImageWithURL(to, data["result"]) elif "checkdate" in msg.text.lower(): sep = msg.text.split(" ") tanggal = msg.text.replace(sep[0] + " ","") r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal) data=r.text data=json.loads(data) ret_ = "╔══[ D A T E ]" ret_ += "\n╠ Date Of Birth : {}".format(str(data["data"]["lahir"])) ret_ += "\n╠ Age : {}".format(str(data["data"]["usia"])) ret_ += "\n╠ Birthday : {}".format(str(data["data"]["ultah"])) ret_ += "\n╠ Zodiak : {}".format(str(data["data"]["zodiak"])) ret_ += "\n╚══[ Success ]" cl.sendMessage(to, str(ret_)) elif msg.contentType == 7: if settings["checkSticker"] == True: stk_id = msg.contentMetadata['STKID'] stk_ver = msg.contentMetadata['STKVER'] pkg_id = msg.contentMetadata['STKPKGID'] ret_ = "╔══[ Sticker Info ]" ret_ += "\n╠ STICKER ID : {}".format(stk_id) ret_ += "\n╠ STICKER PACKAGES ID : {}".format(pkg_id) ret_ += "\n╠ STICKER VERSION : {}".format(stk_ver) ret_ += "\n╠ STICKER URL : line://shop/detail/{}".format(pkg_id) ret_ += "\n╚══[ Finish ]" cl.sendMessage(to, str(ret_)) elif msg.contentType == 13: if settings["copy"] == True: _name = msg.contentMetadata["displayName"] copy = msg.contentMetadata["mid"] groups = cl.getGroup(msg.to) targets = [] for s in groups.members: if _name in s.displayName: print ("[Target] Copy") break else: targets.append(copy) if targets == []: cl.sendMessage(msg.to, "Not Found...") pass else: for target in targets: try: cl.cloneContactProfile(target) cl.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah") settings['copy'] = False break except: msg.contentMetadata = {'mid': target} settings["copy"] = False break #==============================================================================# if op.type == 26: print ("[ 26 ] RECEIVE MESSAGE") msg = op.message text = msg.text msg_id = msg.id receiver = msg.to sender = msg._from if msg.toType == 0: if sender != cl.profile.mid: to = sender else: to = receiver else: to = receiver if settings["autoRead"] == True: cl.sendChatChecked(to, msg_id) if to in read["readPoint"]: if sender not in read["ROM"][to]: read["ROM"][to][sender] = True if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True: text = msg.text if text is not None: cl.sendMessage(msg.to,text) if msg.contentType == 0 and sender not in clMID and msg.toType == 2: if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if clMID in mention["M"]: if settings["detectMention"] == True: contact = cl.getContact(sender) cl.sendMessage(to, "sundala nu") sendMessageWithMention(to, contact.mid) break #==============================================================================# if op.type == 65: print ("[ 65 ] REREAD") try: at = op.param1 msg_id = op.param2 if 
setting["reread"] == True: if msg_id in msg_dict: if msg_dict[msg_id]["from"] not in bl: cl.sendMessage(at,"[收回訊息者]\n%s\n[訊息內容]\n%s"%(cl.getContact(msg_dict[msg_id]["from"]).displayName,msg_dict[msg_id]["text"])) del msg_dict[msg_id] else: pass except Exception as e: print (e) #==============================================================================# if op.type == 55: print ("[ 55 ] NOTIFIED READ MESSAGE") try: if op.param1 in read['readPoint']: if op.param2 in read['readMember'][op.param1]: pass else: read['readMember'][op.param1] += op.param2 read['ROM'][op.param1][op.param2] = op.param2 backupData() else: pass except: pass except Exception as error: logError(error) #==============================================================================# while True: try: ops = oepoll.singleTrace(count=50) if ops is not None: for op in ops: lineBot(op) oepoll.setRevision(op.revision) except Exception as e: logError(e)
51.742829
168
0.404267
0
0
0
0
0
0
0
0
11,161
0.209557
b9cea3f3b51bf703897e952ed45d88260e3502a1
1,190
py
Python
dd_app/messaging/backend.py
datadealer/dd_app
3806b9b9df165a49f0fca8a249170b4ccd4d0177
[ "Artistic-2.0" ]
2
2018-12-17T10:10:49.000Z
2018-12-17T11:18:32.000Z
dd_app/messaging/backend.py
datadealer/dd_app
3806b9b9df165a49f0fca8a249170b4ccd4d0177
[ "Artistic-2.0" ]
null
null
null
dd_app/messaging/backend.py
datadealer/dd_app
3806b9b9df165a49f0fca8a249170b4ccd4d0177
[ "Artistic-2.0" ]
1
2021-06-06T22:28:12.000Z
2021-06-06T22:28:12.000Z
class RedisBackend(object):

    def __init__(self, settings={}, *args, **kwargs):
        self.settings = settings

    @property
    def connection(self):
        # cached redis connection
        if not hasattr(self, '_connection'):
            self._connection = self.settings.get('redis.connector').get()
        return self._connection

    @property
    def channel(self):
        # Fanout channel
        if not hasattr(self, '_channel'):
            self._channel = self.connection.pubsub()
        return self._channel

    def subscribe(self, channels=[]):
        # Fanout subscriber
        for chan_id in channels:
            self.channel.subscribe(chan_id)

    def listen(self):
        # Fanout generator
        for m in self.channel.listen():
            if m['type'] == 'message':
                yield m

    def send(self, channel_id, payload):
        # Fanout emitter
        return self.connection.publish(channel_id, payload)

    def listen_queue(self, queue_keys):
        # Message queue generator
        while 1:
            yield self.connection.blpop(queue_keys)

    def send_queue(self, queue_key, payload):
        # Message queue emitter (the original call dropped queue_key)
        return self.connection.rpush(queue_key, payload)
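A minimal sketch of how this backend might be wired up, assuming only that the settings mapping provides a 'redis.connector' whose .get() returns a redis-py client; the _Connector class, host, port, and channel name below are hypothetical and in a real application the listener would usually run in its own worker:

# Hypothetical wiring for RedisBackend (requires a local Redis server and redis-py).
import redis


class _Connector:
    def get(self):
        return redis.Redis(host="localhost", port=6379)


backend = RedisBackend(settings={"redis.connector": _Connector()})
backend.subscribe(["room-1"])         # join the fanout channel first
backend.send("room-1", "hello")       # then publish to it
for message in backend.listen():      # yields only 'message' events
    print(message["data"])            # b'hello'
    break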
28.333333
73
0.608403
1,189
0.99916
285
0.239496
401
0.336975
0
0
174
0.146218
b9cf5fa54caecef97e6454178f438ce16bc99d7b
241
py
Python
fetch_data.py
bitfag/bt-macd-binance
eeffe52f8e561ff521629839078ff886e7bf700e
[ "MIT" ]
null
null
null
fetch_data.py
bitfag/bt-macd-binance
eeffe52f8e561ff521629839078ff886e7bf700e
[ "MIT" ]
null
null
null
fetch_data.py
bitfag/bt-macd-binance
eeffe52f8e561ff521629839078ff886e7bf700e
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from btmacd.binance_fetcher import BinanceFetcher


def main():
    fetcher = BinanceFetcher("BTCUSDT", filename="binance_ohlc.csv", start_date="01.01.2018")
    fetcher.fetch()


if __name__ == "__main__":
    main()
18.538462
93
0.705394
0
0
0
0
0
0
0
0
70
0.290456
b9d0d7e9fc82e29bf1385d169d21f03d43d467e2
25,508
py
Python
tensorflow_probability/python/mcmc/diagnostic.py
Frightera/probability
deac4562cbc1056e6abebc7450218d38444fe65d
[ "Apache-2.0" ]
1
2022-03-06T15:37:18.000Z
2022-03-06T15:37:18.000Z
tensorflow_probability/python/mcmc/diagnostic.py
Frightera/probability
deac4562cbc1056e6abebc7450218d38444fe65d
[ "Apache-2.0" ]
null
null
null
tensorflow_probability/python/mcmc/diagnostic.py
Frightera/probability
deac4562cbc1056e6abebc7450218d38444fe65d
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Utilities for Markov Chain Monte Carlo (MCMC) sampling. @@effective_sample_size @@potential_scale_reduction """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import stats from tensorflow_probability.python.internal import assert_util from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import nest_util from tensorflow_probability.python.internal import prefer_static as ps from tensorflow_probability.python.internal import tensorshape_util from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import __all__ = [ 'effective_sample_size', 'potential_scale_reduction', ] def effective_sample_size(states, filter_threshold=0., filter_beyond_lag=None, filter_beyond_positive_pairs=False, cross_chain_dims=None, validate_args=False, name=None): """Estimate a lower bound on effective sample size for each independent chain. Roughly speaking, "effective sample size" (ESS) is the size of an iid sample with the same variance as `state`. More precisely, given a stationary sequence of possibly correlated random variables `X_1, X_2, ..., X_N`, identically distributed, ESS is the number such that ``` Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }. ``` If the sequence is uncorrelated, `ESS = N`. If the sequence is positively auto-correlated, `ESS` will be less than `N`. If there are negative correlations, then `ESS` can exceed `N`. Some math shows that, with `R_k` the auto-correlation sequence, `R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have ``` ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ] ``` This function estimates the above by first estimating the auto-correlation. Since `R_k` must be estimated using only `N - k` samples, it becomes progressively noisier for larger `k`. For this reason, the summation over `R_k` should be truncated at some number `filter_beyond_lag < N`. This function provides two methods to perform this truncation. * `filter_threshold` -- since many MCMC methods generate chains where `R_k > 0`, a reasonable criterion is to truncate at the first index where the estimated auto-correlation becomes negative. This method does not estimate the `ESS` of super-efficient chains (where `ESS > N`) correctly. * `filter_beyond_positive_pairs` -- reversible MCMC chains produce an auto-correlation sequence with the property that pairwise sums of the elements of that sequence are positive [Geyer][1], i.e. `R_{2k} + R_{2k + 1} > 0` for `k in {0, ..., N/2}`. Deviations are only possible due to noise. This method truncates the auto-correlation sequence where the pairwise sums become non-positive. 
The arguments `filter_beyond_lag`, `filter_threshold` and `filter_beyond_positive_pairs` are filters intended to remove noisy tail terms from `R_k`. You can combine `filter_beyond_lag` with `filter_threshold` or `filter_beyond_positive_pairs. E.g., combining `filter_beyond_lag` and `filter_beyond_positive_pairs` means that terms are removed if they were to be filtered under the `filter_beyond_lag` OR `filter_beyond_positive_pairs` criteria. This function can also compute cross-chain ESS following [Vehtari et al. (2019)][2] by specifying the `cross_chain_dims` argument. Cross-chain ESS takes into account the cross-chain variance to reduce the ESS in cases where the chains are not mixing well. In general, this will be a smaller number than computing the ESS for individual chains and then summing them. In an extreme case where the chains have fallen into K non-mixing modes, this function will return ESS ~ K. Even when chains are mixing well it is still preferrable to compute cross-chain ESS via this method because it will reduce the noise in the estimate of `R_k`, reducing the need for truncation. Args: states: `Tensor` or Python structure of `Tensor` objects. Dimension zero should index identically distributed states. filter_threshold: `Tensor` or Python structure of `Tensor` objects. Must broadcast with `state`. The sequence of auto-correlations is truncated after the first appearance of a term less than `filter_threshold`. Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`, setting to any number less than `-1` has the same effect. Ignored if `filter_beyond_positive_pairs` is `True`. filter_beyond_lag: `Tensor` or Python structure of `Tensor` objects. Must be `int`-like and scalar valued. The sequence of auto-correlations is truncated to this length. Setting to `None` means we do not filter based on the size of lags. filter_beyond_positive_pairs: Python boolean. If `True`, only consider the initial auto-correlation sequence where the pairwise sums are positive. cross_chain_dims: An integer `Tensor` or a structure of integer `Tensors` corresponding to each state component. If a list of `states` is provided, then this argument should also be a list of the same length. Which dimensions of `states` to treat as independent chains that ESS will be summed over. If `None`, no summation is performed. Note this requires at least 2 chains. validate_args: Whether to add runtime checks of argument validity. If False, and arguments are incorrect, correct behavior is not guaranteed. name: `String` name to prepend to created ops. Returns: ess: `Tensor` structure parallel to `states`. The effective sample size of each component of `states`. If `cross_chain_dims` is None, the shape will be `states.shape[1:]`. Otherwise, the shape is `tf.reduce_mean(states, cross_chain_dims).shape[1:]`. Raises: ValueError: If `states` and `filter_threshold` or `states` and `filter_beyond_lag` are both structures of different shapes. ValueError: If `cross_chain_dims` is not `None` and there are less than 2 chains. #### Examples We use ESS to estimate standard error. ``` import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.]) # Get 1000 states from one chain. 
states = tfp.mcmc.sample_chain( num_burnin_steps=200, num_results=1000, current_state=tf.constant([0., 0.]), trace_fn=None, kernel=tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=target.log_prob, step_size=0.05, num_leapfrog_steps=20)) states.shape ==> (1000, 2) ess = effective_sample_size(states, filter_beyond_positive_pairs=True) ==> Shape (2,) Tensor mean, variance = tf.nn.moments(states, axis=0) standard_error = tf.sqrt(variance / ess) ``` #### References [1]: Charles J. Geyer, Practical Markov chain Monte Carlo (with discussion). Statistical Science, 7:473-511, 1992. [2]: Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, Paul-Christian Burkner. Rank-normalization, folding, and localization: An improved R-hat for assessing convergence of MCMC, 2019. Retrieved from http://arxiv.org/abs/1903.08008 """ if cross_chain_dims is None: cross_chain_dims = nest_util.broadcast_structure(states, None) filter_beyond_lag = nest_util.broadcast_structure(states, filter_beyond_lag) filter_threshold = nest_util.broadcast_structure(states, filter_threshold) filter_beyond_positive_pairs = nest_util.broadcast_structure( states, filter_beyond_positive_pairs) # Process items, one at a time. def single_state(*args): return _effective_sample_size_single_state( *args, validate_args=validate_args) with tf.name_scope('effective_sample_size' if name is None else name): return nest.map_structure_up_to( states, single_state, states, filter_beyond_lag, filter_threshold, filter_beyond_positive_pairs, cross_chain_dims) def _effective_sample_size_single_state(states, filter_beyond_lag, filter_threshold, filter_beyond_positive_pairs, cross_chain_dims, validate_args): """ESS computation for one single Tensor argument.""" with tf.name_scope('effective_sample_size_single_state'): states = tf.convert_to_tensor(states, name='states') dt = states.dtype # filter_beyond_lag == None ==> auto_corr is the full sequence. auto_cov = stats.auto_correlation( states, axis=0, max_lags=filter_beyond_lag, normalize=False) n = _axis_size(states, axis=0) if cross_chain_dims is not None: num_chains = _axis_size(states, cross_chain_dims) num_chains_ = tf.get_static_value(num_chains) assertions = [] msg = ('When `cross_chain_dims` is not `None`, there must be > 1 chain ' 'in `states`.') if num_chains_ is not None: if num_chains_ < 2: raise ValueError(msg) elif validate_args: assertions.append( assert_util.assert_greater(num_chains, 1., message=msg)) with tf.control_dependencies(assertions): # We're computing the R[k] from equation 10 of Vehtari et al. # (2019): # # R[k] := 1 - (W - 1/C * Sum_{c=1}^C s_c**2 R[k, c]) / (var^+), # # where: # C := number of chains # N := length of chains # x_hat[c] := 1 / N Sum_{n=1}^N x[n, c], chain mean. # x_hat := 1 / C Sum_{c=1}^C x_hat[c], overall mean. # W := 1/C Sum_{c=1}^C s_c**2, within-chain variance. # B := N / (C - 1) Sum_{c=1}^C (x_hat[c] - x_hat)**2, between chain # variance. # s_c**2 := 1 / (N - 1) Sum_{n=1}^N (x[n, c] - x_hat[c])**2, chain # variance # R[k, m] := auto_corr[k, m, ...], auto-correlation indexed by chain. # var^+ := (N - 1) / N * W + B / N cross_chain_dims = ps.non_negative_axis( cross_chain_dims, ps.rank(states)) # B / N between_chain_variance_div_n = _reduce_variance( tf.reduce_mean(states, axis=0), biased=False, # This makes the denominator be C - 1. 
axis=cross_chain_dims - 1) # W * (N - 1) / N biased_within_chain_variance = tf.reduce_mean(auto_cov[0], cross_chain_dims - 1) # var^+ approx_variance = ( biased_within_chain_variance + between_chain_variance_div_n) # 1/C * Sum_{c=1}^C s_c**2 R[k, c] mean_auto_cov = tf.reduce_mean(auto_cov, cross_chain_dims) auto_corr = 1. - (biased_within_chain_variance - mean_auto_cov) / approx_variance else: auto_corr = auto_cov / auto_cov[:1] num_chains = 1 # With R[k] := auto_corr[k, ...], # ESS = N / {1 + 2 * Sum_{k=1}^N R[k] * (N - k) / N} # = N / {-1 + 2 * Sum_{k=0}^N R[k] * (N - k) / N} (since R[0] = 1) # approx N / {-1 + 2 * Sum_{k=0}^M R[k] * (N - k) / N} # where M is the filter_beyond_lag truncation point chosen above. # Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total # ndims the same as auto_corr k = tf.range(0., _axis_size(auto_corr, axis=0)) nk_factor = (n - k) / n if tensorshape_util.rank(auto_corr.shape) is not None: new_shape = [-1] + [1] * (tensorshape_util.rank(auto_corr.shape) - 1) else: new_shape = tf.concat( ([-1], tf.ones([tf.rank(auto_corr) - 1], dtype=tf.int32)), axis=0) nk_factor = tf.reshape(nk_factor, new_shape) weighted_auto_corr = nk_factor * auto_corr if filter_beyond_positive_pairs: def _sum_pairs(x): x_len = ps.shape(x)[0] # For odd sequences, we drop the final value. x = x[:x_len - x_len % 2] new_shape = ps.concat([[x_len // 2, 2], ps.shape(x)[1:]], axis=0) return tf.reduce_sum(tf.reshape(x, new_shape), 1) # Pairwise sums are all positive for auto-correlation spectra derived from # reversible MCMC chains. # E.g. imagine the pairwise sums are [0.2, 0.1, -0.1, -0.2] # Step 1: mask = [False, False, True, True] mask = _sum_pairs(auto_corr) < 0. # Step 2: mask = [0, 0, 1, 1] mask = tf.cast(mask, dt) # Step 3: mask = [0, 0, 1, 2] mask = tf.cumsum(mask, axis=0) # Step 4: mask = [1, 1, 0, 0] mask = tf.maximum(1. - mask, 0.) # N.B. this reduces the length of weighted_auto_corr by a factor of 2. # It still works fine in the formula below. weighted_auto_corr = _sum_pairs(weighted_auto_corr) * mask elif filter_threshold is not None: filter_threshold = tf.convert_to_tensor( filter_threshold, dtype=dt, name='filter_threshold') # Get a binary mask to zero out values of auto_corr below the threshold. # mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i, # mask[i, ...] = 0, otherwise. # So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...] # Building step by step, # Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2. # Step 1: mask = [False, False, True, False] mask = auto_corr < filter_threshold # Step 2: mask = [0, 0, 1, 0] mask = tf.cast(mask, dtype=dt) # Step 3: mask = [0, 0, 1, 1] mask = tf.cumsum(mask, axis=0) # Step 4: mask = [1, 1, 0, 0] mask = tf.maximum(1. - mask, 0.) weighted_auto_corr *= mask return num_chains * n / (-1 + 2 * tf.reduce_sum(weighted_auto_corr, axis=0)) def potential_scale_reduction(chains_states, independent_chain_ndims=1, split_chains=False, validate_args=False, name=None): """Gelman and Rubin (1992)'s potential scale reduction for chain convergence. Given `N > 1` states from each of `C > 1` independent chains, the potential scale reduction factor, commonly referred to as R-hat, measures convergence of the chains (to the same target) by testing for equality of means. Specifically, R-hat measures the degree to which variance (of the means) between chains exceeds what one would expect if the chains were identically distributed. See [Gelman and Rubin (1992)][1]; [Brooks and Gelman (1998)][2]. 
Some guidelines:

* The initial state of the chains should be drawn from a distribution
  overdispersed with respect to the target.
* If all chains converge to the target, then as `N --> infinity`, R-hat --> 1.
  Before that, R-hat > 1 (except in pathological cases, e.g. if the chain paths
  were identical).
* The above holds for any number of chains `C > 1`. Increasing `C` does
  improve effectiveness of the diagnostic.
* Sometimes, R-hat < 1.2 is used to indicate approximate convergence, but of
  course this is problem-dependent. See [Brooks and Gelman (1998)][2].
* R-hat only measures non-convergence of the mean. If higher moments, or other
  statistics are desired, a different diagnostic should be used. See
  [Brooks and Gelman (1998)][2].

  Args:
    chains_states: `Tensor` or Python structure of `Tensor`s representing the
      states of a Markov Chain at each result step. The `ith` state is assumed
      to have shape `[Ni, Ci1, Ci2,...,CiD] + A`. Dimension `0` indexes the
      `Ni > 1` result steps of the Markov Chain. Dimensions `1` through `D`
      index the `Ci1 x ... x CiD` independent chains to be tested for
      convergence to the same target. The remaining dimensions, `A`, can have
      any shape (even empty).
    independent_chain_ndims: Integer type `Tensor` with value `>= 1` giving the
      number of dimensions, from `dim = 1` to `dim = D`, holding independent
      chain results to be tested for convergence.
    split_chains: Python `bool`. If `True`, divide samples from each chain into
      first and second halves, treating these as separate chains. This makes
      R-hat more robust to non-stationary chains, and is recommended in [3].
    validate_args: Whether to add runtime checks of argument validity. If
      False, and arguments are incorrect, correct behavior is not guaranteed.
    name: `String` name to prepend to created ops.
      Default: `potential_scale_reduction`.

  Returns:
    `Tensor` structure parallel to `chains_states` representing the R-hat
    statistic for the state(s). Same `dtype` as `state`, and shape equal to
    `state.shape[1 + independent_chain_ndims:]`.

  Raises:
    ValueError: If `independent_chain_ndims < 1`.

  #### Examples

  Diagnosing convergence by monitoring 10 chains that each attempt to sample
  from a 2-variate normal.

  ```python
  import tensorflow as tf
  import tensorflow_probability as tfp
  tfd = tfp.distributions

  target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])

  # Get 10 (2x) overdispersed initial states.
  initial_state = target.sample(10) * 2.
  ==> (10, 2)

  # Get 1000 samples from the 10 independent chains.
  chains_states = tfp.mcmc.sample_chain(
      num_burnin_steps=200,
      num_results=1000,
      current_state=initial_state,
      trace_fn=None,
      kernel=tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=target.log_prob,
          step_size=0.05,
          num_leapfrog_steps=20))
  chains_states.shape
  ==> (1000, 10, 2)

  rhat = tfp.mcmc.diagnostic.potential_scale_reduction(
      chains_states, independent_chain_ndims=1)

  # The second dimension needed a longer burn-in.
  rhat.eval()
  ==> [1.05, 1.3]
  ```

  To see why R-hat is reasonable, let `X` be a random variable drawn uniformly
  from the combined states (combined over all chains). Then, in the limit
  `N, C --> infinity`, with `E`, `Var` denoting expectation and variance,

  ```R-hat = ( E[Var[X | chain]] + Var[E[X | chain]] ) / E[Var[X | chain]].```

  Using the law of total variance, the numerator is the variance of the
  combined states, and the denominator is the total variance minus the variance
  of the individual chain means. If the chains are all drawing from the same
  distribution, they will have the same mean, and thus the ratio should be one.
#### References [1]: Stephen P. Brooks and Andrew Gelman. General Methods for Monitoring Convergence of Iterative Simulations. _Journal of Computational and Graphical Statistics_, 7(4), 1998. [2]: Andrew Gelman and Donald B. Rubin. Inference from Iterative Simulation Using Multiple Sequences. _Statistical Science_, 7(4):457-472, 1992. [3]: Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter, Paul-Christian Burkner. Rank-normalization, folding, and localization: An improved R-hat for assessing convergence of MCMC, 2019. Retrieved from http://arxiv.org/abs/1903.08008 """ # tf.get_static_value returns None iff a constant value (as a numpy # array) is not efficiently computable. Therefore, we try constant_value then # check for None. icn_const_ = tf.get_static_value( ps.convert_to_shape_tensor(independent_chain_ndims)) if icn_const_ is not None: independent_chain_ndims = icn_const_ if icn_const_ < 1: raise ValueError( 'Argument `independent_chain_ndims` must be `>= 1`, found: {}'.format( independent_chain_ndims)) def single_state(s): return _potential_scale_reduction_single_state( s, independent_chain_ndims, split_chains, validate_args) with tf.name_scope('potential_scale_reduction' if name is None else name): return tf.nest.map_structure(single_state, chains_states) def _potential_scale_reduction_single_state(state, independent_chain_ndims, split_chains, validate_args): """potential_scale_reduction for one single state `Tensor`.""" # casting integers to floats for floating-point division # check to see if the `state` is a numpy object for the numpy test suite if dtype_util.as_numpy_dtype(state.dtype) is np.int64: state = tf.cast(state, tf.float64) elif dtype_util.is_integer(state.dtype): state = tf.cast(state, tf.float32) with tf.name_scope('potential_scale_reduction_single_state'): # We assume exactly one leading dimension indexes e.g. correlated samples # from each Markov chain. state = tf.convert_to_tensor(state, name='state') n_samples_ = tf.compat.dimension_value(state.shape[0]) if n_samples_ is not None: # If available statically. if split_chains and n_samples_ < 4: raise ValueError( 'Must provide at least 4 samples when splitting chains. ' 'Found {}'.format(n_samples_)) if not split_chains and n_samples_ < 2: raise ValueError( 'Must provide at least 2 samples. Found {}'.format(n_samples_)) elif validate_args: if split_chains: assertions = [assert_util.assert_greater( ps.shape(state)[0], 4, message='Must provide at least 4 samples when splitting chains.')] with tf.control_dependencies(assertions): state = tf.identity(state) else: assertions = [assert_util.assert_greater( ps.shape(state)[0], 2, message='Must provide at least 2 samples.')] with tf.control_dependencies(assertions): state = tf.identity(state) # Define so it's not a magic number. # Warning! `if split_chains` logic assumes this is 1! sample_ndims = 1 if split_chains: # Split the sample dimension in half, doubling the number of # independent chains. # For odd number of samples, keep all but the last sample. state_shape = ps.shape(state) n_samples = state_shape[0] state = state[:n_samples - n_samples % 2] # Suppose state = [0, 1, 2, 3, 4, 5] # Step 1: reshape into [[0, 1, 2], [3, 4, 5]] # E.g. reshape states of shape [a, b] into [2, a//2, b]. state = tf.reshape( state, ps.concat([[2, n_samples // 2], state_shape[1:]], axis=0) ) # Step 2: Put the size `2` dimension in the right place to be treated as a # chain, changing [[0, 1, 2], [3, 4, 5]] into [[0, 3], [1, 4], [2, 5]], # reshaping [2, a//2, b] into [a//2, 2, b]. 
state = tf.transpose( a=state, perm=ps.concat( [[1, 0], ps.range(2, ps.rank(state))], axis=0)) # We're treating the new dim as indexing 2 chains, so increment. independent_chain_ndims += 1 sample_axis = ps.range(0, sample_ndims) chain_axis = ps.range(sample_ndims, sample_ndims + independent_chain_ndims) sample_and_chain_axis = ps.range( 0, sample_ndims + independent_chain_ndims) n = _axis_size(state, sample_axis) m = _axis_size(state, chain_axis) # In the language of Brooks and Gelman (1998), # B / n is the between chain variance, the variance of the chain means. # W is the within sequence variance, the mean of the chain variances. b_div_n = _reduce_variance( tf.reduce_mean(state, axis=sample_axis, keepdims=True), sample_and_chain_axis, biased=False) w = tf.reduce_mean( _reduce_variance(state, sample_axis, keepdims=True, biased=False), axis=sample_and_chain_axis) # sigma^2_+ is an estimate of the true variance, which would be unbiased if # each chain was drawn from the target. c.f. "law of total variance." sigma_2_plus = ((n - 1) / n) * w + b_div_n return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n) # TODO(b/72873233) Move some variant of this to tfd.sample_stats. def _reduce_variance(x, axis=None, biased=True, keepdims=False): with tf.name_scope('reduce_variance'): x = tf.convert_to_tensor(x, name='x') mean = tf.reduce_mean(x, axis=axis, keepdims=True) biased_var = tf.reduce_mean( tf.math.squared_difference(x, mean), axis=axis, keepdims=keepdims) if biased: return biased_var n = _axis_size(x, axis) return (n / (n - 1.)) * biased_var def _axis_size(x, axis=None): """Get number of elements of `x` in `axis`, as type `x.dtype`.""" if axis is None: return ps.cast(ps.size(x), x.dtype) return ps.cast( ps.reduce_prod( ps.gather(ps.shape(x), axis)), x.dtype)
43.015177
85
0.663361
0
0
0
0
0
0
0
0
16,533
0.64815
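The `potential_scale_reduction` code in the record above reduces to a handful of variance terms (`B/n`, `W`, `sigma^2_+`). As a sanity check on that arithmetic, here is a minimal NumPy sketch of the same R-hat formula, assuming chains stored as an array of shape `[n_samples, n_chains]`; it only mirrors the `((m + 1) / m) * sigma_2_plus / w - (n - 1) / (m * n)` expression from `_potential_scale_reduction_single_state` and is not a substitute for the TFP implementation.

```python
import numpy as np

def rhat_numpy(chains):
    """Potential scale reduction for samples shaped [n_samples, n_chains]."""
    n, m = chains.shape
    chain_means = chains.mean(axis=0)            # per-chain means
    # B / n: unbiased variance of the chain means (biased=False above)
    b_div_n = chain_means.var(ddof=1)
    # W: mean of the per-chain unbiased sample variances
    w = chains.var(axis=0, ddof=1).mean()
    # sigma^2_+ = (n - 1)/n * W + B/n, cf. the "law of total variance" comment
    sigma_2_plus = (n - 1) / n * w + b_div_n
    return (m + 1) / m * sigma_2_plus / w - (n - 1) / (m * n)

rng = np.random.default_rng(0)
well_mixed = rng.normal(size=(1000, 4))                  # 4 chains, same target
poorly_mixed = well_mixed + np.array([0., 0., 0., 5.])   # one offset chain
print(rhat_numpy(well_mixed))    # close to 1.0
print(rhat_numpy(poorly_mixed))  # well above 1.0
```

For well-mixed chains the value sits near 1, while the offset chain pushes it far above 1, matching the guidelines in the docstring.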
b9d22fbf764d6a06a81fe68e7bedb0cb2069ff17
2,360
py
Python
mpl/models/leaf.py
jiangyuang/ModelPruningLibrary
9c8ba5a3c5d118f37768d5d42254711f48d88745
[ "MIT" ]
13
2020-02-24T16:57:37.000Z
2021-12-14T16:47:41.000Z
mpl/models/leaf.py
jiangyuang/ModelPruningLibrary
9c8ba5a3c5d118f37768d5d42254711f48d88745
[ "MIT" ]
3
2021-01-08T14:06:33.000Z
2021-09-07T13:39:46.000Z
mpl/models/leaf.py
jiangyuang/ModelPruningLibrary
9c8ba5a3c5d118f37768d5d42254711f48d88745
[ "MIT" ]
3
2020-05-30T17:59:43.000Z
2021-04-13T04:55:33.000Z
from torch import nn as nn from .base_model import BaseModel from ..nn.conv2d import DenseConv2d from ..nn.linear import DenseLinear __all__ = ["Conv2", "conv2", "Conv4", "conv4"] class Conv2(BaseModel): def __init__(self): super(Conv2, self).__init__() self.features = nn.Sequential(DenseConv2d(1, 32, kernel_size=5, padding=2), # 32x28x28 nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2), # 32x14x14 DenseConv2d(32, 64, kernel_size=5, padding=2), # 64x14x14 nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2)) # 64x7x7 self.classifier = nn.Sequential(DenseLinear(64 * 7 * 7, 2048), nn.ReLU(inplace=True), DenseLinear(2048, 62)) self.collect_prunable_layers() def forward(self, inp): out = self.features(inp) out = out.view(out.size(0), -1) out = self.classifier(out) return out class Conv4(BaseModel): def __init__(self): super(Conv4, self).__init__() self.features = nn.Sequential(DenseConv2d(3, 32, kernel_size=3, padding=1), nn.BatchNorm2d(32), nn.MaxPool2d(2), DenseConv2d(32, 32, kernel_size=3, padding=1), nn.BatchNorm2d(32), nn.MaxPool2d(2), DenseConv2d(32, 32, kernel_size=3, padding=2), nn.BatchNorm2d(32), nn.MaxPool2d(2), DenseConv2d(32, 32, kernel_size=3, padding=2), nn.BatchNorm2d(32), nn.MaxPool2d(2)) self.classifier = DenseLinear(in_features=32 * 6 * 6, out_features=2) def forward(self, inp): out = self.features(inp) out = out.view(out.size(0), -1) out = self.classifier(out) return out def conv2() -> Conv2: return Conv2() def conv4() -> Conv4: return Conv4() # TODO: define pretrain etc.
36.307692
96
0.469068
2,056
0.871186
0
0
0
0
0
0
94
0.039831
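The `Conv2` model in the record above flattens a `64x7x7` feature map into a 2048-unit head with 62 outputs (a FEMNIST-style `1x28x28` input, judging by the comments). A quick shape check clarifies where `64 * 7 * 7` comes from; plain `torch.nn` layers stand in for the repository's `DenseConv2d`/`DenseLinear` wrappers, which is an assumption about their output shapes only.

```python
import torch
from torch import nn

# Plain nn layers as stand-ins for DenseConv2d/DenseLinear (assumed to be
# shape-compatible with their dense counterparts).
features = nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=5, padding=2),   # 32x28x28
    nn.ReLU(inplace=True),
    nn.MaxPool2d(2, stride=2),                    # 32x14x14
    nn.Conv2d(32, 64, kernel_size=5, padding=2),  # 64x14x14
    nn.ReLU(inplace=True),
    nn.MaxPool2d(2, stride=2),                    # 64x7x7
)
classifier = nn.Sequential(nn.Linear(64 * 7 * 7, 2048),
                           nn.ReLU(inplace=True),
                           nn.Linear(2048, 62))

x = torch.randn(4, 1, 28, 28)     # batch of 4 grayscale 28x28 images
feats = features(x)
print(feats.shape)                # torch.Size([4, 64, 7, 7])
out = classifier(feats.view(feats.size(0), -1))
print(out.shape)                  # torch.Size([4, 62])
```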
b9d2bd5114a0540a0095f6c31a8ad07b71899f53
29,424
py
Python
scripts/generate_network_interactomix.py
quimaguirre/NetworkAnalysis
c7a4da3ba5696800738b4767065ce29fa0020d79
[ "MIT" ]
1
2017-07-10T17:33:31.000Z
2017-07-10T17:33:31.000Z
scripts/generate_network_interactomix.py
quimaguirre/NetworkAnalysis
c7a4da3ba5696800738b4767065ce29fa0020d79
[ "MIT" ]
null
null
null
scripts/generate_network_interactomix.py
quimaguirre/NetworkAnalysis
c7a4da3ba5696800738b4767065ce29fa0020d79
[ "MIT" ]
null
null
null
import argparse import ConfigParser import sys, os, re import biana try: from biana import * except: sys.exit(10) import methods_dictionaries as methods_dicts def main(): options = parse_user_arguments() generate_network(options) def parse_user_arguments(*args, **kwds): parser = argparse.ArgumentParser( description = "Generate a protein-protein interaction network (implemented for Interactomix platform)", epilog = "@oliva's lab 2019") parser.add_argument('-iseed','--seeds_input_file',dest='seed',action = 'store', help = 'Seeds Input file (default is input_seed)') parser.add_argument('-radius','--radius_of_subnetwork_around_seeds',dest='radius',default=0,action = 'store',type=int, help = '''Network is built in a radius of connections around the seed proteins. If 0, it creates the complete interactome''') parser.add_argument('-taxid','--TaxID',dest='taxid',action = 'store',default='9606', help = 'Tax ID (i.e. human=9606 is default if TaxID=0 there is no restriction)') parser.add_argument('-stype','--seed_type',dest='stype',action = 'store',default='geneid', help = 'Type of identifier for seeds (default is geneid)') parser.add_argument('-ttype','--translation_type',dest='ttype',action = 'store',default='accessionnumber', help = '''Type of identifier for the output translation of codes (default is accessionnumber) Using "proteinsequence" provides with the longest sequence of all codes''') parser.add_argument('-trans','--translation_of_nodes_file',dest='translation_file',action = 'store',default='translation_nodes.txt', help = 'File with the translation of codes from BIANA to the selected type for all nodes') parser.add_argument('-strans','--translation_of_seeds_file',dest='translation_seeds_file',action = 'store',default='translation_seeds_to_BIANA_codes.txt', help = 'File with the translation of codes from the introduced type of code to BIANA codes') parser.add_argument('-edge','--edge_file',dest='edge',action = 'store', default='biana_edges', help = 'Output file with edges(default is biana_edges)') parser.add_argument('-node','--node_file',dest='node',action = 'store', default='biana_nodes', help = 'Output file with nodes(default is biana_nodes)') parser.add_argument('-format','--output_format',dest='format',action = 'store',default='sif', help = '''Format file of the edge file:\tsif (default), netscore, raw, multi-fields:\n 'sif': <node1>\tscore\t<node2>\n 'netscore': <node1>\t<node2>\t<score>\n 'raw': <node1>\t<node2>\n 'multi-fields' : <node1>\t<node2>\t<sources>\t<method_ids>\t<method_names>\t<pmids>\n''') parser.add_argument('-rAFF','--restricted_to_TAP',dest='restricted_to_TAP',action = 'store_true', help = 'Flag to use interactions at least described by affinity methods (i.e. Tandem Affinity Purification)') parser.add_argument('-rY2H','--restricted_to_Y2H',dest='restricted_to_Y2H',action = 'store_true', help = 'Flag to use interactions at least described by yeast two hybrid methods (Y2H)') parser.add_argument('-rUSER','--restricted_to_user',dest='restricted_to_user',action = 'store',default='restricted_methods', help = 'File to use interactions described by the user selected methods') parser.add_argument('-eAFF','--except_TAP',dest='except_TAP',action = 'store_true', help = 'Flag to use all interactions except those described by affinity methods (i.e. 
Tandem Affinity Purification)') parser.add_argument('-eY2H','--except_Y2H',dest='except_Y2H',action = 'store_true', help = 'Flag to use all interactions except those described by yeast two hybrid methods (Y2H)') parser.add_argument('-eUSER','--except_user',dest='except_user',action = 'store',default='restricted_methods', help = 'File to reject interactions described by the user selected methods') parser.add_argument('-v','--verbose',dest='verbose',action = 'store_true', help = 'Flag to use verbose mode') options=parser.parse_args() """ Example: python generate_network_interactomix.py -iseed example/sample1.txt -radius 1 -taxid 9606 -stype uniprotentry -ttype proteinsequence -trans example/output/example.proteinsequence.trans -strans example/output/example.seeds.trans -edge example/output/example.edges -node example/output/example.nodes -format raw -rY2H python /home/quim/PHD/Projects/BIANA/scripts/generate_network_interactomix.py -radius 0 -taxid 9606 -edge /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020.txt -node /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020_nodes.txt -trans /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020_translation.txt -ttype geneid -format multi-fields &> /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020.log """ return options def generate_network(options): """ Generates a protein-protein interaction network extracting information from BIANA. """ #----------------------# # FIXED PARAMETERS # #----------------------# # Parameters that I have decided to fix restricted_to_seeds = False minimum_number_of_methods = 1 minimum_number_of_db = 1 seed_score = 0.1 #--------------------------------------# # GET INFORMATION FROM CONFIG FILE # #--------------------------------------# # Get the program path main_path = os.path.abspath(os.path.dirname(__file__)) # Read the config file config_file = os.path.join(main_path, 'config.ini') config = ConfigParser.ConfigParser() config.read(config_file) #--------------------------------------# # LOAD THE DICTIONARIES OF METHODS # #--------------------------------------# # Get the affinity dictionary affinity_dict = methods_dicts.affinity_dict affinity=set(affinity_dict.keys()) # Get the complementation dictionary complementation_dict = methods_dicts.complementation_dict complementation=set(complementation_dict.keys()) #---------------------------------------# # GET METHODS THAT WILL BE FILTERED # #---------------------------------------# # Check if the user has introduced a file with methods that must be included if not fileExist(options.restricted_to_user): print "No restriction on methods selected by the user" user_selection=False else: use_methods=[] with open(options.restricted_to_user) as input_method_fd: for line in input_method_fd: fields = line.strip().split("\t") use_methods.append(fields[0]) user_selection=True print "Input to use only Methods:",repr(use_methods) # Check if the user has introduced a file with methods that have to be excluded if not fileExist(options.except_user): print "No rejection of methods selected by the user" user_rejection=False else: no_methods=[] with open(options.except_user) as input_method_fd: for line in input_method_fd: fields = line.strip().split("\t") no_methods.append(fields[0]) user_rejection=True print "Input of rejected Methods:",repr(no_methods) #---------------------------# # START A BIANA 
SESSION # #---------------------------# print "Open session" session = create_new_session( sessionID="biana_session", dbname=config.get('BIANA', 'database'), dbhost=config.get('BIANA', 'host'), dbuser=config.get('BIANA', 'user'), dbpassword=config.get('BIANA', 'password'), unification_protocol=config.get('BIANA', 'unification_protocol') ) print "Continue" #------------------------------# # DEFINE A USER ENTITY SET # #------------------------------# # Create network network of expansion if the radius is larger than 0 if restricted_to_seeds or options.radius>0: # Check if the seeds file exists if not fileExist(options.seed): print "File with seeds is missing or not found" sys.exit(10) else: level=options.radius seed_list = get_seeds_from_file(options.seed) # If we have Taxonomy restriction, we add it if options.taxid != "0": print("Check Proteome %s"%(repr(options.taxid))) proteome = session.create_new_user_entity_set( identifier_description_list =seed_list, attribute_restriction_list=[("taxid",options.taxid)], id_type=options.stype,new_user_entity_set_id="proteome", negative_attribute_restriction_list=[] ) else: print('Proteome without Taxonomy restriction') proteome = session.create_new_user_entity_set( identifier_description_list =seed_list, id_type=options.stype,new_user_entity_set_id="proteome", negative_attribute_restriction_list=[] ) else: level=0 proteome = session.create_new_user_entity_set( identifier_description_list = [("taxid",options.taxid)], attribute_restriction_list=[], id_type="embedded", new_user_entity_set_id="proteome", negative_attribute_restriction_list=[] ) #----------------------------------------------------# # SELECT THE INTERACTIONS OF THE USER ENTITY SET # #----------------------------------------------------# print ("Selecting interactions") # Select interactions that have been detected at least by affinity technology if options.restricted_to_TAP: print ('Using interactions at least described by affinity methods (i.e. 
Tandem Affinity Purification)') session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] , relation_attribute_restriction_list = [("Method_id",400)], #relation_attribute_restriction_list = [("psimi_name","affinity technology")], include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False) # Select interactions that have been detected at least by yeast two hybrid elif options.restricted_to_Y2H: print ('Using interactions at least described by yeast-two-hybrid methods (Y2H)') session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] , relation_attribute_restriction_list = [("Method_id",18)], #relation_attribute_restriction_list = [("psimi_name","y2h2")], include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False) # Select all interactions else: session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] , include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False) # Summary of interactions out_network = open(options.edge,'w') all_interactions = proteome.getRelations() print "Num interactions:", len(all_interactions) #--------------------------------------# # FILTER THE SELECTED INTERACTIONS # #--------------------------------------# nodes=set() # Get all the user entity ids from the user entity set 'proteome' all_uEs = proteome.get_user_entity_ids() # Obtain a dictionary user entity ID => type uEId_to_type = session.dbAccess.get_user_entity_type(config.get('BIANA', 'unification_protocol'), all_uEs) skip_interactions=0 for (uE_id1, uE_id2) in all_interactions: #self.dbAccess.get_external_entities_dict( externalEntityIdsList = [external_entity_relation_id] ) # Get TYPE of user entity uE1_type = uEId_to_type[uE_id1] uE2_type = uEId_to_type[uE_id2] # If type is not protein, we skip the interaction if uE1_type != 'protein' or uE2_type != 'protein': if options.verbose: print('Skipping interaction because the type of one of the user entities is not protein!') print('Node 1: {}\tType: {}'.format(uE_id1, uE1_type)) print('Node 2: {}\tType: {}'.format(uE_id2, uE2_type)) skip_interactions=skip_interactions+1 continue eErIDs_list = proteome.get_external_entity_relation_ids(uE_id1, uE_id2) method_names = set() method_ids = set() source_databases = set() use_method_ids=set() pubmed_ids = set() unused_method_names = set() relationObj_dict = session.dbAccess.get_external_entities_dict( externalEntityIdsList = eErIDs_list, attribute_list = [], relation_attribute_list = ["method_id","psimi_name","pubmed"], participant_attribute_list = [] ) num_methods=0 for current_eErID in eErIDs_list: relationObj = relationObj_dict[current_eErID] if options.verbose: print "Interaction: (",uE_id1,",",uE_id2,")" print relationObj #if relationObj.get_attribute(attribute_identifier="psimi_name") is not None: # print "\t".join([ x.value for x in relationObj.get_attribute(attribute_identifier="psimi_name") ]) #if relationObj.get_attribute(attribute_identifier="method_id") is not None: #print "\t".join([ x.value for x in relationObj.get_attribute(attribute_identifier="method_id") ]) #print relationObj.get_attributes_dict() #print [ x.value for x in relationObj.get_attributes_dict()["psimi_name"] ] #print [ x.value for x in relationObj.get_attributes_dict()["method_id"] ] if "psimi_name" in relationObj.get_attributes_dict(): method_names.update([ str(x.value) for x in 
relationObj.get_attributes_dict()["psimi_name"] ]) if "method_id" in relationObj.get_attributes_dict(): method_ids.update([ x.value for x in relationObj.get_attributes_dict()["method_id"]]) if "pubmed" in relationObj.get_attributes_dict(): pubmed_ids.update([ x.value for x in relationObj.get_attributes_dict()["pubmed"]]) source_databases.add(str(session.dbAccess.get_external_database( database_id = relationObj.get_source_database()) )) if options.except_TAP: for m in method_ids: if m not in affinity: use_method_ids.add(m) #print "Add", m else: unused_method_names.add(affinity_dict[m]) elif options.except_Y2H: #print "check Y2H" for m in method_ids: if m not in complementation: use_method_ids.add(m) #print "Add", m else: unused_method_names.add(complementation_dict[m]) elif user_rejection: for m in method_ids: if m not in no_methods: use_method_ids.add(m) elif user_selection: for m in method_ids: #print "Check",repr(use_methods) if m in set(use_methods): use_method_ids.add(m) if options.verbose: print "Not among selected methods ",m else: use_method_ids.update(method_ids) if len(source_databases) > 0: info_sources=";".join([str(x) for x in source_databases]) else: if options.verbose: print('Skipping interaction it has no source database!') print('Node 1: {}\tNode 2: {}'.format(uE_id1, uE_id2)) skip_interactions=skip_interactions+1 continue if len(method_names) > 0: method_names = [x for x in method_names if x not in unused_method_names] # Remove method names that were excluded info_methods=";".join([str(x) for x in method_names]) else: info_methods='-' if len(use_method_ids) > 0: info_methods_ids=";".join([str(x) for x in use_method_ids]) else: if options.verbose: print('Skipping interaction it has no method!') print('Node 1: {}\tNode 2: {}'.format(uE_id1, uE_id2)) skip_interactions=skip_interactions+1 continue if len(pubmed_ids) > 0: info_pubmed_ids=";".join([str(x) for x in pubmed_ids]) else: info_pubmed_ids='-' num_databases=len(source_databases) num_methods=len(use_method_ids) num_pubmeds = len(pubmed_ids) if options.verbose: print "Methods",num_methods,info_methods,"\tSelected:",info_methods_ids print "Databases",num_databases,info_sources print "Pubmeds",num_pubmeds,info_pubmed_ids # Check if the number of methods is higher than the minimum established if num_methods >= minimum_number_of_methods: use=True else: use=False # Check if the number of database is higher than the minimum established if use and num_databases >= minimum_number_of_db: use=True else: use=False if not use: skip_interactions=skip_interactions+1 #print method_names, method_ids, source_databases #----------------------# # OUTPUT EDGE FILE # #----------------------# if use: #print uE_id1, uE_id/2 nodes.add(uE_id1) nodes.add(uE_id2) #print "Attribute ",(uE_id1,uE_id2).get_attribute( if options.format == 'multi-fields' : out_network.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n". 
format(uE_id1,uE_id2,info_sources,info_methods_ids,info_methods,info_pubmed_ids)) elif options.format == 'netscore': out_network.write('\t{}\t{}\t{:.2f}\n'.format(uE_id1,uE_id2,1.)) elif options.format == 'raw': out_network.write("{}\t{}\n".format(uE_id1,uE_id2)) else: # If the format is not multi-fields, netscore or raw, the output format is sif out_network.write("{}\t{:.2f}\t{}\n".format(uE_id1,1.,uE_id2)) print "Num neglected interactions:", skip_interactions out_network.close() #---------------------------------------# # OUTPUT NODE AND TRANSLATION FILES # #---------------------------------------# # If we wanted the complete interactome, the translation will be done differently if options.radius <= 0: # Output node file out_proteins = open(options.node,'w') for protein in nodes: if options.format == 'multi-fields': out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,0.1)) elif options.format == 'netscore': out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,0.1)) else: out_proteins.write("{0}\t{1:.2f}\n".format(protein,0.1)) out_proteins.close() ################################# TRANSLATION #################################### out_translation = open(options.translation_file,'w') # TRANSLATION TO 'stype' trans_stype=False if options.stype != 'proteinsequence' and options.stype != options.ttype: trans_stype = True out_trans_stype = open(options.translation_file+'.'+options.stype+'.trans','w') for protein in nodes: uE = session.get_user_entity(protein) translate=set() translate_stype=set() if options.ttype == "proteinsequence": maxlen=0; for current_id in uE.get_attribute(attribute_identifier=options.ttype): if maxlen < len(current_id.value.get_sequence().upper()): maxlen=len(current_id.value.get_sequence().upper()) translation=",".join([str(current_id.value.get_sequence().upper()) for current_id in uE.get_attribute(attribute_identifier=options.ttype) if len(str(current_id.value.get_sequence().upper())) == maxlen ] ) #print "Translation",protein,translation #print("{0}\t'{1}'\n".format(protein,translation)) else: ##### TRANSLATION TO 'ttype' for current_id in uE.get_attribute(attribute_identifier=options.ttype): translate.add(current_id.value.upper()) translation="','".join(["{0}".format(x) for x in translate]) out_translation.write("{0}\t'{1}'\n".format(protein,translation)) ##### TRANSLATION TO STYPE if trans_stype: for current_id in uE.get_attribute(attribute_identifier=options.stype): translate_stype.add(current_id.value.upper()) translation_stype="','".join(["{0}".format(x) for x in translate_stype]) out_trans_stype.write("{0}\t'{1}'\n".format(protein,translation_stype)) out_translation.close() if trans_stype: out_trans_stype.close() #################################################################################### # If we wanted a network of expansion, the translation will be done differently elif options.radius > 0: # Read the seeds seeds=set() input_seed = open(options.seed,'r') for line in input_seed: fields = line.strip().split("\t") seeds.add(fields[0].lower()) input_seed.close() # Output node file out_proteins = open(options.node,'w') translate={} for protein in nodes: score=seed_score uE = session.get_user_entity(protein) for current_id in uE.get_attribute(attribute_identifier=options.stype): if current_id.value.lower() in seeds: translate.setdefault(current_id.value.lower(),[]) translate[current_id.value.lower()].append(protein) score=1.0 if options.format == 'multi-fields': 
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,score)) elif options.format == 'netscore': out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,score)) else: out_proteins.write("{0}\t{1:.2f}\n".format(protein,score)) out_proteins.close() # Get the IDS of single nodes that were not previously found in the network single=set() for uE_id in proteome.get_unconnected_nodes(): single.add(uE_id) for protein in single: uE = session.get_user_entity(protein) for current_id in uE.get_attribute(attribute_identifier=options.stype): if current_id.value.lower() in seeds: translate.setdefault(current_id.value.lower(),[]) translate[current_id.value.lower()].append(protein) # Get all IDS of SEEDS, defined as "proteome", and check missing codes to be # added for translation allseed=set() for uE_id in proteome.get_user_entity_ids(): allseed.add(uE_id) for protein in allseed: if protein not in single and protein not in nodes: uE = session.get_user_entity(protein) for current_id in uE.get_attribute(attribute_identifier=options.stype): if current_id.value.lower() in seeds: translate.setdefault(current_id.value.lower(),[]) translate[current_id.value.lower()].append(protein) ################################# TRANSLATION #################################### out_translation = open(options.translation_seeds_file,'w') for s in seeds: if s == '': continue if s in translate: codes=set(translate[s]) translation="','".join([str(x) for x in codes]) #out_translation.write("%s\t'%s'\n" % (s.upper(),translation)) out_translation.write("{0}\t'{1}'\n".format(s.upper(),translation)) else: out_translation.write("{0}\t'Unknown'\n".format(s.upper())) out_translation.close() # Output translation file # TRANSLATION TO 'ttype' out_translation = open(options.translation_file,'w') # TRANSLATION TO 'stype' trans_stype=False if options.stype != 'proteinsequence' and options.stype != options.ttype: trans_stype = True out_trans_stype = open(options.translation_file+'.'+options.stype+'.trans','w') for protein in nodes: uE = session.get_user_entity(protein) translate=set() translate_stype=set() if options.ttype == "proteinsequence": maxlen=0; for current_id in uE.get_attribute(attribute_identifier=options.ttype): if maxlen < len(current_id.value.get_sequence().upper()): maxlen=len(current_id.value.get_sequence().upper()) translation=",".join([str(current_id.value.get_sequence().upper()) for current_id in uE.get_attribute(attribute_identifier=options.ttype) if len(str(current_id.value.get_sequence().upper())) == maxlen ] ) #print "Translation",protein,translation #print("{0}\t'{1}'\n".format(protein,translation)) else: for current_id in uE.get_attribute(attribute_identifier=options.ttype): translate.add(current_id.value.upper()) translation="','".join(["{0}".format(x) for x in translate]) out_translation.write("{0}\t'{1}'\n".format(protein,translation)) ##### TRANSLATION TO STYPE if trans_stype: for current_id in uE.get_attribute(attribute_identifier=options.stype): translate_stype.add(current_id.value.upper()) translation_stype="','".join(["{0}".format(x) for x in translate_stype]) out_trans_stype.write("{0}\t'{1}'\n".format(protein,translation_stype)) out_translation.close() if trans_stype: out_trans_stype.close() #################################################################################### print('Generation of the network done!') return def fileExist(file): """ Checks if a file exists AND is a file """ return os.path.exists(file) and os.path.isfile(file) def 
get_seeds_from_file(seed_file): """ Obtain the seeds from a file and introduce them to a Python list. The seeds must be separated by new lines! """ seed_set = set() with open(seed_file, 'r') as seed_file_fd: for line in seed_file_fd: fields = line.strip().split('\t') seed_set.add(fields[0]) return list(seed_set) if __name__ == "__main__": main()
48.157119
591
0.569195
0
0
0
0
0
0
0
0
10,337
0.351312
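The BIANA script in the record above can write edges in a `multi-fields` layout (`<node1>\t<node2>\t<sources>\t<method_ids>\t<method_names>\t<pmids>`, with `;`-separated values and `-` for empty fields). A small Python 3 reader for that output might look like the sketch below; the sample edge line, node ids, sources and method names are made up for illustration.

```python
import csv
import io

# One made-up edge line in the 'multi-fields' layout produced by the script.
sample = "12345\t67890\tintact;biogrid\t18;400\ttwo hybrid;tap\t15961401\n"

def read_multi_fields_edges(fd):
    """Parse <node1>\t<node2>\t<sources>\t<method_ids>\t<method_names>\t<pmids> lines."""
    edges = []
    for node1, node2, sources, method_ids, method_names, pmids in csv.reader(fd, delimiter='\t'):
        edges.append({
            'node1': node1,
            'node2': node2,
            'sources': sources.split(';'),
            'method_ids': method_ids.split(';'),
            'method_names': [] if method_names == '-' else method_names.split(';'),
            'pmids': [] if pmids == '-' else pmids.split(';'),
        })
    return edges

edges = read_multi_fields_edges(io.StringIO(sample))
print(edges[0]['sources'], edges[0]['method_ids'])
```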
b9d2c04ffcb32d5c9ad6c0f626a368e22db97763
4,504
py
Python
tests/data/s3_scrape_config.py
kids-first/kf-api-study-creator
93a79b108b6474f9b4135ace06c89ddcf63dd257
[ "Apache-2.0" ]
3
2019-05-04T02:07:28.000Z
2020-10-16T17:47:44.000Z
tests/data/s3_scrape_config.py
kids-first/kf-api-study-creator
93a79b108b6474f9b4135ace06c89ddcf63dd257
[ "Apache-2.0" ]
604
2019-02-21T18:14:51.000Z
2022-02-10T08:13:54.000Z
tests/data/s3_scrape_config.py
kids-first/kf-api-study-creator
93a79b108b6474f9b4135ace06c89ddcf63dd257
[ "Apache-2.0" ]
null
null
null
""" This is an extract config intended for S3 object manifests produced by TBD. To use it, you must import it in another extract config and override at least the `source_data_url`. You may also append additional operations to the `operations` list as well. For example you could have the following in your extract config module: from kf_ingest_packages.common.extract_configs.s3_object_info import * source_data_url = 'file://../data/kf-seq-data-bcm-chung-s3-objects.tsv' operations.append( value_map( in_col='Key', out_col=CONCEPT.BIOSPECIMEN.ID, m=lambda x: x ) ) """ import os from kf_lib_data_ingest.common import constants from kf_lib_data_ingest.common.constants import GENOMIC_FILE from kf_lib_data_ingest.common.concept_schema import CONCEPT from kf_lib_data_ingest.etl.extract.operations import ( keep_map, row_map, value_map, constant_map, ) def file_ext(x): """ Get genomic file extension """ matches = [ file_ext for file_ext in FILE_EXT_FORMAT_MAP if x.endswith(file_ext) ] if matches: file_ext = max(matches, key=len) else: file_ext = None return file_ext FILE_EXT_FORMAT_MAP = { "fq": GENOMIC_FILE.FORMAT.FASTQ, "fastq": GENOMIC_FILE.FORMAT.FASTQ, "fq.gz": GENOMIC_FILE.FORMAT.FASTQ, "fastq.gz": GENOMIC_FILE.FORMAT.FASTQ, "bam": GENOMIC_FILE.FORMAT.BAM, "hgv.bam": GENOMIC_FILE.FORMAT.BAM, "cram": GENOMIC_FILE.FORMAT.CRAM, "bam.bai": GENOMIC_FILE.FORMAT.BAI, "bai": GENOMIC_FILE.FORMAT.BAI, "cram.crai": GENOMIC_FILE.FORMAT.CRAI, "crai": GENOMIC_FILE.FORMAT.CRAI, "g.vcf.gz": GENOMIC_FILE.FORMAT.GVCF, "g.vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI, "vcf.gz": GENOMIC_FILE.FORMAT.VCF, "vcf": GENOMIC_FILE.FORMAT.VCF, "vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI, "peddy.html": "html", } DATA_TYPES = { GENOMIC_FILE.FORMAT.FASTQ: GENOMIC_FILE.DATA_TYPE.UNALIGNED_READS, GENOMIC_FILE.FORMAT.BAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS, GENOMIC_FILE.FORMAT.CRAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS, GENOMIC_FILE.FORMAT.BAI: "Aligned Reads Index", GENOMIC_FILE.FORMAT.CRAI: "Aligned Reads Index", GENOMIC_FILE.FORMAT.VCF: "Variant Calls", GENOMIC_FILE.FORMAT.GVCF: "gVCF", "g.vcf.gz.tbi": "gVCF Index", "vcf.gz.tbi": "Variant Calls Index", "html": "Other", } def filter_df_by_file_ext(df): """ Only keep rows where file extension is one of those in FILE_EXT_FORMAT_MAP.keys """ df[CONCEPT.GENOMIC_FILE.FILE_FORMAT] = df["Key"].apply( lambda x: file_format(x) ) return df[df[CONCEPT.GENOMIC_FILE.FILE_FORMAT].notnull()] source_data_url = ( 'https://localhost:5002/download/study/SD_ME0WME0W/' 'file/SF_Y1JMXTTS/version/FV_4RYEMD71' ) do_after_read = filter_df_by_file_ext def s3_url(row): """ Create S3 URL for object from S3 bucket and key """ return f's3://{row["Bucket"]}/{row["Key"]}' def file_format(x): """ Get genomic file format by looking genomic file ext up in FILE_EXT_FORMAT_MAP dict """ # File format return FILE_EXT_FORMAT_MAP.get(file_ext(x)) def data_type(x): """ Get genomic file data type by looking up file format in DATA_TYPES. However, if the file's extension has `tbi` in it, then use the file extension itself to do the data type lookup. 
""" ext = file_ext(x) if "tbi" in ext: data_type = DATA_TYPES.get(ext) else: data_type = DATA_TYPES.get(file_format(x)) return data_type operations = [ row_map(out_col=CONCEPT.GENOMIC_FILE.ID, m=lambda row: s3_url(row)), row_map( out_col=CONCEPT.GENOMIC_FILE.URL_LIST, m=lambda row: [s3_url(row)] ), value_map( in_col="Key", out_col=CONCEPT.GENOMIC_FILE.FILE_NAME, m=lambda x: os.path.split(x)[-1], ), keep_map(in_col="Size", out_col=CONCEPT.GENOMIC_FILE.SIZE), value_map( in_col="ETag", out_col=CONCEPT.GENOMIC_FILE.HASH_DICT, m=lambda x: {constants.FILE.HASH.S3_ETAG.lower(): x.replace('"', "")}, ), constant_map( out_col=CONCEPT.GENOMIC_FILE.AVAILABILITY, m=constants.GENOMIC_FILE.AVAILABILITY.IMMEDIATE, ), keep_map( in_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT, out_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT, ), value_map( in_col="Key", out_col=CONCEPT.GENOMIC_FILE.DATA_TYPE, m=lambda x: data_type(x), ), ]
27.463415
78
0.67984
0
0
0
0
0
0
0
0
1,573
0.349245
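The extract config in the record above resolves an S3 key to a file format via the longest matching extension and then to a data type, with `tbi` indexes looked up by extension instead of format. The sketch below replays that lookup with plain strings standing in for the `GENOMIC_FILE` constants (an assumption, since the real values come from `kf_lib_data_ingest`).

```python
# Plain-string stand-ins for the GENOMIC_FILE constants (assumption).
FILE_EXT_FORMAT_MAP = {
    "fastq.gz": "fastq",
    "bam": "bam",
    "cram": "cram",
    "g.vcf.gz": "gvcf",
    "g.vcf.gz.tbi": "tbi",
    "vcf.gz": "vcf",
    "vcf.gz.tbi": "tbi",
}
DATA_TYPES = {
    "fastq": "Unaligned Reads",
    "bam": "Aligned Reads",
    "cram": "Aligned Reads",
    "vcf": "Variant Calls",
    "gvcf": "gVCF",
    "g.vcf.gz.tbi": "gVCF Index",
    "vcf.gz.tbi": "Variant Calls Index",
}

def file_ext(key):
    # Longest matching suffix wins, exactly as in the config above.
    matches = [ext for ext in FILE_EXT_FORMAT_MAP if key.endswith(ext)]
    return max(matches, key=len) if matches else None

def data_type(key):
    ext = file_ext(key)
    if ext is None:
        return None
    # tbi indexes are looked up by extension, everything else by format.
    return DATA_TYPES.get(ext) if "tbi" in ext else DATA_TYPES.get(FILE_EXT_FORMAT_MAP[ext])

for key in ["sample.g.vcf.gz", "sample.g.vcf.gz.tbi", "reads.cram", "notes.txt"]:
    print(key, "->", file_ext(key), data_type(key))
```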
b9d3222fd93bbc8ba199ba7a401394dc7531a2ff
665
py
Python
hard-gists/5c973ec1b5ab2e387646/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
21
2019-07-08T08:26:45.000Z
2022-01-24T23:53:25.000Z
hard-gists/5c973ec1b5ab2e387646/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
5
2019-06-15T14:47:47.000Z
2022-02-26T05:02:56.000Z
hard-gists/5c973ec1b5ab2e387646/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
17
2019-05-16T03:50:34.000Z
2021-01-14T14:35:12.000Z
import bpy from bpy.app.handlers import persistent bl_info = { "name": "Playback Once", "author": "Adhi Hargo", "version": (1, 0, 0), "blender": (2, 67, 3), "location": "", "description": "Playback once.", "warning": "", "wiki_url": "", "tracker_url": "", "category": "Animation"} @persistent def stopPlaybackAtEnd(scene): if scene.frame_current >= scene.frame_end: bpy.ops.screen.animation_cancel() def register(): bpy.app.handlers.frame_change_pre.append(stopPlaybackAtEnd) def unregister(): bpy.app.handlers.frame_change_pre.remove(stopPlaybackAtEnd) if __name__ == "__main__": register()
22.931034
63
0.645113
0
0
0
0
130
0.195489
0
0
169
0.254135

b9d47acd47b8bd0babe955a7bbbde7c4d9080b36
688
py
Python
Py3Challenges/saves/challenges/c6_min.py
AlbertUnruh/Py3Challenges
52f03f157860f6464f0c1710bf051a8099c29ea2
[ "MIT" ]
2
2022-02-13T04:57:10.000Z
2022-02-13T10:40:14.000Z
Py3Challenges/saves/challenges/c6_min.py
AlbertUnruh/Py3Challenges
52f03f157860f6464f0c1710bf051a8099c29ea2
[ "MIT" ]
null
null
null
Py3Challenges/saves/challenges/c6_min.py
AlbertUnruh/Py3Challenges
52f03f157860f6464f0c1710bf051a8099c29ea2
[ "MIT" ]
null
null
null
""" To master this you should consider using the builtin-``min``-function. """ from ...challenge import Challenge from random import randint x = [] for _ in range(randint(2, 10)): x.append(randint(1, 100)) intro = f"You have to print the lowest value of {', '.join(str(_) for _ in x[:-1])} and {x[-1]}. (values: x)" def validate_function(stdin: str, stdout: str, stderr: str, exc: tuple) -> bool: try: z = int(stdout.removesuffix("\n")) except ValueError: return False else: return min(x) == z challenge = Challenge( intro=intro, validate_function=validate_function, help=__doc__, values={"x": x}, capture_stdout=True, )
22.193548
109
0.632267
0
0
0
0
0
0
0
0
186
0.270349
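For the challenge module in the record above, a submission passes when its stdout parses to `min(x)`. The sketch below recreates that check locally; how the framework injects `x` into the player's namespace is assumed, so the setup is reproduced by hand.

```python
from random import randint

# Hand-rolled stand-in for the framework's setup: same construction of x.
x = []
for _ in range(randint(2, 10)):
    x.append(randint(1, 100))

# The player's program would effectively do: print(min(x)).
stdout = f"{min(x)}\n"

# This is the comparison validate_function performs on the captured stdout.
assert int(stdout.removesuffix("\n")) == min(x)
print("accepted:", stdout.strip(), "== min of", x)
```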
b9d600352f466e38045c7614f4b0151d5eb8f878
4,625
py
Python
services/web/server/tests/unit/with_dbs/01/test_director_v2.py
mrnicegyu11/osparc-simcore
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
[ "MIT" ]
null
null
null
services/web/server/tests/unit/with_dbs/01/test_director_v2.py
mrnicegyu11/osparc-simcore
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
[ "MIT" ]
1
2021-11-29T13:38:09.000Z
2021-11-29T13:38:09.000Z
services/web/server/tests/unit/with_dbs/01/test_director_v2.py
mrnicegyu11/osparc-simcore
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
[ "MIT" ]
null
null
null
# pylint:disable=unused-variable # pylint:disable=unused-argument # pylint:disable=redefined-outer-name from typing import AsyncIterator import pytest from aioresponses import aioresponses from faker import Faker from hypothesis import HealthCheck, given, settings from hypothesis import strategies as st from models_library.clusters import ClusterID from models_library.projects import ProjectID from models_library.projects_pipeline import ComputationTask from models_library.projects_state import RunningState from models_library.users import UserID from simcore_service_webserver import director_v2_api from simcore_service_webserver.director_v2_models import ( ClusterCreate, ClusterPatch, ClusterPing, ) @pytest.fixture() async def mocked_director_v2( director_v2_service_mock: aioresponses, ) -> AsyncIterator[aioresponses]: yield director_v2_service_mock @pytest.fixture def user_id(faker: Faker) -> UserID: return UserID(faker.pyint(min_value=1)) @pytest.fixture def project_id(faker: Faker) -> ProjectID: return ProjectID(faker.uuid4()) @pytest.fixture def cluster_id(faker: Faker) -> ClusterID: return ClusterID(faker.pyint(min_value=0)) async def test_create_pipeline( mocked_director_v2, client, user_id: UserID, project_id: ProjectID ): task_out = await director_v2_api.create_or_update_pipeline( client.app, user_id, project_id ) assert task_out assert isinstance(task_out, dict) assert task_out["state"] == RunningState.NOT_STARTED async def test_get_computation_task( mocked_director_v2, client, user_id: UserID, project_id: ProjectID, ): task_out = await director_v2_api.get_computation_task( client.app, user_id, project_id ) assert task_out assert isinstance(task_out, ComputationTask) assert task_out.state == RunningState.NOT_STARTED async def test_delete_pipeline( mocked_director_v2, client, user_id: UserID, project_id: ProjectID ): await director_v2_api.delete_pipeline(client.app, user_id, project_id) @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) @given(cluster_create=st.builds(ClusterCreate)) async def test_create_cluster( mocked_director_v2, client, user_id: UserID, cluster_create ): created_cluster = await director_v2_api.create_cluster( client.app, user_id=user_id, new_cluster=cluster_create ) assert created_cluster is not None assert isinstance(created_cluster, dict) assert "id" in created_cluster async def test_list_clusters(mocked_director_v2, client, user_id: UserID): list_of_clusters = await director_v2_api.list_clusters(client.app, user_id=user_id) assert isinstance(list_of_clusters, list) assert len(list_of_clusters) > 0 async def test_get_cluster( mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID ): cluster = await director_v2_api.get_cluster( client.app, user_id=user_id, cluster_id=cluster_id ) assert isinstance(cluster, dict) assert cluster["id"] == cluster_id async def test_get_cluster_details( mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID ): cluster_details = await director_v2_api.get_cluster_details( client.app, user_id=user_id, cluster_id=cluster_id ) assert isinstance(cluster_details, dict) @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) @given(cluster_patch=st.from_type(ClusterPatch)) async def test_update_cluster( mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID, cluster_patch ): print(f"--> updating cluster with {cluster_patch=}") updated_cluster = await director_v2_api.update_cluster( client.app, user_id=user_id, cluster_id=cluster_id, cluster_patch=cluster_patch ) 
assert isinstance(updated_cluster, dict) assert updated_cluster["id"] == cluster_id async def test_delete_cluster( mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID ): await director_v2_api.delete_cluster( client.app, user_id=user_id, cluster_id=cluster_id ) @settings(suppress_health_check=[HealthCheck.function_scoped_fixture]) @given(cluster_ping=st.builds(ClusterPing)) async def test_ping_cluster(mocked_director_v2, client, cluster_ping: ClusterPing): await director_v2_api.ping_cluster(client.app, cluster_ping=cluster_ping) async def test_ping_specific_cluster( mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID ): await director_v2_api.ping_specific_cluster( client.app, user_id=user_id, cluster_id=cluster_id )
30.833333
87
0.780973
0
0
142
0.030703
1,738
0.375784
3,188
0.689297
165
0.035676
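The tests in the record above rely on an `aioresponses`-based fixture to fake the director-v2 HTTP API. The sketch below shows that mocking pattern against a bare `aiohttp` client instead of the webserver's `director_v2_api` helpers (an assumption made to keep it self-contained); the URL and payload are made up.

```python
import asyncio

import aiohttp
from aioresponses import aioresponses

async def fetch_computation(session, url):
    async with session.get(url) as resp:
        return await resp.json()

async def main():
    url = "http://director-v2:8000/v2/computations/some-project"  # hypothetical URL
    with aioresponses() as mocked:
        # Fake the director-v2 response, the same idea as the fixture above.
        mocked.get(url, payload={"state": "NOT_STARTED"})
        async with aiohttp.ClientSession() as session:
            body = await fetch_computation(session, url)
    assert body["state"] == "NOT_STARTED"
    print(body)

asyncio.run(main())
```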
b9d60ecc3068b2d42bc6110555d94274b9cac29c
930
py
Python
tools/py/heatmap.py
sriramreddyM/pLitter
e506777af0b8bbae411b474f5eacee91e8efea59
[ "MIT" ]
5
2021-11-09T10:25:35.000Z
2022-03-30T03:57:46.000Z
tools/py/heatmap.py
sriramreddyM/pLitter
e506777af0b8bbae411b474f5eacee91e8efea59
[ "MIT" ]
null
null
null
tools/py/heatmap.py
sriramreddyM/pLitter
e506777af0b8bbae411b474f5eacee91e8efea59
[ "MIT" ]
1
2021-09-09T08:04:46.000Z
2021-09-09T08:04:46.000Z
'''
Builds a folium map from a CSV of geotagged litter detections
(each row: name, latitude, longitude) and adds one marker per row.
'''

import csv

import folium
from folium import plugins
from folium.plugins import HeatMap


class plitterMap():
    def __init__(self, file_path):
        self.data = file_path
        self.df = []
        with open(self.data) as f:
            reader = csv.reader(f)
            for row in reader:
                # each row is expected to hold [name, latitude, longitude]
                self.df.append([row[0], float(row[1]), float(row[2])])
        self.tooltip = self.df[0][0]
        self.map = None

    def loadMap(self):
        # center the map on the first detection
        self.map = folium.Map(location=[self.df[0][1], self.df[0][2]], zoom_start=18)

    def loadGpsLoc(self):
        for name, lat, lon in self.df:
            folium.Marker([lat, lon], popup="<i>" + name + "</i>",
                          tooltip=self.tooltip).add_to(self.map)
30
133
0.53871
0
0
0
0
0
0
0
0
807
0.867742
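The `heatmap.py` module in the record above imports `folium.plugins.HeatMap` but only places individual markers; turning the same (latitude, longitude) detections into a density layer is a short extra step. The coordinates and output file name below are made up for illustration.

```python
import folium
from folium.plugins import HeatMap

# Made-up (lat, lon) detections standing in for the CSV rows.
points = [
    (13.9869, 100.6183),
    (13.9871, 100.6190),
    (13.9874, 100.6195),
]

m = folium.Map(location=list(points[0]), zoom_start=18)
HeatMap(points, radius=15).add_to(m)    # density layer from the raw points
m.save("plitter_heatmap.html")          # assumed output file name
```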
b9d6dd8bd3445675e1356c10ac0bb61cd00aba81
3,027
py
Python
generator.py
Geoalert/emergency-mapping
96668e4e5aa2b520e5727536f7a8f4c262ee3da6
[ "MIT" ]
3
2018-04-04T17:58:53.000Z
2021-10-14T08:50:13.000Z
generator.py
aeronetlab/map_augury
96668e4e5aa2b520e5727536f7a8f4c262ee3da6
[ "MIT" ]
null
null
null
generator.py
aeronetlab/map_augury
96668e4e5aa2b520e5727536f7a8f4c262ee3da6
[ "MIT" ]
1
2020-03-24T12:07:07.000Z
2020-03-24T12:07:07.000Z
import numpy as np def random_augmentation(img, mask): #you can add any augmentations you need return img, mask def batch_generator(image, mask, batch_size=1, crop_size=0, patch_size=256, bbox= None, augmentation=False): ''' image: nparray, must have 3 dimension mask: nparray, 2 dimensions, same size as image batch_size: int, number of images in a batch patch_size: int, size of the image returned, patch is square crop_size: int, how much pixels should be cropped off the mask bbox: None or tuple of 4 ints, (min_y, max_y, min_x, max_x), the data is selected from within the bbox augmentation: turn on/off data augmentation. The augmentation function is random_augmentation() above returns batch of image and mask patches, image is turned to 'channels last' as required by unet ''' if np.ndim(mask) != 2 or np.ndim(image) != 3: raise ValueError('image must have 3 dims and mask 2 dims') if mask.shape != image.shape[1:]: raise ValueError('image and mask shape is different') im_max = float(np.max(image)) mask_max = 1.0 #select subimage if bbox is not None: # check bbox if bbox[0] < 0 or bbox [2] < 0 \ or bbox[1] > mask.shape[0] or bbox[3] > mask.shape[0] \ or bbox[0] + patch_size > bbox[1] or bbox[2] + patch_size > bbox[3] \ or patch_size <= 0: raise ValueError("Incorrect bbox or patch size") img_ = image[:, bbox[0] : bbox[1], bbox[2]:bbox[3]] mask_ = mask[bbox[0] : bbox[1], bbox[2]:bbox[3]] else: img_ = image mask_ = mask while 1: x = [] y = [] for i in range (batch_size): random_x = np.random.randint(0, mask_.shape[1] - patch_size) random_y = np.random.randint(0, mask_.shape[0] - patch_size) img_patch = img_[:, random_y : random_y + patch_size, random_x : random_x + patch_size] / im_max # transform the image from channels-first (rasterio format) to channels-last (default tensorflow format) img_patch = np.moveaxis(img_patch, 0, 2) mask_patch = mask_[random_y : random_y + patch_size, random_x : random_x + patch_size] / mask_max if augmentation: img_patch, mask_patch = random_augmentation(img_patch, mask_patch) # mask is cropped as it may be useful for some convnets that have output size less than input if crop_size > 0: mask_patch = mask_patch[crop_size : -crop_size, crop_size : -crop_size] mask_patch = np.expand_dims(mask_patch, 2) x.append(img_patch) y.append(mask_patch) yield (np.array(x), np.array(y))
40.905405
116
0.570202
0
0
2,895
0.956392
0
0
0
0
973
0.32144
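Because `batch_generator` in the record above is an infinite generator, it is usually driven with `next()` (or handed to a Keras-style `fit`). The run below uses random stand-in data with shapes chosen only for illustration, assuming the function is importable from the record's `generator.py`.

```python
import numpy as np

from generator import batch_generator  # the module shown in this record

# Random stand-ins: a 4-band channels-first image and a binary mask.
image = np.random.randint(0, 255, size=(4, 512, 512)).astype(np.float32)
mask = (np.random.rand(512, 512) > 0.5).astype(np.float32)

gen = batch_generator(image, mask, batch_size=8, crop_size=0,
                      patch_size=256, augmentation=False)
x, y = next(gen)
print(x.shape)  # (8, 256, 256, 4) - image patches, moved to channels-last
print(y.shape)  # (8, 256, 256, 1) - mask patches with an added channel axis
```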
b9d71e12c5fdd4a3220a64251c8e0e2c9a302fe4
13,351
py
Python
awx/api/metadata.py
Avinesh/awx
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
[ "Apache-2.0" ]
1
2021-09-07T14:53:57.000Z
2021-09-07T14:53:57.000Z
awx/api/metadata.py
Avinesh/awx
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
[ "Apache-2.0" ]
2
2020-02-04T05:01:38.000Z
2020-02-18T06:44:52.000Z
awx/api/metadata.py
Avinesh/awx
6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf
[ "Apache-2.0" ]
1
2020-01-28T05:34:09.000Z
2020-01-28T05:34:09.000Z
# Copyright (c) 2016 Ansible, Inc. # All Rights Reserved. from collections import OrderedDict # Django from django.core.exceptions import PermissionDenied from django.db.models.fields import PositiveIntegerField, BooleanField from django.db.models.fields.related import ForeignKey from django.http import Http404 from django.utils.encoding import force_text, smart_text from django.utils.translation import ugettext_lazy as _ # Django REST Framework from rest_framework import exceptions from rest_framework import metadata from rest_framework import serializers from rest_framework.relations import RelatedField, ManyRelatedField from rest_framework.fields import JSONField as DRFJSONField from rest_framework.request import clone_request # AWX from awx.main.fields import JSONField, ImplicitRoleField from awx.main.models import InventorySource, NotificationTemplate from awx.main.scheduler.kubernetes import PodManager class Metadata(metadata.SimpleMetadata): def get_field_info(self, field): field_info = OrderedDict() field_info['type'] = self.label_lookup[field] field_info['required'] = getattr(field, 'required', False) text_attrs = [ 'read_only', 'label', 'help_text', 'min_length', 'max_length', 'min_value', 'max_value', 'category', 'category_slug', 'defined_in_file' ] for attr in text_attrs: value = getattr(field, attr, None) if value is not None and value != '': field_info[attr] = force_text(value, strings_only=True) placeholder = getattr(field, 'placeholder', serializers.empty) if placeholder is not serializers.empty: field_info['placeholder'] = placeholder serializer = getattr(field, 'parent', None) if serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'): # Update help text for common fields. field_help_text = { 'id': _('Database ID for this {}.'), 'name': _('Name of this {}.'), 'description': _('Optional description of this {}.'), 'type': _('Data type for this {}.'), 'url': _('URL for this {}.'), 'related': _('Data structure with URLs of related resources.'), 'summary_fields': _('Data structure with name/description for related resources.'), 'created': _('Timestamp when this {} was created.'), 'modified': _('Timestamp when this {} was last modified.'), } if field.field_name in field_help_text: opts = serializer.Meta.model._meta.concrete_model._meta verbose_name = smart_text(opts.verbose_name) field_info['help_text'] = field_help_text[field.field_name].format(verbose_name) if field.field_name == 'type': field_info['filterable'] = True else: for model_field in serializer.Meta.model._meta.fields: if field.field_name == model_field.name: if getattr(model_field, '__accepts_json__', None): field_info['type'] = 'json' field_info['filterable'] = True break else: field_info['filterable'] = False # Indicate if a field has a default value. # FIXME: Still isn't showing all default values? try: default = field.get_default() if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost': default = '{}://{}'.format(self.request.scheme, self.request.get_host()) field_info['default'] = default except serializers.SkipField: pass if getattr(field, 'child', None): field_info['child'] = self.get_field_info(field.child) elif getattr(field, 'fields', None): field_info['children'] = self.get_serializer_info(field) if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'): field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()] # Indicate if a field is write-only. 
if getattr(field, 'write_only', False): field_info['write_only'] = True # Special handling of inventory source_region choices that vary based on # selected inventory source. if field.field_name == 'source_regions': for cp in ('azure_rm', 'ec2', 'gce'): get_regions = getattr(InventorySource, 'get_%s_region_choices' % cp) field_info['%s_region_choices' % cp] = get_regions() # Special handling of group_by choices for EC2. if field.field_name == 'group_by': for cp in ('ec2',): get_group_by_choices = getattr(InventorySource, 'get_%s_group_by_choices' % cp) field_info['%s_group_by_choices' % cp] = get_group_by_choices() # Special handling of notification configuration where the required properties # are conditional on the type selected. if field.field_name == 'notification_configuration': for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES: field_info[notification_type_name] = notification_type_class.init_parameters # Special handling of notification messages where the required properties # are conditional on the type selected. try: view_model = field.context['view'].model except (AttributeError, KeyError): view_model = None if view_model == NotificationTemplate and field.field_name == 'messages': for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES: field_info[notification_type_name] = notification_type_class.default_messages # Update type of fields returned... model_field = None if serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'): try: model_field = serializer.Meta.model._meta.get_field(field.field_name) except Exception: pass if field.field_name == 'type': field_info['type'] = 'choice' elif field.field_name in ('url', 'custom_virtualenv', 'token'): field_info['type'] = 'string' elif field.field_name in ('related', 'summary_fields'): field_info['type'] = 'object' elif isinstance(field, PositiveIntegerField): field_info['type'] = 'integer' elif field.field_name in ('created', 'modified'): field_info['type'] = 'datetime' elif ( RelatedField in field.__class__.__bases__ or isinstance(model_field, ForeignKey) ): field_info['type'] = 'id' elif ( isinstance(field, JSONField) or isinstance(model_field, JSONField) or isinstance(field, DRFJSONField) or isinstance(getattr(field, 'model_field', None), JSONField) or field.field_name == 'credential_passwords' ): field_info['type'] = 'json' elif ( isinstance(field, ManyRelatedField) and field.field_name == 'credentials' # launch-time credentials ): field_info['type'] = 'list_of_ids' elif isinstance(model_field, BooleanField): field_info['type'] = 'boolean' return field_info def get_serializer_info(self, serializer, method=None): filterer = getattr(serializer, 'filter_field_metadata', lambda fields, method: fields) return filterer( super(Metadata, self).get_serializer_info(serializer), method ) def determine_actions(self, request, view): # Add field information for GET requests (so field names/labels are # available even when we can't POST/PUT). 
actions = {} for method in {'GET', 'PUT', 'POST'} & set(view.allowed_methods): view.request = clone_request(request, method) obj = None try: # Test global permissions if hasattr(view, 'check_permissions'): view.check_permissions(view.request) # Test object permissions if method == 'PUT' and hasattr(view, 'get_object'): obj = view.get_object() except (exceptions.APIException, PermissionDenied, Http404): continue else: # If user has appropriate permissions for the view, include # appropriate metadata about the fields that should be supplied. serializer = view.get_serializer(instance=obj) actions[method] = self.get_serializer_info(serializer, method=method) finally: view.request = request for field, meta in list(actions[method].items()): if not isinstance(meta, dict): continue if field == "pod_spec_override": meta['default'] = PodManager().pod_definition # Add type choices if available from the serializer. if field == 'type' and hasattr(serializer, 'get_type_choices'): meta['choices'] = serializer.get_type_choices() # For GET method, remove meta attributes that aren't relevant # when reading a field and remove write-only fields. if method == 'GET': attrs_to_remove = ('required', 'read_only', 'default', 'min_length', 'max_length', 'placeholder') for attr in attrs_to_remove: meta.pop(attr, None) meta.get('child', {}).pop(attr, None) if meta.pop('write_only', False): actions['GET'].pop(field) # For PUT/POST methods, remove read-only fields. if method in ('PUT', 'POST'): # This value should always be False for PUT/POST, so don't # show it (file-based read-only settings can't be updated) meta.pop('defined_in_file', False) if meta.pop('read_only', False): if field == 'id' and hasattr(view, 'attach'): continue actions[method].pop(field) return actions def determine_metadata(self, request, view): # store request on self so we can use it to generate field defaults # (such as TOWER_URL_BASE) self.request = request try: setattr(view, '_request', request) metadata = super(Metadata, self).determine_metadata(request, view) finally: delattr(view, '_request') # Add type(s) handled by this view/serializer. if hasattr(view, 'get_serializer'): serializer = view.get_serializer() if hasattr(serializer, 'get_types'): metadata['types'] = serializer.get_types() # Add search fields if available from the view. if getattr(view, 'search_fields', None): metadata['search_fields'] = view.search_fields # Add related search fields if available from the view. 
if getattr(view, 'related_search_fields', None): metadata['related_search_fields'] = view.related_search_fields # include role names in metadata roles = [] model = getattr(view, 'model', None) if model: for field in model._meta.get_fields(): if type(field) is ImplicitRoleField: roles.append(field.name) if len(roles) > 0: metadata['object_roles'] = roles from rest_framework import generics if isinstance(view, generics.ListAPIView) and hasattr(view, 'paginator'): metadata['max_page_size'] = view.paginator.max_page_size return metadata class RoleMetadata(Metadata): def determine_metadata(self, request, view): metadata = super(RoleMetadata, self).determine_metadata(request, view) if 'actions' in metadata: metadata['actions'].pop('POST') metadata['actions']['POST'] = { "id": {"type": "integer", "label": "ID", "help_text": "Database ID for this role."}, "disassociate": {"type": "integer", "label": "Disassociate", "help_text": "Provide to remove this role."}, } return metadata class SublistAttachDetatchMetadata(Metadata): def determine_actions(self, request, view): actions = super(SublistAttachDetatchMetadata, self).determine_actions(request, view) method = 'POST' if method in actions: for field in list(actions[method].keys()): if field == 'id': continue actions[method].pop(field) return actions
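The record above is AWX's api/metadata.py: a SimpleMetadata subclass that enriches the field information DRF returns on OPTIONS requests (choices, defaults, filterability, per-notification-type parameters). A minimal sketch of how such a metadata class is attached to an ordinary DRF view is shown below; VerboseMetadata, WidgetSerializer and WidgetList are illustrative names, not part of AWX.

# Hypothetical sketch of plugging a SimpleMetadata subclass into a DRF view;
# none of these names come from the AWX module above.
from rest_framework import generics, metadata, serializers


class VerboseMetadata(metadata.SimpleMetadata):
    """Add one extra key per field, in the spirit of AWX's Metadata.get_field_info."""

    def get_field_info(self, field):
        field_info = super().get_field_info(field)
        field_info['write_only'] = getattr(field, 'write_only', False)
        return field_info


class WidgetSerializer(serializers.Serializer):
    name = serializers.CharField(max_length=100)
    secret = serializers.CharField(write_only=True)


class WidgetList(generics.ListCreateAPIView):
    queryset = []                     # placeholder; a real view would use a model queryset
    serializer_class = WidgetSerializer
    metadata_class = VerboseMetadata  # OPTIONS responses now include the extra key

DRF also accepts such a class project-wide through the DEFAULT_METADATA_CLASS setting, which is how a shared metadata class like the one in this record is normally wired in.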
43.630719
131
0.601004
12,416
0.929968
0
0
0
0
0
0
3,486
0.261104
b9d7834f2dd39b0c5b6da30b8ebfe19e7026adeb
1,985
py
Python
plugins/python/tasks.py
BBVA/deeptracy
40f4b6bba2bdd345e95e42d474c05fa90f15c3e9
[ "Apache-1.1" ]
85
2017-09-22T10:48:51.000Z
2021-06-11T18:33:28.000Z
plugins/python/tasks.py
BBVA/deeptracy
40f4b6bba2bdd345e95e42d474c05fa90f15c3e9
[ "Apache-1.1" ]
51
2017-10-17T10:16:16.000Z
2020-08-29T23:10:21.000Z
plugins/python/tasks.py
BBVA/deeptracy
40f4b6bba2bdd345e95e42d474c05fa90f15c3e9
[ "Apache-1.1" ]
14
2017-11-20T10:20:16.000Z
2021-02-02T21:35:07.000Z
import json

from washer.worker.actions import AppendStdout, AppendStderr
from washer.worker.actions import CreateNamedLog, AppendToLog
from washer.worker.actions import SetProperty
from washer.worker.commands import washertask


def pipenv_graph2deps(rawgraph):
    graph = json.loads(rawgraph)

    def build_entry(data):
        if 'required_version' in data:
            spec = data['key'] + data['required_version']
        else:
            spec = data['key']

        return {'installer': 'pipenv',
                'spec': spec,
                'source': 'pypi',
                'name': data['package_name'],
                'version': data['installed_version']}

    def extract_dependencies(entries):
        for entry in entries:
            if 'package' in entry:
                package = entry['package']
                dependencies = entry.get('dependencies', [])
                yield build_entry(package)
                yield from extract_dependencies(dependencies)
            else:
                yield build_entry(entry)

    yield from extract_dependencies(graph)


@washertask
def pip_install(repopath, path=".", **kwargs):
    import invoke
    c = invoke.Context()

    with c.cd(repopath):
        with c.cd(path):
            res = c.run("pipenv install .")
            deps = c.run("pipenv graph --json")

    yield AppendStdout(res.stdout)
    yield AppendStderr(res.stderr)
    yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))

    return True


@washertask
def requirement_file(repopath, requirement="requirements.txt",
                     path=".", **kwargs):
    import invoke
    c = invoke.Context()

    with c.cd(repopath):
        with c.cd(path):
            res = c.run("pipenv install -r %s" % requirement)
            deps = c.run("pipenv graph --json")

    yield AppendStdout(res.stdout)
    yield AppendStderr(res.stderr)
    yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))

    return True
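pipenv_graph2deps flattens the JSON that `pipenv graph --json` prints (a list of {"package": ..., "dependencies": [...]} entries) into the dependency dicts stored via SetProperty. A standalone check of just that parsing step, using a hand-written payload in the same shape (no pipenv or washer commands run, but the module itself is assumed to be importable, here as `tasks`):

# Hypothetical usage sketch; the sample payload below is hand-written to match
# the shape pipenv_graph2deps expects, not real `pipenv graph --json` output.
import json
from tasks import pipenv_graph2deps

sample_graph = json.dumps([
    {
        "package": {"key": "requests", "package_name": "requests",
                    "installed_version": "2.25.1"},
        "dependencies": [
            {"key": "urllib3", "package_name": "urllib3",
             "installed_version": "1.26.4", "required_version": ">=1.21.1"}
        ]
    }
])

for dep in pipenv_graph2deps(sample_graph):
    print(dep["name"], dep["spec"], dep["version"])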
28.357143
75
0.614106
0
0
1,724
0.868514
892
0.44937
0
0
299
0.15063
b9d84b2b4c7d4cbbbf84bcb2ee37459c480a1a5e
715
py
Python
senity/utils/getSiteProfile.py
pkokkinos/senity
c6e41678620bef558cc3600929a8320ff2a285cf
[ "MIT" ]
1
2017-10-26T12:30:04.000Z
2017-10-26T12:30:04.000Z
senity/utils/getSiteProfile.py
pkokkinos/senity
c6e41678620bef558cc3600929a8320ff2a285cf
[ "MIT" ]
null
null
null
senity/utils/getSiteProfile.py
pkokkinos/senity
c6e41678620bef558cc3600929a8320ff2a285cf
[ "MIT" ]
null
null
null
import json
import os


# get site profile
def getSiteProfile(site_file):
    with open(site_file) as json_file:
        json_data = json.load(json_file)
    return json_data


# get all site profiles
def getAllSiteProfiles(site_folder):
    allSiteProfiles = {}
    allSiteFiles = os.listdir(site_folder)
    for sf in allSiteFiles:
        sp = getSiteProfile(site_folder + "/" + sf)
        allSiteProfiles[sp["siteName"]] = []
        for device in sp["devicesAvailable"]:
            for i in range(device["deviceCounter"]):
                allSiteProfiles[sp["siteName"]].append(device["deviceName"])
    return allSiteProfiles


#sites_folder = "sites"
#print getAllSiteProfiles(sites_folder)
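getAllSiteProfiles expands each site's device list by its deviceCounter. A quick usage sketch, assuming the functions above are in scope; the folder and profile contents below are made-up examples matching the keys the code reads:

# Hypothetical usage sketch for getAllSiteProfiles; the site profile written
# here is fabricated, only its keys mirror what the code expects.
import json, os, tempfile

site_folder = tempfile.mkdtemp()
with open(os.path.join(site_folder, "site1.json"), "w") as f:
    json.dump({
        "siteName": "site1",
        "devicesAvailable": [
            {"deviceName": "fridge", "deviceCounter": 2},
            {"deviceName": "heater", "deviceCounter": 1},
        ],
    }, f)

print(getAllSiteProfiles(site_folder))
# expected: {'site1': ['fridge', 'fridge', 'heater']}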
23.833333
77
0.664336
0
0
0
0
0
0
0
0
170
0.237762
b9d87f8b647f237794f75914da625ea130e200c3
5,959
py
Python
ppo_new/baseline.py
QingXinHu123/Lane_change_RL
06c70e6f58d3478669b56800028e320ca03f5222
[ "MIT" ]
1
2022-03-17T03:40:57.000Z
2022-03-17T03:40:57.000Z
ppo_new/baseline.py
QingXinHu123/Lane_change_RL
06c70e6f58d3478669b56800028e320ca03f5222
[ "MIT" ]
null
null
null
ppo_new/baseline.py
QingXinHu123/Lane_change_RL
06c70e6f58d3478669b56800028e320ca03f5222
[ "MIT" ]
null
null
null
import os, sys from env.LaneChangeEnv import LaneChangeEnv import random import numpy as np if 'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success') else: sys.exit("please declare environment variable 'SUMO_HOME'") import traci def episode_generator(pi, env, is_gui, ttc, gap, sumoseed, randomseed): egoid = 'lane1.' + str(random.randint(1, 6)) ob = env.reset(egoid=egoid, tlane=0, tfc=2, is_gui=is_gui, sumoseed=sumoseed, randomseed=randomseed) traci.vehicle.setColor(egoid, (255, 69, 0)) cur_ep_ret = 0 # return in current episode cur_ep_ret_detail = 0 cur_ep_len = 0 # len of current episode cur_ep_obs = [] cur_ep_acs = [] while True: ac = pi(ob=ob, env=env, ttc=ttc, gap=gap) ob, rew, new, info = env.step(ac) cur_ep_ret += rew cur_ep_ret_detail += np.array(list(info['reward_dict'].values())) cur_ep_len += 1 cur_ep_obs.append(ob) cur_ep_acs.append(ac) if new: return {"ep_obs": cur_ep_obs, "ep_acs": cur_ep_acs, "ep_ret": cur_ep_ret, 'ep_rets_detail': cur_ep_ret_detail, "ep_len": cur_ep_len, 'ep_num_danger': info['num_danger'], 'ep_is_success': info['is_success'], 'ep_num_crash': info['num_crash'], 'ep_is_collision': info["is_collision"]} def pi_baseline(ob, env, ttc, gap): # safety gap set to seconds to collision if env.ego.trgt_leader: leader_speed = env.ego.trgt_leader.speed else: leader_speed = env.ego.speed if env.ego.trgt_follower: follower_speed = env.ego.trgt_follower.speed else: follower_speed = env.ego.speed leader_dis = abs(ob[3 * 4 + 0 + 1])*239.8 follower_dis = abs(ob[4 * 4 + 0 + 1])*239.8 TTC = (leader_dis - 5) / max(env.ego.speed, 0.001) TTC2 = (follower_dis - 5) / max(follower_speed, 0.001) # print(TTC, TTC) if TTC > ttc and TTC2 > ttc and leader_dis > gap and follower_dis > gap: ac_lat = 1 # change lane else: ac_lat = 0 # abort ac = ac_lat * 3 + 1 return ac def evaluate_baseline(num_eps, ttc, gap, is_gui): sumoseed = 0 randomseed = 0 pi = pi_baseline env = LaneChangeEnv(is_train=False) ret_eval = 0 ret_det_eval = 0 # not a integer, will be broadcasted danger_num = 0 crash_num = 0 level_1_danger = [] level_2_danger = [] collision_num = 0 ep_len_list = [] success_num = 0 for i in range(num_eps): ep_eval = episode_generator(pi, env, is_gui=is_gui, ttc=ttc, gap=gap, sumoseed=sumoseed, randomseed=randomseed) ret_eval += ep_eval['ep_ret'] ret_det_eval += ep_eval['ep_rets_detail'] danger_num += ep_eval['ep_num_danger'] crash_num += ep_eval['ep_num_crash'] level_1_danger.append(1 if ep_eval['ep_num_danger'] > 0 else 0) level_2_danger.append((1 if ep_eval['ep_num_crash'] > 0 else 0)) collision_num += ep_eval['ep_is_collision'] success_num += int(ep_eval['ep_is_success']) if ep_eval['ep_is_success']: ep_len_list.append(ep_eval['ep_len']) sumoseed += 1 randomseed += 1 ret_eval /= float(num_eps) ret_det_eval /= float(num_eps) danger_rate = danger_num / num_eps crash_rate = crash_num / num_eps level_1_danger_rate = np.mean(level_1_danger) level_2_danger_rate = np.mean(level_2_danger) coll_rate = collision_num / num_eps success_rate = success_num / float(num_eps) success_len = np.mean(ep_len_list) print('reward_detail: ', ret_det_eval) print('reward: ', ret_eval, '\ndanger_rate: ', danger_rate, '\ncrash_rate: ', crash_rate, '\nlevel-1-danger_rate: ', level_1_danger_rate, '\nlevel-2-danger_rate: ', level_2_danger_rate, '\ncollision_rate: ', coll_rate, '\nsuccess_rate: ', success_rate, '\nsucess_len: ', success_len) env.close() return ret_eval, danger_rate, crash_rate, level_1_danger_rate, level_2_danger_rate, coll_rate, 
success_rate, success_len NUM_EPS = 100 IS_GUI = False # f = open('../data/baseline_evaluation/testseed2.csv', 'w+') # safety_gap = 2 constraints_list = [3.0] # [1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0] ttcs = [0.1, 0.3, 0.5, 1, 2, 3] # ttcs = [2] gap = 0 reward_list = [] danger_rate_list = [] crash_rate_list = [] level_1_danger_list = [] level_2_danger_list = [] coll_rate_list = [] succ_rate_list = [] succ_len_list = [] for ttc in ttcs: ret_eval, danger_rate, crash_rate, level_1_danger_rate, level_2_danger_rate, coll_rate, success_rate, success_len = evaluate_baseline(NUM_EPS, ttc, gap, IS_GUI) reward_list.append(ret_eval) danger_rate_list.append(danger_rate) crash_rate_list.append(crash_rate) level_1_danger_list.append(level_1_danger_rate) level_2_danger_list.append(level_2_danger_rate) coll_rate_list.append(coll_rate) succ_rate_list.append(success_rate) succ_len_list.append(success_len) print('reward: ', reward_list) print('danger rate: ', danger_rate_list) print('crash rate: ', crash_rate_list) print('level-1-danger_rate: ', level_1_danger_list) print('level-2-danger_rate: ', level_2_danger_list) print('collison rate: ', coll_rate_list) print('success rate: ', succ_rate_list) print('sucess len: ', succ_len_list) # reward: [-89.12552753359037, -69.84537459892903, -73.81562785829651, -148.23580687485645, -227.71842861064192, -229.9101089174337] # danger rate: [2.13, 0.88, 0.77, 1.88, 3.82, 3.82] # crash rate: [0.58, 0.33, 0.5, 1.24, 2.09, 2.09] # level-1-danger_rate: [0.23, 0.09, 0.05, 0.14, 0.25, 0.25] # level-2-danger_rate: [0.05, 0.03, 0.05, 0.12, 0.2, 0.2] # collison rate: [0.0, 0.0, 0.02, 0.09, 0.14, 0.14] # success rate: [0.99, 0.99, 0.9, 0.6, 0.08, 0.05] # sucess len: [55.656565656565654, 62.43434343434343, 67.5, 90.1, 66.625, 73.4]
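pi_baseline above is the rule-based policy: it recovers the leader/follower gaps from the normalized observation (the 239.8 factor undoes the normalization), converts them to time-to-collision values, and only commits to the lane change when both TTCs exceed `ttc` and both gaps exceed `gap`. The decision rule can be sanity-checked offline with a stand-in env object; everything below (the SimpleNamespace env and the observation vector) is fabricated for illustration and does not come from LaneChangeEnv or SUMO.

# Hypothetical offline check of the pi_baseline rule; assumes pi_baseline from
# the module above is in scope. The env stand-in exposes only the attributes
# the policy reads, and the observation vector is fabricated.
from types import SimpleNamespace

ego = SimpleNamespace(speed=10.0,
                      trgt_leader=SimpleNamespace(speed=11.0),
                      trgt_follower=SimpleNamespace(speed=9.0))
fake_env = SimpleNamespace(ego=ego)

ob = [0.0] * 21
ob[3 * 4 + 1] = 50.0 / 239.8   # normalized gap to target leader (index used by the policy)
ob[4 * 4 + 1] = 40.0 / 239.8   # normalized gap to target follower

print(pi_baseline(ob, fake_env, ttc=2, gap=5))  # 4 -> change lane; 1 -> abort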
36.558282
164
0.659171
0
0
0
0
0
0
0
0
1,527
0.256251
b9d8a3bc2867b57ba7db6ffd06a68bdf7372909c
1,261
py
Python
clean_data.py
toogy/pendigits-hmm
03382e1457941714439d40b67e53eaf117fe4d08
[ "MIT" ]
null
null
null
clean_data.py
toogy/pendigits-hmm
03382e1457941714439d40b67e53eaf117fe4d08
[ "MIT" ]
null
null
null
clean_data.py
toogy/pendigits-hmm
03382e1457941714439d40b67e53eaf117fe4d08
[ "MIT" ]
null
null
null
import numpy as np
import pickle

from collections import defaultdict

from parsing import parser
from analysis import training


def main():
    parse = parser.Parser();
    train_digits = parse.parse_file('data/pendigits-train');
    test_digits = parse.parse_file('data/pendigits-test')

    centroids = training.get_digit_kmeans_centroids(
        train_digits, 256 - 3)

    training.set_digit_observations(
        train_digits, centroids, 256)
    training.set_digit_observations(
        test_digits, centroids, 256)

    train_sequences = defaultdict(list)
    test_sequences = []
    n_test_sequences = len(test_digits)
    test_expected_labels = np.ndarray(shape=(n_test_sequences,))

    for digit in train_digits:
        train_sequences[digit.label].append(digit.np_array_observations)

    for i, digit in enumerate(test_digits):
        test_sequences.append(digit.np_array_observations)
        test_expected_labels[i] = digit.label

    with open('train_sequences', 'wb') as f:
        pickle.dump(train_sequences, f)

    with open('test_sequences', 'wb') as f:
        pickle.dump(test_sequences, f)

    with open('test_expected_labels', 'wb') as f:
        pickle.dump(test_expected_labels, f)


if __name__ == '__main__':
    main()
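The script pickles three artifacts: a dict of per-label training observation sequences, a list of test sequences, and the expected test labels. A later training or evaluation step would reload them roughly as below (file names as written by main(); the HMM training itself is out of scope here):

# Sketch of reloading the artifacts written by main(); assumes the script
# above has already been run in the current working directory.
import pickle

with open('train_sequences', 'rb') as f:
    train_sequences = pickle.load(f)       # {label: [observation arrays, ...], ...}

with open('test_sequences', 'rb') as f:
    test_sequences = pickle.load(f)        # [observation arrays, ...]

with open('test_expected_labels', 'rb') as f:
    test_expected_labels = pickle.load(f)  # numpy array of labels

print(len(test_sequences), test_expected_labels.shape)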
24.25
72
0.704996
0
0
0
0
0
0
0
0
120
0.095163
b9d992fc9c803eca7ba614c187b28cbfcef4b1f8
5,988
py
Python
scripts/commit_validation/commit_validation/commit_validation.py
cypherdotXd/o3de
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
[ "Apache-2.0", "MIT" ]
8
2021-08-31T02:14:19.000Z
2021-12-28T19:20:59.000Z
scripts/commit_validation/commit_validation/commit_validation.py
cypherdotXd/o3de
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
[ "Apache-2.0", "MIT" ]
8
2021-07-12T13:55:00.000Z
2021-10-04T14:53:21.000Z
scripts/commit_validation/commit_validation/commit_validation.py
cypherdotXd/o3de
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
[ "Apache-2.0", "MIT" ]
1
2021-09-16T05:06:18.000Z
2021-09-16T05:06:18.000Z
# # Copyright (c) Contributors to the Open 3D Engine Project. # For complete copyright and license terms please see the LICENSE at the root of this distribution. # # SPDX-License-Identifier: Apache-2.0 OR MIT # # import abc import importlib import os import pkgutil import re import time from typing import Dict, List, Tuple VERBOSE = False class Commit(abc.ABC): """An interface for accessing details about a commit""" @abc.abstractmethod def get_files(self) -> List[str]: """Returns a list of local files added/modified by the commit""" pass @abc.abstractmethod def get_removed_files(self) -> List[str]: """Returns a list of local files removed by the commit""" pass @abc.abstractmethod def get_file_diff(self, str) -> str: """ Given a file name, returns a string in unified diff format that represents the changes made to that file for this commit. Most validators will only pay attention to added lines (with + in front) """ pass @abc.abstractmethod def get_description(self) -> str: """Returns the description of the commit""" pass @abc.abstractmethod def get_author(self) -> str: """Returns the author of the commit""" pass def validate_commit(commit: Commit, out_errors: List[str] = None, ignore_validators: List[str] = None) -> bool: """Validates a commit against all validators :param commit: The commit to validate :param out_errors: if not None, will populate with the list of errors given by the validators :param ignore_validators: Optional list of CommitValidator classes to ignore, by class name :return: True if there are no validation errors, and False otherwise """ failed_count = 0 passed_count = 0 start_time = time.time() # Find all the validators in the validators package (recursively) validator_classes = [] validators_dir = os.path.join(os.path.dirname(__file__), 'validators') for _, module_name, is_package in pkgutil.iter_modules([validators_dir]): if not is_package: module = importlib.import_module('commit_validation.validators.' 
+ module_name) validator = module.get_validator() if ignore_validators and validator.__name__ in ignore_validators: print(f"Disabled validation for '{validator.__name__}'") else: validator_classes.append(validator) error_summary = {} # Process validators for validator_class in validator_classes: validator = validator_class() validator_name = validator.__class__.__name__ error_list = [] passed = validator.run(commit, errors = error_list) if passed: passed_count += 1 print(f'{validator.__class__.__name__} PASSED') else: failed_count += 1 print(f'{validator.__class__.__name__} FAILED') error_summary[validator_name] = error_list end_time = time.time() if failed_count: print("VALIDATION FAILURE SUMMARY") for val_name in error_summary.keys(): errors = error_summary[val_name] if errors: for error_message in errors: first_line = True for line in error_message.splitlines(): if first_line: first_line = False print(f'VALIDATOR_FAILED: {val_name} {line}') else: print(f' {line}') # extra detail lines do not need machine parsing stats_strs = [] if failed_count > 0: stats_strs.append(f'{failed_count} failed') if passed_count > 0: stats_strs.append(f'{passed_count} passed') stats_str = ', '.join(stats_strs) + f' in {end_time - start_time:.2f}s' print() print(stats_str) return failed_count == 0 def IsFileSkipped(file_name) -> bool: if os.path.splitext(file_name)[1].lower() not in SOURCE_AND_SCRIPT_FILE_EXTENSIONS: skipped = True for pattern in SOURCE_AND_SCRIPT_FILE_PATTERNS: if pattern.match(file_name): skipped = False break return skipped return False class CommitValidator(abc.ABC): """A commit validator""" @abc.abstractmethod def run(self, commit: Commit, errors: List[str]) -> bool: """Validates a commit :param commit: The commit to validate :param errors: List of errors generated, append them to this list :return: True if the commit is valid, and False otherwise """ pass SOURCE_FILE_EXTENSIONS: Tuple[str, ...] = ( '.c', '.cc', '.cpp', '.cxx', '.h', '.hpp', '.hxx', '.inl', '.m', '.mm', '.cs', '.java' ) """File extensions for compiled source code""" SCRIPT_FILE_EXTENSIONS: Tuple[str, ...] = ( '.py', '.lua', '.bat', '.cmd', '.sh', '.js' ) """File extensions for interpreted code""" BUILD_FILE_EXTENSIONS: Tuple[str, ...] = ( '.cmake', ) """File extensions for build files""" SOURCE_AND_SCRIPT_FILE_EXTENSIONS: Tuple[str, ...] = SOURCE_FILE_EXTENSIONS + SCRIPT_FILE_EXTENSIONS + BUILD_FILE_EXTENSIONS """File extensions for both compiled and interpreted code""" BUILD_FILE_PATTERNS: Tuple[re.Pattern, ...] = ( re.compile(r'.*CMakeLists\.txt'), re.compile(r'.*Jenkinsfile') ) """File patterns for build files""" SOURCE_AND_SCRIPT_FILE_PATTERNS: Tuple[re.Pattern, ...] = BUILD_FILE_PATTERNS EXCLUDED_VALIDATION_PATTERNS = [ '*/.git/*', '*/3rdParty/*', '*/__pycache__/*', '*/External/*', 'build', 'Cache', '*/Code/Framework/AzCore/azgnmx/azgnmx/*', 'Code/Tools/CryFXC', 'Code/Tools/HLSLCrossCompiler', 'Code/Tools/HLSLCrossCompilerMETAL', 'Docs', 'python/runtime', 'restricted/*/Tools/*RemoteControl', 'Tools/3dsmax', '*/user/Cache/*', '*/user/log/*', ]
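validate_commit imports every module under commit_validation/validators, asks each one for its validator class via get_validator(), and runs the instances against a Commit implementation. A minimal in-memory Commit stand-in that satisfies the abstract interface above could look like the following; the file names and diff text are fabricated, and the final call is left commented because it needs the validators package on the path.

# Hypothetical in-memory Commit implementation for exercising validators;
# assumes the Commit base class and validate_commit above are importable.
from typing import List


class FakeCommit(Commit):
    def get_files(self) -> List[str]:
        return ['Code/Framework/Example.cpp']

    def get_removed_files(self) -> List[str]:
        return []

    def get_file_diff(self, path) -> str:
        return '+int Example() { return 0; }\n'

    def get_description(self) -> str:
        return 'Add Example()'

    def get_author(self) -> str:
        return 'dev@example.com'


# ok = validate_commit(FakeCommit(), out_errors=[])  # requires the validators package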
31.68254
124
0.631096
1,338
0.223447
0
0
1,160
0.193721
0
0
2,430
0.405812
b9db09c1d1c26d802117168878ef76954cf77560
3,360
py
Python
matrixprofile/algorithms/snippets.py
KSaiRahul21/matrixprofile
d8250e30d90ed0453bb7c35bb34ab0c04ae7b334
[ "Apache-2.0" ]
null
null
null
matrixprofile/algorithms/snippets.py
KSaiRahul21/matrixprofile
d8250e30d90ed0453bb7c35bb34ab0c04ae7b334
[ "Apache-2.0" ]
null
null
null
matrixprofile/algorithms/snippets.py
KSaiRahul21/matrixprofile
d8250e30d90ed0453bb7c35bb34ab0c04ae7b334
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate

import numpy as np

from matrixprofile import core
from matrixprofile.algorithms.mpdist import mpdist_vector


def snippets(ts, snippet_size, num_snippets=2, window_size=None):
    """
    The snippets algorithm is used to summarize your time series by
    identifying N number of representative subsequences. If you want to
    identify typical patterns in your time series, then this is the
    algorithm to use.

    Parameters
    ----------
    ts : array_like
        The time series.
    snippet_size : int
        The size of snippet desired.
    num_snippets : int, Default 2
        The number of snippets you would like to find.
    window_size : int, Default (snippet_size / 2)
        The window size.

    Returns
    -------
    list : snippets
        A list of snippets as dictionary objects with the following structure.

        >>> {
        >>>     fraction: fraction of the snippet,
        >>>     index: the index of the snippet,
        >>>     snippet: the snippet values
        >>> }
    """
    ts = core.to_np_array(ts).astype('d')
    n = len(ts)

    if not isinstance(snippet_size, int) or snippet_size < 4:
        raise ValueError('snippet_size must be an integer >= 4')

    if n < (2 * snippet_size):
        raise ValueError('Time series is too short relative to snippet length')

    if not window_size:
        window_size = int(np.floor(snippet_size / 2))

    if window_size >= snippet_size:
        raise ValueError('window_size must be smaller than snippet_size')

    # pad end of time series with zeros
    num_zeros = int(snippet_size * np.ceil(n / snippet_size) - n)
    ts = np.append(ts, np.zeros(num_zeros))

    # compute all profiles
    indices = np.arange(0, len(ts) - snippet_size, snippet_size)
    distances = []

    for j, i in enumerate(indices):
        distance = mpdist_vector(ts, ts[i:(i + snippet_size - 1)],
                                 int(window_size))
        distances.append(distance)

    distances = np.array(distances)

    # find N snippets
    snippets = []
    minis = np.inf
    total_min = None
    for n in range(num_snippets):
        minims = np.inf

        for i in range(len(indices)):
            s = np.sum(np.minimum(distances[i, :], minis))
            if minims > s:
                minims = s
                index = i

        minis = np.minimum(distances[index, :], minis)
        actual_index = indices[index]
        snippet = ts[actual_index:actual_index + snippet_size]
        snippet_distance = distances[index]
        snippets.append({
            'index': actual_index,
            'snippet': snippet,
            'distance': snippet_distance
        })

        if isinstance(total_min, type(None)):
            total_min = snippet_distance
        else:
            total_min = np.minimum(total_min, snippet_distance)

    # compute the fraction of each snippet
    for snippet in snippets:
        mask = (snippet['distance'] <= total_min)
        snippet['fraction'] = mask.sum() / (len(ts) - snippet_size)
        total_min = total_min - mask
        del snippet['distance']

    return snippets
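A short usage sketch, assuming the matrixprofile package is installed so the function above and its mpdist dependency import cleanly: build a toy series with two alternating regimes and ask for two snippets.

# Hypothetical usage sketch; the synthetic series is fabricated for illustration.
import numpy as np
from matrixprofile.algorithms.snippets import snippets

t = np.arange(0, 40, 0.05)
series = np.concatenate([np.sin(t), np.sign(np.sin(t))])  # sine regime, then square-ish regime

for s in snippets(series, snippet_size=200, num_snippets=2):
    print(s['index'], round(s['fraction'], 2), s['snippet'].shape)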
29.734513
84
0.633036
0
0
0
0
0
0
0
0
1,237
0.368155
b9db24edad8766b6e734d6a8a9c26aff6bb04235
2,360
py
Python
jina/logging/formatter.py
yk/jina
ab66e233e74b956390f266881ff5dc4e0110d3ff
[ "Apache-2.0" ]
1
2020-12-23T12:34:00.000Z
2020-12-23T12:34:00.000Z
jina/logging/formatter.py
yk/jina
ab66e233e74b956390f266881ff5dc4e0110d3ff
[ "Apache-2.0" ]
null
null
null
jina/logging/formatter.py
yk/jina
ab66e233e74b956390f266881ff5dc4e0110d3ff
[ "Apache-2.0" ]
null
null
null
import json
import re
from copy import copy
from logging import Formatter

from .profile import used_memory
from ..helper import colored


class ColorFormatter(Formatter):
    """Format the log into colored logs based on the log-level. """

    MAPPING = {
        'DEBUG': dict(color='white', on_color=None),  # white
        'INFO': dict(color='white', on_color=None),  # cyan
        'WARNING': dict(color='yellow', on_color='on_grey'),  # yellow
        'ERROR': dict(color='red', on_color=None),  # 31 for red
        'CRITICAL': dict(color='white', on_color='on_red'),  # white on red bg
        'SUCCESS': dict(color='green', on_color=None),  # white on red bg
    }  #: log-level to color mapping

    def format(self, record):
        cr = copy(record)
        seq = self.MAPPING.get(cr.levelname, self.MAPPING['INFO'])  # default white
        cr.msg = colored(cr.msg, **seq)
        return super().format(cr)


class PlainFormatter(Formatter):
    """Remove all control chars from the log and format it as plain text

    Also restrict the max-length of msg to 512
    """

    def format(self, record):
        cr = copy(record)
        if isinstance(cr.msg, str):
            cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))[:512]
        return super().format(cr)


class JsonFormatter(Formatter):
    """Format the log message as a JSON object so that it can be later used/parsed in browser with javascript. """

    KEYS = {'created', 'filename', 'funcName', 'levelname', 'lineno', 'msg',
            'module', 'name', 'pathname', 'process', 'thread', 'processName',
            'threadName', 'log_id'}  #: keys to extract from the log

    def format(self, record):
        cr = copy(record)
        cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))
        return json.dumps(
            {k: getattr(cr, k) for k in self.KEYS if hasattr(cr, k)},
            sort_keys=True)


class ProfileFormatter(Formatter):
    """Format the log message as JSON object and add the current used memory into it"""

    def format(self, record):
        cr = copy(record)
        if isinstance(cr.msg, dict):
            cr.msg.update({k: getattr(cr, k) for k in ['created', 'module', 'process', 'thread']})
            cr.msg['memory'] = used_memory(unit=1)
            return json.dumps(cr.msg, sort_keys=True)
        else:
            return ''
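These formatters drop into the standard logging machinery like any other logging.Formatter. A wiring sketch using the JSON variant is below; it assumes the jina package is importable so the relative imports at the top of the module resolve.

# Hypothetical wiring sketch for JsonFormatter with the stdlib logging module.
import logging
from jina.logging.formatter import JsonFormatter

logger = logging.getLogger('demo')
handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.info('hello')  # emitted as a JSON object with msg, levelname, lineno, ...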
34.705882
114
0.601695
2,211
0.936864
0
0
0
0
0
0
847
0.358898
b9db51239c1e9a509c29f6e80aebfb0363b62210
194
py
Python
atcoder/abc191/b.py
sugitanishi/competitive-programming
51af65fdce514ece12f8afbf142b809d63eefb5d
[ "MIT" ]
null
null
null
atcoder/abc191/b.py
sugitanishi/competitive-programming
51af65fdce514ece12f8afbf142b809d63eefb5d
[ "MIT" ]
null
null
null
atcoder/abc191/b.py
sugitanishi/competitive-programming
51af65fdce514ece12f8afbf142b809d63eefb5d
[ "MIT" ]
null
null
null
import sys
sys.setrecursionlimit(10000000)
input=lambda : sys.stdin.readline().rstrip()
n,x=map(int,input().split())
a=list(map(int,input().split()))
aa=list(filter(lambda b:b!=x,a))
print(*aa)
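The snippet solves AtCoder ABC191 B: read `n x` and a list of `n` integers, then print the list with every occurrence of `x` removed. The same filtering step can be checked without stdin (the values are a made-up sample):

# Made-up sample mirroring the filter above: drop every occurrence of x.
n, x = 5, 3
a = [1, 3, 2, 3, 5]
print(*list(filter(lambda b: b != x, a)))  # -> 1 2 5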
24.25
44
0.71134
0
0
0
0
0
0
0
0
0
0
b9dc15c3ca6876833207138ba4d65fbd0be25acd
61,341
py
Python
tests/integration/test_streaming_e2e.py
cfogg/python-client
40e6891c8240e6b2acd5df538e622e9f15de43d6
[ "Apache-2.0" ]
null
null
null
tests/integration/test_streaming_e2e.py
cfogg/python-client
40e6891c8240e6b2acd5df538e622e9f15de43d6
[ "Apache-2.0" ]
null
null
null
tests/integration/test_streaming_e2e.py
cfogg/python-client
40e6891c8240e6b2acd5df538e622e9f15de43d6
[ "Apache-2.0" ]
null
null
null
"""Streaming integration tests.""" # pylint:disable=no-self-use,invalid-name,too-many-arguments,too-few-public-methods,line-too-long # pylint:disable=too-many-statements,too-many-locals,too-many-lines import threading import time import json from queue import Queue from splitio.client.factory import get_factory from tests.helpers.mockserver import SSEMockServer, SplitMockServer try: # try to import python3 names. fallback to python2 from urllib.parse import parse_qs except ImportError: from urlparse import parse_qs class StreamingIntegrationTests(object): """Test streaming operation and failover.""" def test_happiness(self): """Test initialization & splits/segment updates.""" auth_server_response = { 'pushEnabled': True, 'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.' 'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO' 'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P' 'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm' '9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ' 'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh' 'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c' 'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E' 'vJh17WlOlAKhcD0') } split_changes = { -1: { 'since': -1, 'till': 1, 'splits': [make_simple_split('split1', 1, True, False, 'on', 'user', True)] }, 1: { 'since': 1, 'till': 1, 'splits': [] } } segment_changes = {} split_backend_requests = Queue() split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests, auth_server_response) sse_requests = Queue() sse_server = SSEMockServer(sse_requests) split_backend.start() sse_server.start() sse_server.publish(make_initial_event()) sse_server.publish(make_occupancy('control_pri', 2)) sse_server.publish(make_occupancy('control_sec', 2)) kwargs = { 'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(), 'config': {'connectTimeout': 10000} } factory = get_factory('some_apikey', **kwargs) factory.block_until_ready(1) assert factory.ready assert factory.client().get_treatment('maldo', 'split1') == 'on' time.sleep(1) split_changes[1] = { 'since': 1, 'till': 2, 'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)] } split_changes[2] = {'since': 2, 'till': 2, 'splits': []} sse_server.publish(make_split_change_event(2)) time.sleep(1) assert factory.client().get_treatment('maldo', 'split1') == 'off' split_changes[2] = { 'since': 2, 'till': 3, 'splits': [make_split_with_segment('split2', 2, True, False, 'off', 'user', 'off', 'segment1')] } split_changes[3] = {'since': 3, 'till': 3, 'splits': []} segment_changes[('segment1', -1)] = { 'name': 'segment1', 'added': ['maldo'], 'removed': [], 'since': -1, 'till': 1 } segment_changes[('segment1', 1)] = {'name': 'segment1', 'added': [], 'removed': [], 'since': 1, 'till': 1} sse_server.publish(make_split_change_event(3)) time.sleep(1) sse_server.publish(make_segment_change_event('segment1', 1)) time.sleep(1) assert factory.client().get_treatment('pindon', 'split2') == 'off' assert factory.client().get_treatment('maldo', 'split2') == 'on' # Validate the SSE request sse_request = sse_requests.get() assert sse_request.method == 'GET' path, qs = 
sse_request.path.split('?', 1) assert path == '/event-stream' qs = parse_qs(qs) assert qs['accessToken'][0] == ( 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05' 'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW' 'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc' 'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI' 'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY' '2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd' 'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib' 'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M' 'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0' ) assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', '[?occupancy=metrics.publishers]control_pri', '[?occupancy=metrics.publishers]control_sec']) assert qs['v'][0] == '1.1' # Initial apikey validation req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Initial splits fetch req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Auth req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/auth' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after streaming connected req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Fetch after first notification req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Fetch after second notification req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=3' assert req.headers['authorization'] == 'Bearer some_apikey' # Segment change notification req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/segmentChanges/segment1?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until segment1 since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/segmentChanges/segment1?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Cleanup destroy_event = threading.Event() factory.destroy(destroy_event) destroy_event.wait() sse_server.publish(sse_server.GRACEFUL_REQUEST_END) sse_server.stop() split_backend.stop() def test_occupancy_flicker(self): """Test that changes in occupancy switch between polling & streaming properly.""" auth_server_response = { 'pushEnabled': True, 'token': 
('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.' 'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO' 'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P' 'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm' '9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ' 'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh' 'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c' 'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E' 'vJh17WlOlAKhcD0') } split_changes = { -1: { 'since': -1, 'till': 1, 'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)] }, 1: {'since': 1, 'till': 1, 'splits': []} } segment_changes = {} split_backend_requests = Queue() split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests, auth_server_response) sse_requests = Queue() sse_server = SSEMockServer(sse_requests) split_backend.start() sse_server.start() sse_server.publish(make_initial_event()) sse_server.publish(make_occupancy('control_pri', 2)) sse_server.publish(make_occupancy('control_sec', 2)) kwargs = { 'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(), 'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10} } factory = get_factory('some_apikey', **kwargs) factory.block_until_ready(1) assert factory.ready time.sleep(2) # Get a hook of the task so we can query its status task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access assert not task.running() assert factory.client().get_treatment('maldo', 'split1') == 'on' # Make a change in the BE but don't send the event. # After dropping occupancy, the sdk should switch to polling # and perform a syncAll that gets this change split_changes[1] = { 'since': 1, 'till': 2, 'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)] } split_changes[2] = {'since': 2, 'till': 2, 'splits': []} sse_server.publish(make_occupancy('control_pri', 0)) sse_server.publish(make_occupancy('control_sec', 0)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'off' assert task.running() # We make another chagne in the BE and don't send the event. # We restore occupancy, and it should be fetched by the # sync all after streaming is restored. 
split_changes[2] = { 'since': 2, 'till': 3, 'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)] } split_changes[3] = {'since': 3, 'till': 3, 'splits': []} sse_server.publish(make_occupancy('control_pri', 1)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'on' assert not task.running() # Now we make another change and send an event so it's propagated split_changes[3] = { 'since': 3, 'till': 4, 'splits': [make_simple_split('split1', 4, True, False, 'off', 'user', False)] } split_changes[4] = {'since': 4, 'till': 4, 'splits': []} sse_server.publish(make_split_change_event(4)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'off' # Kill the split split_changes[4] = { 'since': 4, 'till': 5, 'splits': [make_simple_split('split1', 5, True, True, 'frula', 'user', False)] } split_changes[5] = {'since': 5, 'till': 5, 'splits': []} sse_server.publish(make_split_kill_event('split1', 'frula', 5)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'frula' # Validate the SSE request sse_request = sse_requests.get() assert sse_request.method == 'GET' path, qs = sse_request.path.split('?', 1) assert path == '/event-stream' qs = parse_qs(qs) assert qs['accessToken'][0] == ( 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05' 'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW' 'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc' 'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI' 'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY' '2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd' 'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib' 'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M' 'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0' ) assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', '[?occupancy=metrics.publishers]control_pri', '[?occupancy=metrics.publishers]control_sec']) assert qs['v'][0] == '1.1' # Initial apikey validation req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Initial splits fetch req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Auth req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/auth' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after streaming connected req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Fetch after first notification req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Fetch after second notification req 
= split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=3' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=3' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=4' assert req.headers['authorization'] == 'Bearer some_apikey' # Split kill req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=4' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=5' assert req.headers['authorization'] == 'Bearer some_apikey' # Cleanup destroy_event = threading.Event() factory.destroy(destroy_event) destroy_event.wait() sse_server.publish(sse_server.GRACEFUL_REQUEST_END) sse_server.stop() split_backend.stop() def test_start_without_occupancy(self): """Test an SDK starting with occupancy on 0 and switching to streamin afterwards.""" auth_server_response = { 'pushEnabled': True, 'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.' 'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO' 'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P' 'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm' '9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ' 'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh' 'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c' 'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E' 'vJh17WlOlAKhcD0') } split_changes = { -1: { 'since': -1, 'till': 1, 'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)] }, 1: {'since': 1, 'till': 1, 'splits': []} } segment_changes = {} split_backend_requests = Queue() split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests, auth_server_response) sse_requests = Queue() sse_server = SSEMockServer(sse_requests) split_backend.start() sse_server.start() sse_server.publish(make_initial_event()) sse_server.publish(make_occupancy('control_pri', 0)) sse_server.publish(make_occupancy('control_sec', 0)) kwargs = { 'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(), 'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10} } factory = get_factory('some_apikey', **kwargs) factory.block_until_ready(1) assert factory.ready time.sleep(2) # Get a hook of the task so we can query its status task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access assert task.running() assert factory.client().get_treatment('maldo', 'split1') == 'on' # Make a change in the BE but don't send the event. 
# After restoring occupancy, the sdk should switch to polling # and perform a syncAll that gets this change split_changes[1] = { 'since': 1, 'till': 2, 'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)] } split_changes[2] = {'since': 2, 'till': 2, 'splits': []} sse_server.publish(make_occupancy('control_sec', 1)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'off' assert not task.running() # Validate the SSE request sse_request = sse_requests.get() assert sse_request.method == 'GET' path, qs = sse_request.path.split('?', 1) assert path == '/event-stream' qs = parse_qs(qs) assert qs['accessToken'][0] == ( 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05' 'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW' 'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc' 'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI' 'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY' '2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd' 'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib' 'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M' 'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0' ) assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', '[?occupancy=metrics.publishers]control_pri', '[?occupancy=metrics.publishers]control_sec']) assert qs['v'][0] == '1.1' # Initial apikey validation req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Initial splits fetch req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Auth req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/auth' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after streaming connected req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after push down req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after push restored req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Second iteration of previous syncAll req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Cleanup destroy_event = threading.Event() factory.destroy(destroy_event) destroy_event.wait() sse_server.publish(sse_server.GRACEFUL_REQUEST_END) sse_server.stop() split_backend.stop() def test_streaming_status_changes(self): """Test changes between streaming enabled, paused and disabled.""" auth_server_response = { 'pushEnabled': True, 'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.' 
'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO' 'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P' 'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm' '9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ' 'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh' 'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c' 'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E' 'vJh17WlOlAKhcD0') } split_changes = { -1: { 'since': -1, 'till': 1, 'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)] }, 1: {'since': 1, 'till': 1, 'splits': []} } segment_changes = {} split_backend_requests = Queue() split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests, auth_server_response) sse_requests = Queue() sse_server = SSEMockServer(sse_requests) split_backend.start() sse_server.start() sse_server.publish(make_initial_event()) sse_server.publish(make_occupancy('control_pri', 2)) sse_server.publish(make_occupancy('control_sec', 2)) kwargs = { 'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(), 'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10} } factory = get_factory('some_apikey', **kwargs) factory.block_until_ready(1) assert factory.ready time.sleep(2) # Get a hook of the task so we can query its status task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access assert not task.running() assert factory.client().get_treatment('maldo', 'split1') == 'on' # Make a change in the BE but don't send the event. # After dropping occupancy, the sdk should switch to polling # and perform a syncAll that gets this change split_changes[1] = { 'since': 1, 'till': 2, 'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)] } split_changes[2] = {'since': 2, 'till': 2, 'splits': []} sse_server.publish(make_control_event('STREAMING_PAUSED', 1)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'off' assert task.running() # We make another chagne in the BE and don't send the event. # We restore occupancy, and it should be fetched by the # sync all after streaming is restored. 
split_changes[2] = { 'since': 2, 'till': 3, 'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)] } split_changes[3] = {'since': 3, 'till': 3, 'splits': []} sse_server.publish(make_control_event('STREAMING_ENABLED', 2)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'on' assert not task.running() # Now we make another change and send an event so it's propagated split_changes[3] = { 'since': 3, 'till': 4, 'splits': [make_simple_split('split1', 4, True, False, 'off', 'user', False)] } split_changes[4] = {'since': 4, 'till': 4, 'splits': []} sse_server.publish(make_split_change_event(4)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'off' assert not task.running() split_changes[4] = { 'since': 4, 'till': 5, 'splits': [make_simple_split('split1', 5, True, False, 'off', 'user', True)] } split_changes[5] = {'since': 5, 'till': 5, 'splits': []} sse_server.publish(make_control_event('STREAMING_DISABLED', 2)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'on' assert task.running() assert 'PushStatusHandler' not in [t.name for t in threading.enumerate()] # Validate the SSE request sse_request = sse_requests.get() assert sse_request.method == 'GET' path, qs = sse_request.path.split('?', 1) assert path == '/event-stream' qs = parse_qs(qs) assert qs['accessToken'][0] == ( 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05' 'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW' 'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc' 'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI' 'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY' '2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd' 'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib' 'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M' 'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0' ) assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', '[?occupancy=metrics.publishers]control_pri', '[?occupancy=metrics.publishers]control_sec']) assert qs['v'][0] == '1.1' # Initial apikey validation req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Initial splits fetch req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Auth req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/auth' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after streaming connected req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll on push down req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == 
'/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after push is up req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=3' assert req.headers['authorization'] == 'Bearer some_apikey' # Fetch after notification req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=3' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=4' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after streaming disabled req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=4' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=5' assert req.headers['authorization'] == 'Bearer some_apikey' # Cleanup destroy_event = threading.Event() factory.destroy(destroy_event) destroy_event.wait() sse_server.publish(sse_server.GRACEFUL_REQUEST_END) sse_server.stop() split_backend.stop() def test_server_closes_connection(self): """Test that if the server closes the connection, the whole flow is retried with BO.""" auth_server_response = { 'pushEnabled': True, 'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.' 'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO' 'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P' 'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm' '9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ' 'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh' 'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c' 'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E' 'vJh17WlOlAKhcD0') } split_changes = { -1: { 'since': -1, 'till': 1, 'splits': [make_simple_split('split1', 1, True, False, 'on', 'user', True)] }, 1: { 'since': 1, 'till': 1, 'splits': [] } } segment_changes = {} split_backend_requests = Queue() split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests, auth_server_response) sse_requests = Queue() sse_server = SSEMockServer(sse_requests) split_backend.start() sse_server.start() sse_server.publish(make_initial_event()) sse_server.publish(make_occupancy('control_pri', 2)) sse_server.publish(make_occupancy('control_sec', 2)) kwargs = { 'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(), 'config': {'connectTimeout': 10000, 'featuresRefreshRate': 100, 'segmentsRefreshRate': 100, 'metricsRefreshRate': 100, 'impressionsRefreshRate': 100, 'eventsPushRate': 100} } factory = get_factory('some_apikey', **kwargs) factory.block_until_ready(1) assert factory.ready assert factory.client().get_treatment('maldo', 'split1') == 'on' task = factory._sync_manager._synchronizer._split_tasks.split_task._task # 
pylint:disable=protected-access assert not task.running() time.sleep(1) split_changes[1] = { 'since': 1, 'till': 2, 'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)] } split_changes[2] = {'since': 2, 'till': 2, 'splits': []} sse_server.publish(make_split_change_event(2)) time.sleep(1) assert factory.client().get_treatment('maldo', 'split1') == 'off' sse_server.publish(SSEMockServer.GRACEFUL_REQUEST_END) time.sleep(1) assert factory.client().get_treatment('maldo', 'split1') == 'off' assert task.running() time.sleep(2) # wait for the backoff to expire so streaming gets re-attached # re-send initial event AND occupancy sse_server.publish(make_initial_event()) sse_server.publish(make_occupancy('control_pri', 2)) sse_server.publish(make_occupancy('control_sec', 2)) time.sleep(2) assert not task.running() split_changes[2] = { 'since': 2, 'till': 3, 'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)] } split_changes[3] = {'since': 3, 'till': 3, 'splits': []} sse_server.publish(make_split_change_event(3)) time.sleep(1) assert factory.client().get_treatment('maldo', 'split1') == 'on' assert not task.running() # Validate the SSE requests sse_request = sse_requests.get() assert sse_request.method == 'GET' path, qs = sse_request.path.split('?', 1) assert path == '/event-stream' qs = parse_qs(qs) assert qs['accessToken'][0] == ( 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05' 'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW' 'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc' 'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI' 'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY' '2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd' 'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib' 'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M' 'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0' ) assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', '[?occupancy=metrics.publishers]control_pri', '[?occupancy=metrics.publishers]control_sec']) assert qs['v'][0] == '1.1' sse_request = sse_requests.get() assert sse_request.method == 'GET' path, qs = sse_request.path.split('?', 1) assert path == '/event-stream' qs = parse_qs(qs) assert qs['accessToken'][0] == ( 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05' 'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW' 'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc' 'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI' 'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY' '2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd' 'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib' 'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M' 'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0' ) assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', '[?occupancy=metrics.publishers]control_pri', '[?occupancy=metrics.publishers]control_sec']) assert qs['v'][0] == '1.1' # Initial apikey validation req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Initial splits fetch req = 
split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Auth req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/auth' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after streaming connected req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Fetch after first notification req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll on retryable error handling req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Auth after connection breaks req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/auth' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after streaming connected again req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Fetch after new notification req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=3' assert req.headers['authorization'] == 'Bearer some_apikey' # Cleanup destroy_event = threading.Event() factory.destroy(destroy_event) destroy_event.wait() sse_server.publish(sse_server.GRACEFUL_REQUEST_END) sse_server.stop() split_backend.stop() def test_ably_errors_handling(self): """Test incoming ably errors and validate its handling.""" import logging logger = logging.getLogger('splitio') handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.DEBUG) auth_server_response = { 'pushEnabled': True, 'token': ('eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.' 
'eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pO' 'RFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjcmliZVwiXSxcIk1UWXlNVGN4T1RRNE13P' 'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm' '9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ' 'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh' 'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c' 'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E' 'vJh17WlOlAKhcD0') } split_changes = { -1: { 'since': -1, 'till': 1, 'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)] }, 1: {'since': 1, 'till': 1, 'splits': []} } segment_changes = {} split_backend_requests = Queue() split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests, auth_server_response) sse_requests = Queue() sse_server = SSEMockServer(sse_requests) split_backend.start() sse_server.start() sse_server.publish(make_initial_event()) sse_server.publish(make_occupancy('control_pri', 2)) sse_server.publish(make_occupancy('control_sec', 2)) kwargs = { 'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(), 'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(), 'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10} } factory = get_factory('some_apikey', **kwargs) factory.block_until_ready(1) assert factory.ready time.sleep(2) # Get a hook of the task so we can query its status task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access assert not task.running() assert factory.client().get_treatment('maldo', 'split1') == 'on' # Make a change in the BE but don't send the event. 
# We'll send an ignorable error and check it has nothing happened split_changes[1] = { 'since': 1, 'till': 2, 'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)] } split_changes[2] = {'since': 2, 'till': 2, 'splits': []} sse_server.publish(make_ably_error_event(60000, 600)) time.sleep(1) assert factory.client().get_treatment('maldo', 'split1') == 'on' assert not task.running() sse_server.publish(make_ably_error_event(40145, 401)) sse_server.publish(sse_server.GRACEFUL_REQUEST_END) time.sleep(3) assert task.running() assert factory.client().get_treatment('maldo', 'split1') == 'off' # Re-publish initial events so that the retry succeeds sse_server.publish(make_initial_event()) sse_server.publish(make_occupancy('control_pri', 2)) sse_server.publish(make_occupancy('control_sec', 2)) time.sleep(3) assert not task.running() # Assert streaming is working properly split_changes[2] = { 'since': 2, 'till': 3, 'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)] } split_changes[3] = {'since': 3, 'till': 3, 'splits': []} sse_server.publish(make_split_change_event(3)) time.sleep(2) assert factory.client().get_treatment('maldo', 'split1') == 'on' assert not task.running() # Send a non-retryable ably error sse_server.publish(make_ably_error_event(40200, 402)) sse_server.publish(sse_server.GRACEFUL_REQUEST_END) time.sleep(3) # Assert sync-task is running and the streaming status handler thread is over assert task.running() assert 'PushStatusHandler' not in [t.name for t in threading.enumerate()] # Validate the SSE requests sse_request = sse_requests.get() assert sse_request.method == 'GET' path, qs = sse_request.path.split('?', 1) assert path == '/event-stream' qs = parse_qs(qs) assert qs['accessToken'][0] == ( 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05' 'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW' 'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc' 'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI' 'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY' '2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd' 'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib' 'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M' 'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0' ) assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', '[?occupancy=metrics.publishers]control_pri', '[?occupancy=metrics.publishers]control_sec']) assert qs['v'][0] == '1.1' assert sse_request.method == 'GET' path, qs = sse_request.path.split('?', 1) assert path == '/event-stream' qs = parse_qs(qs) assert qs['accessToken'][0] == ( 'eyJhbGciOiJIUzI1NiIsImtpZCI6IjVZOU05' 'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW' 'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc' 'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI' 'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY' '2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd' 'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib' 'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M' 'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0' ) assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', 
'[?occupancy=metrics.publishers]control_pri', '[?occupancy=metrics.publishers]control_sec']) assert qs['v'][0] == '1.1' # Initial apikey validation req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Initial splits fetch req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=-1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Auth req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/auth' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after streaming connected req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll retriable error req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=1' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Auth again req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/auth' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after push is up req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Fetch after notification req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=2' assert req.headers['authorization'] == 'Bearer some_apikey' # Iteration until since == till req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=3' assert req.headers['authorization'] == 'Bearer some_apikey' # SyncAll after non recoverable ably error req = split_backend_requests.get() assert req.method == 'GET' assert req.path == '/api/splitChanges?since=3' assert req.headers['authorization'] == 'Bearer some_apikey' # Cleanup destroy_event = threading.Event() factory.destroy(destroy_event) destroy_event.wait() sse_server.publish(sse_server.GRACEFUL_REQUEST_END) sse_server.stop() split_backend.stop() def make_split_change_event(change_number): """Make a split change event.""" return { 'event': 'message', 'data': json.dumps({ 'id':'TVUsxaabHs:0:0', 'clientId':'pri:MzM0ODI1MTkxMw==', 'timestamp': change_number-1, 'encoding':'json', 'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'data': json.dumps({ 'type': 'SPLIT_UPDATE', 'changeNumber': change_number }) }) } def make_split_kill_event(name, default_treatment, change_number): """Make a split change event.""" return { 'event': 'message', 'data': json.dumps({ 'id':'TVUsxaabHs:0:0', 'clientId':'pri:MzM0ODI1MTkxMw==', 'timestamp': change_number-1, 'encoding':'json', 'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits', 'data': json.dumps({ 'type': 'SPLIT_KILL', 'splitName': name, 'defaultTreatment': default_treatment, 'changeNumber': change_number }) }) } def make_initial_event(): """Make a split change event.""" return 
{'id':'TVUsxaabHs:0:0'} def make_occupancy(channel, publishers): """Make an occupancy event.""" return { 'event': 'message', 'data': json.dumps({ 'id':'aP6EuhrcUm:0:0', 'timestamp':1604325712734, 'encoding': 'json', 'channel': "[?occupancy=metrics.publishers]%s" % channel, 'data': json.dumps({'metrics': {'publishers': publishers}}), 'name':'[meta]occupancy' }) } def make_segment_change_event(name, change_number): """Make a split change event.""" return { 'event': 'message', 'data': json.dumps({ 'id':'TVUsxaabHs:0:0', 'clientId':'pri:MzM0ODI1MTkxMw==', 'timestamp': change_number-1, 'encoding':'json', 'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments', 'data': json.dumps({ 'type': 'SEGMENT_UPDATE', 'segmentName': name, 'changeNumber': change_number }) }) } def make_control_event(control_type, timestamp): """Make a control event.""" return { 'event': 'message', 'data': json.dumps({ 'id':'TVUsxaabHs:0:0', 'clientId':'pri:MzM0ODI1MTkxMw==', 'timestamp': timestamp, 'encoding':'json', 'channel':'[?occupancy=metrics.publishers]control_pri', 'data': json.dumps({ 'type': 'CONTROL', 'controlType': control_type, }) }) } def make_ably_error_event(code, status): """Make a control event.""" return { 'event': 'error', 'data': json.dumps({ 'message':'Invalid accessToken in request: sarasa', 'code': code, 'statusCode': status, 'href':"https://help.ably.io/error/%d" % code }) } def make_simple_split(name, cn, active, killed, default_treatment, tt, on): """Make a simple split.""" return { 'trafficTypeName': tt, 'name': name, 'seed': 1699838640, 'status': 'ACTIVE' if active else 'ARCHIVED', 'changeNumber': cn, 'killed': killed, 'defaultTreatment': default_treatment, 'conditions': [ { 'matcherGroup': { 'combiner': 'AND', 'matchers': [ { 'matcherType': 'ALL_KEYS', 'negate': False, 'userDefinedSegmentMatcherData': None, 'whitelistMatcherData': None } ] }, 'partitions': [ {'treatment': 'on' if on else 'off', 'size': 100}, {'treatment': 'off' if on else 'on', 'size': 0} ] } ] } def make_split_with_segment(name, cn, active, killed, default_treatment, tt, on, segment): """Make a split with a segment.""" return { 'trafficTypeName': tt, 'name': name, 'seed': cn, 'status': 'ACTIVE' if active else 'ARCHIVED', 'changeNumber': cn, 'killed': killed, 'defaultTreatment': default_treatment, 'configurations': { 'on': '{\'size\':15,\'test\':20}' }, 'conditions': [ { 'matcherGroup': { 'combiner': 'AND', 'matchers': [ { 'matcherType': 'IN_SEGMENT', 'negate': False, 'userDefinedSegmentMatcherData': {'segmentName': segment}, 'whitelistMatcherData': None } ] }, 'partitions': [{ 'treatment': 'on' if on else 'off', 'size': 100 }] } ] }
43.137131
115
0.600887
55,493
0.904664
0
0
0
0
0
0
25,177
0.410443
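A note on the streaming tests in the record above: they repeatedly assert that the SSE request subscribes to the two split/segment channels plus the occupancy-qualified control channels, and that accessToken equals the JWT served by the mock auth endpoint. Those channel names are not arbitrary; they are the keys of the token's x-ably-capability claim. The sketch below is not part of the dataset record: it is a hedged illustration that only uses the standard library, does not verify the token signature, and simply decodes the payload to list the granted channels.

import base64
import json

def channels_from_push_token(token):
    """List the channel names granted in the token's x-ably-capability claim."""
    payload_b64 = token.split('.')[1]
    payload_b64 += '=' * (-len(payload_b64) % 4)  # restore the stripped base64 padding
    claims = json.loads(base64.urlsafe_b64decode(payload_b64))
    capability = json.loads(claims['x-ably-capability'])
    return sorted(capability.keys())

# e.g. channels_from_push_token(auth_server_response['token']) yields the two
# MTYyMTcxOTQ4Mw==_..._splits / _segments data channels plus control_pri and
# control_sec, which the tests subscribe to with the
# [?occupancy=metrics.publishers] qualifier.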
b9dc3713922fc2f091f8ac06b4fabec4e905eb4d
1,647
py
Python
venues/abstract_venue.py
weezel/BandEventNotifier
55824ba26aba9882f46d1770ec5df592a5dc32bc
[ "0BSD" ]
null
null
null
venues/abstract_venue.py
weezel/BandEventNotifier
55824ba26aba9882f46d1770ec5df592a5dc32bc
[ "0BSD" ]
2
2020-02-10T19:37:47.000Z
2020-02-10T19:44:54.000Z
venues/abstract_venue.py
weezel/BandEventNotifier
55824ba26aba9882f46d1770ec5df592a5dc32bc
[ "0BSD" ]
null
null
null
import re

from abc import ABC, abstractmethod
from typing import Any, Dict, Generator


class IncorrectVenueImplementation(Exception):
    pass


# class AbstractVenue(metaclass=ABC):
class AbstractVenue(ABC):
    def __init__(self):
        self.url = ""
        self.name = ""
        self.city = ""
        self.country = ""
        self.pricepat_monetary = re.compile("[0-9.,]+.€")
        self.pricepat_plain = re.compile("[0-9.,]+")

    def get_venue_name(self) -> str:
        return self.name

    def get_city(self) -> str:
        return self.city

    def get_country(self) -> str:
        return self.country

    def event_sqlentity(self) -> Dict[str, str]:
        return {"name": self.name,
                "city": self.city,
                "country": self.country}

    def parse_price(self, info_tag: str) -> str:
        prices_with_mon = self.pricepat_monetary.findall(info_tag)
        prices = []
        for price in prices_with_mon:
            parsed_price = self.pricepat_plain.findall(price)
            if len(parsed_price) == 0:
                continue
            prices.append("".join(parsed_price))

        if len(prices) == 0:
            return "0€"
        elif len(prices) == 2:
            in_advance, from_door = prices[0], prices[1]
            return f"{in_advance}€/{from_door}€"
        return "{}€".format("".join(prices))

    # FIXME Proper class type checking
    def __eq__(self, other):
        return hasattr(other, "url") \
            and other.url == self.url

    @abstractmethod
    def parse_events(self, data: Any) \
            -> Generator[Dict[str, Any], None, None]:
        pass
27.45
66
0.57377
1,527
0.921545
0
0
122
0.073627
0
0
179
0.108027
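AbstractVenue in the record above supplies price parsing and URL-based equality while leaving parse_events abstract. As a hedged illustration, a concrete venue only needs to fill in the metadata fields and implement parse_events; DummyVenue and its example data below are hypothetical and not part of the BandEventNotifier repository, and the sketch assumes AbstractVenue from the record is importable.

from typing import Any, Dict, Generator

class DummyVenue(AbstractVenue):
    def __init__(self):
        super().__init__()
        self.url = "https://example.org/gigs"
        self.name = "Dummy Hall"
        self.city = "Helsinki"
        self.country = "Finland"

    def parse_events(self, data: Any) \
            -> Generator[Dict[str, Any], None, None]:
        # 'data' would normally be scraped HTML/JSON; here it is already a list of dicts
        for ev in data:
            yield {
                "venue": self.get_venue_name(),
                "date": ev["date"],
                "name": ev["name"],
                "price": self.parse_price(ev["info"]),
            }

events = list(DummyVenue().parse_events(
    [{"date": "2024-05-01", "name": "Band X", "info": "Liput 12,50€/15€"}]
))
# parse_price("Liput 12,50€/15€") -> "12,50€/15€", i.e. the compact advance/door form.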
b9dcf24da986778ebcd29602d923908626cfea3c
4,263
py
Python
mtl/util/pipeline.py
vandurme/TFMTL
5958187900bdf67089a237c523b6caa899f63ac1
[ "Apache-2.0" ]
10
2019-05-18T22:23:44.000Z
2022-01-25T15:24:45.000Z
mtl/util/pipeline.py
vandurme/TFMTL
5958187900bdf67089a237c523b6caa899f63ac1
[ "Apache-2.0" ]
1
2020-01-07T15:24:16.000Z
2020-01-15T00:39:01.000Z
mtl/util/pipeline.py
vandurme/TFMTL
5958187900bdf67089a237c523b6caa899f63ac1
[ "Apache-2.0" ]
1
2021-12-02T02:24:06.000Z
2021-12-02T02:24:06.000Z
# Copyright 2018 Johns Hopkins University. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple

import tensorflow as tf
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import parsing_ops


class Pipeline(object):
    def __init__(self, tfrecord_file, feature_map, batch_size=32,
                 num_threads=4, prefetch_buffer_size=1,
                 static_max_length=None, shuffle_buffer_size=10000,
                 shuffle=True, num_epochs=None, one_shot=False):
        self._feature_map = feature_map
        self._batch_size = batch_size
        self._static_max_length = static_max_length

        # Initialize the dataset
        dataset = tf.data.TFRecordDataset(tfrecord_file)

        # Maybe randomize
        if shuffle:
            dataset = dataset.shuffle(shuffle_buffer_size)

        # Maybe repeat
        if num_epochs is None:
            dataset = dataset.repeat()  # repeat indefinitely
        elif num_epochs > 1:
            dataset = dataset.repeat(count=num_epochs)

        dataset = dataset.batch(batch_size)
        dataset = dataset.map(self.parse_example,
                              num_parallel_calls=num_threads)

        # Pre-fetch a batch for faster processing
        dataset = dataset.prefetch(prefetch_buffer_size)

        # Get the iterator
        if one_shot:
            self._iterator = dataset.make_one_shot_iterator()
        else:
            self._iterator = dataset.make_initializable_iterator()
            self._init_op = self._iterator.initializer

        # Get outputs
        self._outputs = self._iterator.get_next()

        # Map to features
        index = 0
        result = {}
        for key in sorted(self._feature_map.keys()):
            result[key] = self._outputs[index]
            index += 1
        self._result = result

    def pad(self, t):
        s = tf.shape(t)
        paddings = [[0, 0], [0, self._static_max_length - s[1]]]
        x = tf.pad(t, paddings, 'CONSTANT', constant_values=0)
        x = tf.reshape(x, [s[0], self._static_max_length])
        assert x.get_shape().as_list()[1] is self._static_max_length
        return x

    def parse_example(self, serialized):
        parsed = parsing_ops.parse_example(serialized, self._feature_map)
        result = []
        for key in sorted(self._feature_map.keys()):
            val = parsed[key]
            if isinstance(val, sparse_tensor_lib.SparseTensor):
                dense_tensor = tf.sparse_tensor_to_dense(val)
                if self._static_max_length is not None:
                    dense_tensor = self.pad(dense_tensor)
                result.append(dense_tensor)
            else:
                result.append(val)
        return tuple(result)

    @property
    def iterator(self):
        return self._iterator

    @property
    def init_op(self):
        return self._init_op

    @property
    def batch(self):
        return self._result


# namedtuple for bucket_info object (used in Pipeline)
#   func: a mapping from examples to tf.int64 keys
#   pads: a set of tf shapes that correspond to padded examples
bucket_info = namedtuple("bucket_info", "func pads")


def int64_feature(value):
    """ Takes a single int (e.g. 3) and converts it to a tf Feature """
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def int64_list_feature(sequence):
    """ Sequence of ints (e.g [1,2,3]) to TF feature """
    return tf.train.Feature(int64_list=tf.train.Int64List(value=sequence))
34.658537
80
0.649073
2,714
0.636641
0
0
182
0.042693
0
0
1,158
0.27164
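Pipeline in the record above sorts the feature_map keys, densifies (and optionally pads) any SparseTensor columns, and exposes the batch as a dict keyed by feature name. The sketch below shows the kind of feature_map it expects; it is a hedged illustration, not from the TFMTL repository: the file name and feature names are hypothetical, and TF1-style feature specs are used to match the make_one_shot_iterator and sparse_tensor_to_dense calls in the code.

import tensorflow as tf

feature_map = {
    "label": tf.FixedLenFeature([], dtype=tf.int64),   # dense scalar per example
    "tokens": tf.VarLenFeature(dtype=tf.int64),        # parsed as sparse, densified and padded by Pipeline
}

pipeline = Pipeline("train.tfrecord", feature_map, batch_size=16,
                    static_max_length=128, one_shot=True)
batch = pipeline.batch  # {"label": int64 [16], "tokens": int64 [16, 128]}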
b9dd82e962e13070a8526b2d4d0da1d0be6265ee
7,417
py
Python
src/py65/devices/mpu65c02.py
dabeaz/py65
62d790445018f0616508022912b67d8d64935a29
[ "BSD-3-Clause" ]
5
2015-03-19T22:22:45.000Z
2020-05-15T18:26:59.000Z
src/py65/devices/mpu65c02.py
BigEd/py65
57d5e7191362006c1d6fa20662da3e4854f1b7c2
[ "BSD-3-Clause" ]
null
null
null
src/py65/devices/mpu65c02.py
BigEd/py65
57d5e7191362006c1d6fa20662da3e4854f1b7c2
[ "BSD-3-Clause" ]
3
2015-04-27T02:42:29.000Z
2021-07-16T20:50:23.000Z
from py65.devices import mpu6502 from py65.utils.devices import make_instruction_decorator class MPU(mpu6502.MPU): def __init__(self, *args, **kwargs): mpu6502.MPU.__init__(self, *args, **kwargs) self.name = '65C02' self.waiting = False def step(self): if self.waiting: self.processorCycles += 1 else: mpu6502.MPU.step(self) return self # Make copies of the lists instruct = mpu6502.MPU.instruct[:] cycletime = mpu6502.MPU.cycletime[:] extracycles = mpu6502.MPU.extracycles[:] disassemble = mpu6502.MPU.disassemble[:] instruction = make_instruction_decorator(instruct, disassemble, cycletime, extracycles) # addressing modes def ZeroPageIndirectAddr(self): return self.WordAt( 255 & (self.ByteAt(self.pc))) def AccumulatorAddr(self): return self.a # operations def opRMB(self, x, mask): address = x() self.memory[address] &= mask def opSMB(self, x, mask): address = x() self.memory[address] |= mask def opSTZ(self, x): self.memory[x()] = 0x00 def opTSB(self, x): address = x() m = self.memory[address] self.p &= ~self.ZERO z = m & self.a if z != 0: self.p |= self.ZERO self.memory[address] = m | self.a def opTRB(self, x): address = x() m = self.memory[address] self.p &= ~self.ZERO z = m & self.a if z != 0: self.p |= self.ZERO self.memory[address] = m & ~self.a # instructions @instruction(name="RMB0", mode="zpg", cycles=5) def inst_0x07(self): self.opRMB(self.ZeroPageAddr, 0xFE) self.pc += 1 @instruction(name="ORA", mode="zpi", cycles=5) def inst_0x12(self): self.opORA(self.ZeroPageIndirectAddr) self.pc += 1 @instruction(name="RMB1", mode="zpg", cycles=5) def inst_0x17(self): self.opRMB(self.ZeroPageAddr, 0xFD) self.pc += 1 @instruction(name="RMB2", mode="zpg", cycles=5) def inst_0x27(self): self.opRMB(self.ZeroPageAddr, 0xFB) self.pc += 1 @instruction(name="AND", mode="zpi", cycles=5) def inst_0x32(self): self.opAND(self.ZeroPageIndirectAddr) self.pc += 1 @instruction(name="BIT", mode="zpx", cycles=4) def inst_0x34(self): self.opBIT(self.ZeroPageXAddr) self.pc += 1 @instruction(name="RMB3", mode="zpg", cycles=5) def inst_0x37(self): self.opRMB(self.ZeroPageAddr, 0xF7) self.pc += 1 @instruction(name="BIT", mode="abx", cycles=4) def inst_0x3c(self): self.opBIT(self.AbsoluteXAddr) self.pc += 2 @instruction(name="RMB4", mode="zpg", cycles=5) def inst_0x47(self): self.opRMB(self.ZeroPageAddr, 0xEF) self.pc += 1 @instruction(name="EOR", mode="zpi", cycles=5) def inst_0x52(self): self.opEOR(self.ZeroPageIndirectAddr) self.pc += 1 @instruction(name="RMB5", mode="zpg", cycles=5) def inst_0x57(self): self.opRMB(self.ZeroPageAddr, 0xDF) self.pc += 1 @instruction(name="PHY", mode="imp", cycles=3) def inst_0x5a(self): self.stPush(self.y) @instruction(name="STZ", mode="imp", cycles=3) def inst_0x64(self): self.opSTZ(self.ZeroPageAddr) self.pc += 1 @instruction(name="RMB6", mode="zpg", cycles=5) def inst_0x67(self): self.opRMB(self.ZeroPageAddr, 0xBF) self.pc += 1 @instruction(name="ADC", mode="zpi", cycles=5) def inst_0x72(self): self.opADC(self.ZeroPageIndirectAddr) self.pc += 1 @instruction(name="STZ", mode="zpx", cycles=4) def inst_0x74(self): self.opSTZ(self.ZeroPageXAddr) self.pc += 1 @instruction(name="PHY", mode="imp", cycles=4) def inst_0x7a(self): self.y = self.stPop() self.FlagsNZ(self.y) @instruction(name="RMB7", mode="zpg", cycles=5) def inst_0x77(self): self.opRMB(self.ZeroPageAddr, 0x7F) self.pc += 1 @instruction(name="SMB0", mode="zpg", cycles=5) def inst_0x87(self): self.opSMB(self.ZeroPageAddr, 0x01) self.pc += 1 @instruction(name="STA", mode="zpi", cycles=5) def inst_0x92(self): 
self.opSTA(self.ZeroPageIndirectAddr) self.pc += 1 @instruction(name="SMB1", mode="zpg", cycles=5) def inst_0x97(self): self.opSMB(self.ZeroPageAddr, 0x02) self.pc += 1 @instruction(name="STZ", mode="abs", cycles=4) def inst_0x9c(self): self.opSTZ(self.AbsoluteAddr) self.pc += 2 @instruction(name="STZ", mode="abx", cycles=5) def inst_0x9e(self): self.opSTZ(self.AbsoluteXAddr) self.pc += 2 @instruction(name="SMB2", mode="zpg", cycles=5) def inst_0xa7(self): self.opSMB(self.ZeroPageAddr, 0x04) self.pc += 1 @instruction(name="LDA", mode="zpi", cycles=5) def inst_0xb2(self): self.opLDA(self.ZeroPageIndirectAddr) self.pc += 1 @instruction(name="SMB3", mode="zpg", cycles=5) def inst_0xb7(self): self.opSMB(self.ZeroPageAddr, 0x08) self.pc += 1 @instruction(name="SMB4", mode="zpg", cycles=5) def inst_0xc7(self): self.opSMB(self.ZeroPageAddr, 0x10) self.pc += 1 @instruction(name="SMB5", mode="zpg", cycles=5) def inst_0xd7(self): self.opSMB(self.ZeroPageAddr, 0x20) self.pc += 1 @instruction(name="PHX", mode="imp", cycles=3) def inst_0xda(self): self.stPush(self.x) @instruction(name="SMB6", mode="zpg", cycles=5) def inst_0xe7(self): self.opSMB(self.ZeroPageAddr, 0x40) self.pc += 1 @instruction(name="SMB7", mode="zpg", cycles=5) def inst_0xf7(self): self.opSMB(self.ZeroPageAddr, 0x80) self.pc += 1 @instruction(name="PLX", mode="imp", cycles=4) def inst_0xfa(self): self.x = self.stPop() self.FlagsNZ(self.x) @instruction(name="TSB", mode="zpg", cycles=5) def inst_0x04(self): self.opTSB(self.ZeroPageAddr) self.pc += 1 @instruction(name="TSB", mode="abs", cycles=6) def inst_0x0c(self): self.opTSB(self.AbsoluteAddr) self.pc += 2 @instruction(name="TRB", mode="zpg", cycles=5) def inst_0x14(self): self.opTRB(self.ZeroPageAddr) self.pc += 1 @instruction(name="INC", mode="acc", cycles=2) def inst_0x1a(self): self.opINCR(None) @instruction(name="TRB", mode="abs", cycles=6) def inst_0x1c(self): self.opTRB(self.AbsoluteAddr) self.pc += 2 @instruction(name="DEC", mode="acc", cycles=2) def inst_0x3a(self): self.opDECR(None) @instruction(name="BRA", mode="rel", cycles=1, extracycles=1) def inst_0x80(self): self.BranchRelAddr() @instruction(name="WAI", mode='imp', cycles=3) def inst_0xCB(self): self.waiting = True @instruction(name="CMP", mode='zpi', cycles=6) # Don't know cycles def inst_0xD2(self): self.opCPY(self.ZeroPageIndirectAddr) self.pc += 1 @instruction(name="SBC", mode="zpi", cycles=5) def inst_0xf2(self): self.opSBC(self.ZeroPageIndirectAddr) self.pc += 1
27.369004
71
0.58676
7,322
0.987192
0
0
5,486
0.739652
0
0
532
0.071727
b9ddc98cf55e2bef4fcf498ec4787ca57bad46d0
5,623
py
Python
tests/test__io.py
soerendip/ms-mint
bf5f5d87d07a0d2108c6cd0d92c278f2ea762e58
[ "MIT" ]
1
2021-09-03T04:02:25.000Z
2021-09-03T04:02:25.000Z
tests/test__io.py
soerendip/ms-mint
bf5f5d87d07a0d2108c6cd0d92c278f2ea762e58
[ "MIT" ]
3
2020-09-29T21:43:39.000Z
2021-07-21T22:18:27.000Z
tests/test__io.py
soerendip/ms-mint
bf5f5d87d07a0d2108c6cd0d92c278f2ea762e58
[ "MIT" ]
4
2019-11-14T13:25:24.000Z
2021-04-30T22:08:53.000Z
import pandas as pd import shutil import os import io from ms_mint.Mint import Mint from pathlib import Path as P from ms_mint.io import ( ms_file_to_df, mzml_to_pandas_df_pyteomics, convert_ms_file_to_feather, convert_ms_file_to_parquet, MZMLB_AVAILABLE, ) from paths import ( TEST_MZML, TEST_MZXML, TEST_PARQUET, TEST_MZMLB_POS, TEST_MZML_POS, TEST_MZML_NEG, ) def test__ms_file_to_df__mzML(): result = ms_file_to_df(TEST_MZML) expected_cols = [ "scan_id", "ms_level", "polarity", "scan_time_min", "mz", "intensity", ] assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe" assert expected_cols == result.columns.to_list(), result.columns def test__ms_file_to_df__mzML_timeunit_minutes(): result = ms_file_to_df(TEST_MZML, time_unit="minutes") expected_cols = [ "scan_id", "ms_level", "polarity", "scan_time_min", "mz", "intensity", ] assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe" assert expected_cols == result.columns.to_list(), result.columns def test__ms_file_to_df__mzXML(): result = ms_file_to_df(TEST_MZXML) expected_cols = [ "scan_id", "ms_level", "polarity", "scan_time_min", "mz", "intensity", ] assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe" assert expected_cols == result.columns.to_list(), result.columns def test__mzml_to_pandas_df_pyteomics_pos(): result = mzml_to_pandas_df_pyteomics(TEST_MZML_POS) expected_cols = [ "scan_id", "ms_level", "polarity", "scan_time_min", "mz", "intensity", ] assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe" assert expected_cols == result.columns.to_list(), result.columns assert all(result.polarity == "+"), f'Polarity should be "+"\n{result}' def test__mzml_to_pandas_df_pyteomics_neg(): result = mzml_to_pandas_df_pyteomics(TEST_MZML_NEG) expected_cols = [ "scan_id", "ms_level", "polarity", "scan_time_min", "mz", "intensity", ] assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe" assert expected_cols == result.columns.to_list(), result.columns assert all(result.polarity == "-"), f'Polarity should be "-"\n{result}' def test__read_parquet(): result = ms_file_to_df(TEST_PARQUET) expected_cols = [ "scan_id", "ms_level", "polarity", "scan_time_min", "mz", "intensity", ] assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe" assert expected_cols == result.columns.to_list(), result.columns def test__write_read_hdf(tmpdir): df = ms_file_to_df(TEST_PARQUET) fn = P(tmpdir) / "file.hdf" df.to_hdf(fn, key="data") result = ms_file_to_df(fn) expected_cols = [ "scan_id", "ms_level", "polarity", "scan_time_min", "mz", "intensity", ] assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe" assert expected_cols == result.columns.to_list(), result.columns def test__read_mzMLb(tmpdir): if not MZMLB_AVAILABLE: return None result = ms_file_to_df(TEST_MZMLB_POS) expected_cols = [ "scan_id", "ms_level", "polarity", "scan_time_min", "mz", "intensity", ] assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe" assert expected_cols == result.columns.to_list(), result.columns # assert all(result.polarity == '+'), f'Polarity should be "+"\n{result}' def test__convert_ms_file_to_feather(tmpdir): print(tmpdir) shutil.copy(TEST_MZML, tmpdir) fn = P(tmpdir) / P(TEST_MZML).name fn_out = fn.with_suffix(".feather") print(fn, fn_out) convert_ms_file_to_feather(fn) assert fn_out.is_file(), f"File not generated {fn_out}" df = ms_file_to_df(fn) df_fea = ms_file_to_df(fn_out) assert df_fea.equals(df), 
"DataFrames not equal" def test__convert_ms_file_to_parquet(tmpdir): print(tmpdir) shutil.copy(TEST_MZML, tmpdir) fn = P(tmpdir) / P(TEST_MZML).name fn_out = fn.with_suffix(".parquet") print(fn, fn_out) convert_ms_file_to_parquet(fn) assert fn_out.is_file(), f"File not generated {fn_out}" df = ms_file_to_df(fn) df_fea = ms_file_to_df(fn_out) assert df_fea.equals(df), "DataFrames not equal" def test__export_to_excel(tmp_path): filename = os.path.join(tmp_path, "output.xlsx") mint = Mint(verbose=True) mint.ms_files = "tests/data/test.mzXML" mint.run() mint.export(filename) assert os.path.isfile(filename) def test__export_to_excel_without_fn(): mint = Mint(verbose=True) mint.ms_files = TEST_MZXML mint.targets = pd.DataFrame( { "peak_label": ["A"], "mz_mean": [200], "mz_width": [10], "intensity_threshold": [0], "rt_min": [0], "rt_max": [10], "targets_filename": ["unknown"], } ) mint.run() buffer = mint.export() assert isinstance(buffer, io.BytesIO) df = pd.read_excel(buffer, sheet_name="Results") assert len(df) == 1, len(df) assert df.loc[0, "peak_label"] == "A", df.loc[0, "peak_label"] assert df.loc[0, "ms_file"] == P(TEST_MZXML).name, df.loc[0, "ms_file"]
27.563725
81
0.634181
0
0
0
0
0
0
0
0
1,246
0.22159
b9de795b7b1298f8cad5f30e914735224920a0f9
1,158
py
Python
core/views.py
moiyad/image
d4515ef3057794f38268a6887bfff157115f26f7
[ "MIT" ]
null
null
null
core/views.py
moiyad/image
d4515ef3057794f38268a6887bfff157115f26f7
[ "MIT" ]
null
null
null
core/views.py
moiyad/image
d4515ef3057794f38268a6887bfff157115f26f7
[ "MIT" ]
null
null
null
from django.core.files.storage import FileSystemStorage
from django.shortcuts import render, redirect
from core.forms import DocumentForm
from core.models import Document
from media import image_cv2


def home(request):
    documents = Document.objects.all()
    number = len(image_cv2.myList)
    return render(request, 'core/home.html',
                  {'documents': documents, 'number': number})


def simple_upload(request):
    if request.method == 'POST' and request.FILES['myfile']:
        myfile = request.FILES['myfile']
        fs = FileSystemStorage()
        filename = fs.save(myfile.name, myfile)
        uploaded_file_url = fs.url(filename)
        return render(request, 'core/simple_upload.html', {
            'uploaded_file_url': uploaded_file_url
        })
    return render(request, 'core/simple_upload.html')


def model_form_upload(request):
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect('home')
    else:
        form = DocumentForm()
    return render(request, 'core/model_form_upload.html', {
        'form': form
    })
30.473684
88
0.668394
0
0
0
0
0
0
0
0
173
0.149396
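The three views in the record above rely on a URL configuration that exposes them by name (redirect('home') in model_form_upload resolves a route named 'home'). The sketch below is a hedged reconstruction of the implied core/urls.py; it is not part of the record, and the path strings are hypothetical, only the route names matter.

from django.urls import path
from core import views

urlpatterns = [
    path('', views.home, name='home'),
    path('upload/simple/', views.simple_upload, name='simple_upload'),
    path('upload/form/', views.model_form_upload, name='model_form_upload'),
]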
b9df48f54330cde291fba9c3ce4e17b22e7c1da1
1,156
py
Python
python/verifair/benchmarks/fairsquare/M_BN_F_SVM_A_Q.py
obastani/verifair
1d5efea041330fa9fe8d59d976bdd3ef97aff417
[ "ECL-2.0", "Apache-2.0" ]
5
2019-11-05T20:40:40.000Z
2020-09-16T03:13:54.000Z
python/verifair/benchmarks/fairsquare/M_BN_F_SVM_A_Q.py
obastani/verifair
1d5efea041330fa9fe8d59d976bdd3ef97aff417
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
python/verifair/benchmarks/fairsquare/M_BN_F_SVM_A_Q.py
obastani/verifair
1d5efea041330fa9fe8d59d976bdd3ef97aff417
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
from .helper import *


def sample(flag):
    sex = step([(0, 1, 0.3307), (1, 2, 0.6693)])
    if sex < 1:
        capital_gain = gaussian(568.4105, 24248365.5428)
        if capital_gain < 7298.0000:
            age = gaussian(38.4208, 184.9151)
            capital_loss = gaussian(86.5949, 157731.9553)
        else:
            age = gaussian(38.8125, 193.4918)
            capital_loss = gaussian(117.8083, 252612.0300)
    else:
        capital_gain = gaussian(1329.3700, 69327473.1006)
        if capital_gain < 5178.0000:
            age = gaussian(38.6361, 187.2435)
            capital_loss = gaussian(87.0152, 161032.4157)
        else:
            age = gaussian(38.2668, 187.2747)
            capital_loss = gaussian(101.7672, 189798.1926)

    sensitiveAttribute(sex < 1, flag)
    qualified(age > 18)

    N_age = (age - 17.0) / 62.0
    N_capital_gain = (capital_gain - 0.0) / 22040.0
    N_capital_loss = (capital_loss - 0.0) / 1258.0

    t = 0.0006 * N_age + -5.7363 * N_capital_gain + -0.0002 * N_capital_loss + 1.0003
    if sex > 1:
        t = t + -0.0003
    if sex < 1:
        t = t - 0.5

    return int(t < 0)
    fairnessTarget(t < 0)
33.028571
85
0.569204
0
0
0
0
0
0
0
0
0
0
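sample(flag) in the record above is a probabilistic program: step, gaussian, sensitiveAttribute, qualified and fairnessTarget all come from the benchmark's helper module, which is not included in this record. The stand-ins below are assumptions made only so the model can be smoke-tested by plain Monte Carlo sampling; in particular, gaussian's second argument is treated here as a variance, and step as a weighted piecewise-uniform draw.

import random

def gaussian(mean, variance):            # stand-in: assumed mean/variance parameterization
    return random.gauss(mean, variance ** 0.5)

def step(bins):                          # stand-in: weighted piecewise-uniform draw over (lo, hi, mass) bins
    r, acc = random.random(), 0.0
    for lo, hi, mass in bins:
        acc += mass
        if r <= acc:
            return random.uniform(lo, hi)
    return random.uniform(bins[-1][0], bins[-1][1])

def sensitiveAttribute(condition, flag):  # stand-in: the real helper uses this to condition on the group
    return condition

def qualified(condition):                 # stand-in
    return condition

def fairnessTarget(condition):            # stand-in
    return condition

# With these stand-ins, repeatedly calling sample(flag) estimates how often the
# SVM score t falls below 0; the real helper module uses the hooks above to
# split that rate by the sensitive group (sex < 1) for the fairness check.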
b9dfea4e7beba7ec415b85a76c49ed3af214dec4
25,442
py
Python
ml4chem/atomistic/models/neuralnetwork.py
muammar/mlchem
365487c23ea3386657e178e56ab31adfe8d5d073
[ "BSD-3-Clause-LBNL" ]
77
2019-08-05T17:30:22.000Z
2022-03-28T14:31:35.000Z
ml4chem/atomistic/models/neuralnetwork.py
muammar/ml4chem
365487c23ea3386657e178e56ab31adfe8d5d073
[ "BSD-3-Clause-LBNL" ]
6
2019-07-31T18:59:38.000Z
2020-10-18T18:15:07.000Z
ml4chem/atomistic/models/neuralnetwork.py
muammar/mlchem
365487c23ea3386657e178e56ab31adfe8d5d073
[ "BSD-3-Clause-LBNL" ]
15
2020-02-28T10:11:21.000Z
2021-12-01T13:45:33.000Z
import dask import datetime import logging import time import torch import numpy as np import pandas as pd from collections import OrderedDict from ml4chem.metrics import compute_rmse from ml4chem.atomistic.models.base import DeepLearningModel, DeepLearningTrainer from ml4chem.atomistic.models.loss import AtomicMSELoss from ml4chem.optim.handler import get_optimizer, get_lr_scheduler, get_lr from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters from pprint import pformat # Setting precision and starting logger object torch.set_printoptions(precision=10) logger = logging.getLogger() class NeuralNetwork(DeepLearningModel, torch.nn.Module): """Atom-centered Neural Network Regression with Pytorch This model is based on Ref. 1 by Behler and Parrinello. Parameters ---------- hiddenlayers : tuple Structure of hidden layers in the neural network. activation : str Activation functions. Supported "tanh", "relu", or "celu". References ---------- 1. Behler, J. & Parrinello, M. Generalized Neural-Network Representation of High-Dimensional Potential-Energy Surfaces. Phys. Rev. Lett. 98, 146401 (2007). 2. Khorshidi, A. & Peterson, A. A. Amp : A modular approach to machine learning in atomistic simulations. Comput. Phys. Commun. 207, 310–324 (2016). """ NAME = "PytorchPotentials" @classmethod def name(cls): """Returns name of class""" return cls.NAME def __init__(self, hiddenlayers=(3, 3), activation="relu", **kwargs): super(DeepLearningModel, self).__init__() self.hiddenlayers = hiddenlayers self.activation = activation def prepare_model(self, input_dimension, data=None, purpose="training"): """Prepare the model Parameters ---------- input_dimension : int Input's dimension. data : object Data object created from the handler. purpose : str Purpose of this model: 'training', 'inference'. 
""" self.input_dimension = input_dimension activation = { "tanh": torch.nn.Tanh, "relu": torch.nn.ReLU, "celu": torch.nn.CELU, } hl = len(self.hiddenlayers) if purpose == "training": logger.info(" ") logger.info("Model") logger.info("=====") now = datetime.datetime.now() logger.info( "Module accessed on {}.".format(now.strftime("%Y-%m-%d %H:%M:%S")) ) logger.info("Model name: {}.".format(self.name())) logger.info("Number of hidden-layers: {}".format(hl)) logger.info( "Structure of Neural Net: {}".format( "(input, " + str(self.hiddenlayers)[1:-1] + ", output)" ) ) layers = range(len(self.hiddenlayers) + 1) try: unique_element_symbols = data.unique_element_symbols[purpose] except TypeError: unique_element_symbols = data.get_unique_element_symbols(purpose=purpose) unique_element_symbols = unique_element_symbols[purpose] symbol_model_pair = [] for symbol in unique_element_symbols: linears = [] intercept_name = "intercept_" + symbol slope_name = "slope_" + symbol if purpose == "training": intercept = (data.max_energy + data.min_energy) / 2.0 intercept = torch.nn.Parameter( torch.tensor(intercept, requires_grad=True) ) slope = (data.max_energy - data.min_energy) / 2.0 slope = torch.nn.Parameter(torch.tensor(slope, requires_grad=True)) self.register_parameter(intercept_name, intercept) self.register_parameter(slope_name, slope) elif purpose == "inference": intercept = torch.nn.Parameter(torch.tensor(0.0)) slope = torch.nn.Parameter(torch.tensor(0.0)) self.register_parameter(intercept_name, intercept) self.register_parameter(slope_name, slope) for index in layers: # This is the input layer if index == 0: out_dimension = self.hiddenlayers[0] _linear = torch.nn.Linear(input_dimension, out_dimension) linears.append(_linear) linears.append(activation[self.activation]()) # This is the output layer elif index == len(self.hiddenlayers): inp_dimension = self.hiddenlayers[index - 1] out_dimension = 1 _linear = torch.nn.Linear(inp_dimension, out_dimension) linears.append(_linear) # These are hidden-layers else: inp_dimension = self.hiddenlayers[index - 1] out_dimension = self.hiddenlayers[index] _linear = torch.nn.Linear(inp_dimension, out_dimension) linears.append(_linear) linears.append(activation[self.activation]()) # Stacking up the layers. linears = torch.nn.Sequential(*linears) symbol_model_pair.append([symbol, linears]) self.linears = torch.nn.ModuleDict(symbol_model_pair) if purpose == "training": total_params, train_params = get_number_of_parameters(self) logger.info("Total number of parameters: {}.".format(total_params)) logger.info("Number of training parameters: {}.".format(train_params)) logger.info(" ") logger.info(self.linears) # Iterate over all modules and just intialize those that are # a linear layer. logger.warning( "Initialization of weights with Xavier Uniform by " "default." ) for m in self.modules(): if isinstance(m, torch.nn.Linear): # nn.init.normal_(m.weight) # , mean=0, std=0.01) torch.nn.init.xavier_uniform_(m.weight) def forward(self, X): """Forward propagation This is forward propagation and it returns the atomic energy. Parameters ---------- X : list List of inputs in the feature space. Returns ------- outputs : tensor A list of tensors with energies per image. """ outputs = [] for hash in X: image = X[hash] atomic_energies = [] for symbol, x in image: # FIXME this conditional can be removed after de/serialization # is fixed. 
if isinstance(symbol, bytes): symbol = symbol.decode("utf-8") x = self.linears[symbol](x) intercept_name = "intercept_" + symbol slope_name = "slope_" + symbol slope = getattr(self, slope_name) intercept = getattr(self, intercept_name) x = (slope * x) + intercept atomic_energies.append(x) atomic_energies = torch.cat(atomic_energies) image_energy = torch.sum(atomic_energies) outputs.append(image_energy) outputs = torch.stack(outputs) return outputs def get_activations(self, images, model=None, numpy=True): """Get activations of each hidden-layer This function allows to extract activations of each hidden-layer of the neural network. Parameters ---------- image : dict Image with structure hash, features. model : object A ML4Chem model object. numpy : bool Whether we want numpy arrays or tensors. Returns ------- activations : DataFrame A DataFrame with activations for each layer. """ activations = [] columns = ["hash", "atom.index", "atom.symbol"] if model is None: model = self model.eval() for hash, data in images.items(): for index, (symbol, features) in enumerate(data): counter = 0 layer_counter = 0 for l, layer in enumerate(model.linears[symbol].modules()): if isinstance(layer, torch.nn.Linear) and counter == 0: x = layer(features) if numpy: data_ = [hash, index, symbol, x.detach_().numpy()] else: data_ = [hash, index, symbol, x.detach_()] layer_column_name = f"layer{layer_counter}" if layer_column_name not in columns: columns.append(layer_column_name) counter += 1 layer_counter += 1 elif isinstance(layer, torch.nn.Linear) and counter > 0: x = layer(x) if numpy: data_.append(x.detach_().numpy()) else: data_.append(x.detach_()) layer_column_name = f"layer{layer_counter}" if layer_column_name not in columns: columns.append(layer_column_name) counter += 1 layer_counter += 1 activations.append(data_) del data_ # Create DataFrame from lists df = pd.DataFrame(activations, columns=columns) return df class train(DeepLearningTrainer): """Train the model Parameters ---------- inputs : dict Dictionary with hashed feature space. targets : list The expected values that the model has to learn aka y. model : object The NeuralNetwork class. data : object Data object created from the handler. optimizer : tuple The optimizer is a tuple with the structure: >>> ('adam', {'lr': float, 'weight_decay'=float}) epochs : int Number of full training cycles. regularization : float This is the L2 regularization. It is not the same as weight decay. convergence : dict Instead of using epochs, users can set a convergence criterion. Supported keys are "training" and "test". lossfxn : obj A loss function object. device : str Calculation can be run in the cpu or cuda (gpu). batch_size : int Number of data points per batch to use for training. Default is None. lr_scheduler : tuple Tuple with structure: scheduler's name and a dictionary with keyword arguments. >>> lr_scheduler = ('ReduceLROnPlateau', {'mode': 'min', 'patience': 10}) uncertainty : list A list of uncertainties that are used to penalize during the loss function evaluation. checkpoint : dict Set checkpoints. Dictionary with following structure: >>> checkpoint = {"label": label, "checkpoint": 100, "path": ""} `label` refers to the name used to save the checkpoint, `checkpoint` is a integer or -1 for saving all epochs, and the path is where the checkpoint is stored. Default is None and no checkpoint is saved. test : dict A dictionary used to compute the error over a validation/test set during training procedures. 
>>> test = {"features": test_space, "targets": test_targets, "data": data_test} The keys,values of the dictionary are: - "data": a `Data` object. - "targets": test set targets. - "features": a feature space obtained using `features.calculate()`. """ def __init__( self, inputs, targets, model=None, data=None, optimizer=(None, None), regularization=None, epochs=100, convergence=None, lossfxn=None, device="cpu", batch_size=None, lr_scheduler=None, uncertainty=None, checkpoint=None, test=None, ): self.initial_time = time.time() if lossfxn is None: lossfxn = AtomicMSELoss logger.info("") logger.info("Training") logger.info("========") logger.info(f"Convergence criteria: {convergence}") logger.info(f"Loss function: {lossfxn.__name__}") if uncertainty is not None: logger.info("Options:") logger.info(f" - Uncertainty penalization: {pformat(uncertainty)}") logger.info("") atoms_per_image = data.atoms_per_image if batch_size is None: batch_size = len(inputs.values()) if isinstance(batch_size, int): # Data batches chunks = list(get_chunks(inputs, batch_size, svm=False)) targets = list(get_chunks(targets, batch_size, svm=False)) atoms_per_image = list(get_chunks(atoms_per_image, batch_size, svm=False)) if uncertainty != None: uncertainty = list(get_chunks(uncertainty, batch_size, svm=False)) uncertainty = [ torch.tensor(u, requires_grad=False, dtype=torch.float) for u in uncertainty ] logger.info("") logging.info("Batch Information") logging.info("-----------------") logging.info("Number of batches: {}.".format(len(chunks))) logging.info("Batch size: {} elements per batch.".format(batch_size)) logger.info(" ") atoms_per_image = [ torch.tensor(n_atoms, requires_grad=False, dtype=torch.float) for n_atoms in atoms_per_image ] targets = [torch.tensor(t, requires_grad=False) for t in targets] if device == "cuda": logger.info("Moving data to CUDA...") atoms_per_image = atoms_per_image.cuda() targets = targets.cuda() _inputs = OrderedDict() for hash, f in inputs.items(): _inputs[hash] = [] for features in f: symbol, vector = features _inputs[hash].append((symbol, vector.cuda())) inputs = _inputs move_time = time.time() - self.initial_time h, m, s = convert_elapsed_time(move_time) logger.info( "Data moved to GPU in {} hours {} minutes {:.2f} \ seconds.".format( h, m, s ) ) logger.info(" ") # Define optimizer self.optimizer_name, self.optimizer = get_optimizer( optimizer, model.parameters() ) if lr_scheduler is not None: self.scheduler = get_lr_scheduler(self.optimizer, lr_scheduler) self.atoms_per_image = atoms_per_image self.convergence = convergence self.device = device self.epochs = epochs self.model = model self.lr_scheduler = lr_scheduler self.lossfxn = lossfxn self.checkpoint = checkpoint self.test = test # Data scattering client = dask.distributed.get_client() self.chunks = [client.scatter(chunk) for chunk in chunks] self.targets = [client.scatter(target) for target in targets] if uncertainty != None: self.uncertainty = [client.scatter(u) for u in uncertainty] else: self.uncertainty = uncertainty # Let the hunger games begin... 
self.trainer() def trainer(self): """Run the training class""" logger.info(" ") logger.info("Starting training...\n") if self.test is None: logger.info( "{:6s} {:19s} {:12s} {:12s} {:8s}".format( "Epoch", "Time Stamp", "Loss", "Error/img", "Error/atom" ) ) logger.info( "{:6s} {:19s} {:12s} {:8s} {:8s}".format( "------", "-------------------", "------------", "------------", "------------", ) ) else: test_features = self.test.get("features", None) test_targets = self.test.get("targets", None) test_data = self.test.get("data", None) logger.info( "{:6s} {:19s} {:12s} {:12s} {:12s} {:12s} {:16s}".format( "Epoch", "Time Stamp", "Loss", "Error/img", "Error/atom", "Error/img (t)", "Error/atom (t)", ) ) logger.info( "{:6s} {:19s} {:12s} {:8s} {:8s} {:8s} {:8s}".format( "------", "-------------------", "------------", "------------", "------------", "------------", "------------", ) ) converged = False _loss = [] _rmse = [] epoch = 0 client = dask.distributed.get_client() while not converged: epoch += 1 self.optimizer.zero_grad() # clear previous gradients loss, outputs_ = train.closure( self.chunks, self.targets, self.uncertainty, self.model, self.lossfxn, self.atoms_per_image, self.device, ) # We step the optimizer if self.optimizer_name != "LBFGS": self.optimizer.step() else: options = {"closure": self.closure, "current_loss": loss, "max_ls": 10} self.optimizer.step(options) # RMSE per image and per/atom rmse = client.submit(compute_rmse, *(outputs_, self.targets)) atoms_per_image = torch.cat(self.atoms_per_image) rmse_atom = client.submit( compute_rmse, *(outputs_, self.targets, atoms_per_image) ) rmse = rmse.result() rmse_atom = rmse_atom.result() _loss.append(loss.item()) _rmse.append(rmse) # In the case that lr_scheduler is not None if self.lr_scheduler is not None: self.scheduler.step(loss) print("Epoch {} lr {}".format(epoch, get_lr(self.optimizer))) ts = time.time() ts = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d " "%H:%M:%S") if self.test is None: logger.info( "{:6d} {} {:8e} {:4e} {:4e}".format( epoch, ts, loss.detach(), rmse, rmse_atom ) ) else: test_model = self.model.eval() test_predictions = test_model(test_features).detach() rmse_test = client.submit( compute_rmse, *(test_predictions, test_targets) ) atoms_per_image_test = torch.tensor( test_data.atoms_per_image, requires_grad=False ) rmse_atom_test = client.submit( compute_rmse, *(test_predictions, test_targets, atoms_per_image_test), ) rmse_test = rmse_test.result() rmse_atom_test = rmse_atom_test.result() logger.info( "{:6d} {} {:8e} {:4e} {:4e} {:4e} {:4e}".format( epoch, ts, loss.detach(), rmse, rmse_atom, rmse_test, rmse_atom_test, ) ) if self.checkpoint is not None: self.checkpoint_save(epoch, self.model, **self.checkpoint) if self.convergence is None and epoch == self.epochs: converged = True elif self.convergence is not None and rmse < self.convergence["energy"]: converged = True training_time = time.time() - self.initial_time h, m, s = convert_elapsed_time(training_time) logger.info( "Training finished in {} hours {} minutes {:.2f} seconds.".format(h, m, s) ) @classmethod def closure( Cls, chunks, targets, uncertainty, model, lossfxn, atoms_per_image, device ): """Closure This class method clears previous gradients, iterates over batches, accumulates the gradients, reduces the gradients, update model params, and finally returns loss and outputs_. Parameters ---------- Cls : object Class object. chunks : tensor or list Tensor with input data points in batch with index. targets : tensor or list The targets. 
uncertainty : list A list of uncertainties that are used to penalize during the loss function evaluation. model : obj Pytorch model to perform forward() and get gradients. lossfxn : obj A loss function object. atoms_per_image : list Atoms per image because we are doing atom-centered methods. device : str Are we running cuda or cpu? """ outputs_ = [] # Get client to send futures to the scheduler client = dask.distributed.get_client() running_loss = torch.tensor(0, dtype=torch.float) accumulation = [] grads = [] # Accumulation of gradients for index, chunk in enumerate(chunks): accumulation.append( client.submit( train.train_batches, *( index, chunk, targets, uncertainty, model, lossfxn, atoms_per_image, device, ), ) ) dask.distributed.wait(accumulation) accumulation = client.gather(accumulation) for outputs, loss, grad in accumulation: grad = np.array(grad, dtype=object) running_loss += loss outputs_.append(outputs) grads.append(grad) grads = sum(grads) for index, param in enumerate(model.parameters()): param.grad = torch.tensor(grads[index], dtype=torch.float) del accumulation del grads return running_loss, outputs_ @classmethod def train_batches( Cls, index, chunk, targets, uncertainty, model, lossfxn, atoms_per_image, device ): """A function that allows training per batches Parameters ---------- index : int Index of batch. chunk : tensor or list Tensor with input data points in batch with index. targets : tensor or list The targets. model : obj Pytorch model to perform forward() and get gradients. uncertainty : list A list of uncertainties that are used to penalize during the loss function evaluation. lossfxn : obj A loss function object. atoms_per_image : list Atoms per image because we are doing atom-centered methods. device : str Are we running cuda or cpu? Returns ------- loss : tensor The loss function of the batch. """ inputs = OrderedDict(chunk) outputs = model(inputs) if uncertainty == None: loss = lossfxn(outputs, targets[index], atoms_per_image[index]) else: loss = lossfxn( outputs, targets[index], atoms_per_image[index], uncertainty[index] ) loss.backward() gradients = [] for param in model.parameters(): try: gradient = param.grad.detach().numpy() except AttributeError: # This exception catches the case where an image does not # contain variable that is following the gradient of certain # atom. For example, suppose two batches with 2 molecules each. # In the first batch we have only C, H, O but it turns out that # N is also available only in the second batch. The # contribution of the total gradient from the first batch for N is 0. gradient = 0.0 gradients.append(gradient) return outputs, loss, gradients
33.742706
88
0.526924
24,815
0.975279
0
0
4,504
0.177016
0
0
8,413
0.330648
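Editor's note: the closure()/train_batches() pair in the record above accumulates per-batch gradients as dask futures, sums them, writes the sum back into each param.grad, and only then takes a single optimizer step. A minimal sketch of that accumulate-then-reduce pattern, with an invented model and batches and without the dask scheduler, might look like this:

import torch

model = torch.nn.Linear(3, 1)          # stand-in for the real model
lossfxn = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# two made-up (inputs, targets) batches
batches = [(torch.randn(4, 3), torch.randn(4, 1)),
           (torch.randn(4, 3), torch.randn(4, 1))]

def train_batch(inputs, targets):
    # per-batch work, analogous to train_batches(): forward, backward, export grads
    outputs = model(inputs)
    loss = lossfxn(outputs, targets)
    loss.backward()
    grads = [p.grad.detach().clone() for p in model.parameters()]
    model.zero_grad()                  # next batch starts from clean gradients
    return loss.detach(), grads

optimizer.zero_grad()
running_loss = torch.tensor(0.0)
accumulated = None
for inputs, targets in batches:        # in the original these run as client.submit futures
    loss, grads = train_batch(inputs, targets)
    running_loss += loss
    accumulated = grads if accumulated is None else [a + g for a, g in zip(accumulated, grads)]

for param, grad in zip(model.parameters(), accumulated):
    param.grad = grad                  # reduce step: summed gradients go back on the model
optimizer.step()                       # one update for the whole pass
print(float(running_loss))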
b9e018d6290ebe7b0654b7e76a8df225914e3778
7,104
py
Python
hatsploit/core/db/db.py
EntySec/HatSploit
8e445804c252cc24e87888be2c2efc02750ce5ee
[ "MIT" ]
139
2021-02-17T15:52:30.000Z
2022-03-30T14:50:42.000Z
hatsploit/core/db/db.py
YurinDoctrine/HatSploit
b1550323e08336ec057cbafb77003c22a3bbee91
[ "MIT" ]
27
2021-03-24T17:14:30.000Z
2022-03-02T18:50:43.000Z
hatsploit/core/db/db.py
YurinDoctrine/HatSploit
b1550323e08336ec057cbafb77003c22a3bbee91
[ "MIT" ]
85
2021-02-17T15:39:03.000Z
2022-03-07T09:08:58.000Z
#!/usr/bin/env python3 # # MIT License # # Copyright (c) 2020-2022 EntySec # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # import json import os from hatsploit.core.cli.badges import Badges from hatsploit.lib.config import Config from hatsploit.lib.storage import LocalStorage class DB: badges = Badges() config = Config() local_storage = LocalStorage() def disconnect_payload_database(self, name): if self.local_storage.get("connected_payload_databases"): if name in self.local_storage.get("connected_payload_databases"): self.local_storage.delete_element("connected_payload_databases", name) self.local_storage.delete_element("payloads", name) return self.badges.print_error("No such payload database connected!") def disconnect_module_database(self, name): if self.local_storage.get("connected_module_databases"): if name in self.local_storage.get("connected_module_databases"): self.local_storage.delete_element("connected_module_databases", name) self.local_storage.delete_element("modules", name) return self.badges.print_error("No such module database connected!") def disconnect_plugin_database(self, name): if self.local_storage.get("connected_plugin_databases"): if name in self.local_storage.get("connected_plugin_databases"): self.local_storage.delete_element("connected_plugin_databases", name) self.local_storage.delete_element("plugins", name) return self.badges.print_error("No such plugin database connected!") def connect_payload_database(self, name, path): if self.local_storage.get("connected_payload_databases"): if name in self.local_storage.get("connected_payload_databases"): self.badges.print_error("Payload database already connected!") return if not os.path.exists(path) or not str.endswith(path, "json"): self.badges.print_error("Not a payload database!") return try: database = json.load(open(path)) except Exception: self.badges.print_error("Failed to connect payload database!") return if '__database__' not in database: self.badges.print_error("No __database__ section found!") return if database['__database__']['type'] != "payloads": self.badges.print_error("Not a payload database!") return del database['__database__'] payloads = { name: database } data = { name: { 'path': path } } if not self.local_storage.get("connected_payload_databases"): self.local_storage.set("connected_payload_databases", {}) self.local_storage.update("connected_payload_databases", data) if self.local_storage.get("payloads"): self.local_storage.update("payloads", payloads) else: self.local_storage.set("payloads", payloads) def 
connect_module_database(self, name, path): if self.local_storage.get("connected_module_databases"): if name in self.local_storage.get("connected_module_databases"): self.badges.print_error("Module database already connected!") return if not os.path.exists(path) or not str.endswith(path, "json"): self.badges.print_error("Not a module database!") return try: database = json.load(open(path)) except Exception: self.badges.print_error("Failed to connect module database!") return if '__database__' not in database: self.badges.print_error("No __database__ section found!") return if database['__database__']['type'] != "modules": self.badges.print_error("Not a module database!") return del database['__database__'] modules = { name: database } data = { name: { 'path': path } } if not self.local_storage.get("connected_module_databases"): self.local_storage.set("connected_module_databases", {}) self.local_storage.update("connected_module_databases", data) if self.local_storage.get("modules"): self.local_storage.update("modules", modules) else: self.local_storage.set("modules", modules) def connect_plugin_database(self, name, path): if self.local_storage.get("connected_plugin_databases"): if name in self.local_storage.get("connected_plugin_databases"): self.badges.print_error("Plugin database already connected!") return if not os.path.exists(path) or not str.endswith(path, "json"): self.badges.print_error("Not a database!") return try: database = json.load(open(path)) except Exception: self.badges.print_error("Failed to connect plugin database!") return if '__database__' not in database: self.badges.print_error("No __database__ section found!") return if database['__database__']['type'] != "plugins": self.badges.print_error("Not a plugin database!") return del database['__database__'] plugins = { name: database } data = { name: { 'path': path } } if not self.local_storage.get("connected_plugin_databases"): self.local_storage.set("connected_plugin_databases", {}) self.local_storage.update("connected_plugin_databases", data) if self.local_storage.get("plugins"): self.local_storage.update("plugins", plugins) else: self.local_storage.set("plugins", plugins)
38.193548
86
0.639077
5,810
0.817849
0
0
0
0
0
0
2,672
0.376126
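Editor's note: connect_payload_database() in the record above only accepts a .json file that parses, carries a __database__ section whose type is "payloads", and then stores the remaining keys under the chosen name. A sketch of a minimal file that would pass those checks; the entry name and its fields are invented for illustration:

import json

payload_db = {
    "__database__": {"type": "payloads"},            # checked, then removed before storage
    "example/payload": {"Name": "Example Payload"},  # hypothetical payload entry
}

with open("example_payloads.json", "w") as f:
    json.dump(payload_db, f, indent=4)

# db = DB()
# db.connect_payload_database("example", "example_payloads.json")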
b9e0543df8f2ae150950f2a9787edb6296aac618
2,482
py
Python
bluesky/tests/test_simulators.py
NSLS-II/bluesky
b7d666e65cf4ef556fb46b744c33264c8e3f7507
[ "BSD-3-Clause" ]
43
2015-08-04T20:13:41.000Z
2019-04-12T17:21:36.000Z
bluesky/tests/test_simulators.py
NSLS-II/bluesky
b7d666e65cf4ef556fb46b744c33264c8e3f7507
[ "BSD-3-Clause" ]
966
2015-07-29T16:43:21.000Z
2019-05-09T21:02:28.000Z
bluesky/tests/test_simulators.py
NSLS-II/bluesky
b7d666e65cf4ef556fb46b744c33264c8e3f7507
[ "BSD-3-Clause" ]
40
2015-07-29T16:42:41.000Z
2019-02-07T02:30:34.000Z
from bluesky.plans import scan
from bluesky.simulators import (print_summary, print_summary_wrapper,
                                summarize_plan, check_limits,
                                plot_raster_path)
import pytest
from bluesky.plans import grid_scan


def test_print_summary(hw):
    det = hw.det
    motor = hw.motor
    print_summary(scan([det], motor, -1, 1, 10))  # old name
    summarize_plan(scan([det], motor, -1, 1, 10))  # new name
    list(print_summary_wrapper(scan([det], motor, -1, 1, 10)))


def test_old_module_name(hw):
    det = hw.det
    motor = hw.motor
    motor1 = hw.motor1
    motor2 = hw.motor2
    from bluesky.plan_tools import (print_summary, print_summary_wrapper,
                                    plot_raster_path)
    with pytest.warns(UserWarning):
        print_summary(scan([det], motor, -1, 1, 10))
    with pytest.warns(UserWarning):
        list(print_summary_wrapper(scan([det], motor, -1, 1, 10)))
    with pytest.warns(UserWarning):
        plan = grid_scan([det],
                         motor1, -5, 5, 10,
                         motor2, -7, 7, 15, True)
        plot_raster_path(plan, 'motor1', 'motor2', probe_size=.3)


def test_check_limits(RE, hw):
    det = hw.det
    motor = hw.motor
    # The motor object does not currently implement limits.
    # Use an assert to help us out if this changes in the future.
    assert not hasattr(motor, 'limits')
    # # check_limits should warn if it can't find check_value
    # TODO: Is there _any_ object to test?
    # with pytest.warns(UserWarning):
    #     check_limits(scan([det], motor, -1, 1, 3))

    # monkey-patch some limits
    motor.limits = (-2, 2)
    # check_limits should do nothing here
    check_limits(scan([det], motor, -1, 1, 3))

    # check_limits should error if limits are exceeded only if object raises
    # this object does not raise
    check_limits(scan([det], motor, -3, 3, 3))

    # check_limits should raise if limits are equal only if object raises
    # this object does not raise
    motor.limits = (2, 2)
    check_limits(scan([det], motor, -1, 1, 3))


def test_check_limits_needs_RE():
    with pytest.raises(RuntimeError) as ctx:
        check_limits([])
    assert str(ctx.value) == "Bluesky event loop not running"


def test_plot_raster_path(hw):
    det = hw.det
    motor1 = hw.motor1
    motor2 = hw.motor2
    plan = grid_scan([det],
                     motor1, -5, 5, 10,
                     motor2, -7, 7, 15, True)
    plot_raster_path(plan, 'motor1', 'motor2', probe_size=.3)
34
76
0.636583
0
0
0
0
0
0
0
0
644
0.259468
b9e09def642ce98a753ac3053c44b1ba7d862f16
4,850
py
Python
shutTheBox/main.py
robi1467/shut-the-box
ed1a8f13bc74caa63361453e723768a9cbe1dac4
[ "MIT" ]
null
null
null
shutTheBox/main.py
robi1467/shut-the-box
ed1a8f13bc74caa63361453e723768a9cbe1dac4
[ "MIT" ]
null
null
null
shutTheBox/main.py
robi1467/shut-the-box
ed1a8f13bc74caa63361453e723768a9cbe1dac4
[ "MIT" ]
null
null
null
import random numbers_list = [1,2,3,4,5,6,7,8,9,10] game_won = False game_completed = False #Stats games_played = 0 games_won = 0 games_lost = 0 average_score = 0 total_score = 0 def welcome(): welcome_message = "Welcome to shut the box" print(welcome_message) i = 0 result = "" while i < len(numbers_list): if i < len(numbers_list)-1: result += str(numbers_list[i]) + " " else: result += str(numbers_list[i]) i+=1 print(result) def dice_roll(amount): total = 0 i = 0 while i < amount: total += random.randint(1, 6) i+=1 return total def choose_dice_amount(): amount = 0 while True: try: amount = int(input("You choose to roll one or two dice. Please enter either '1' or '2': ")) except ValueError: print("INVALID ENTRY PLEASE TRY AGAIN") continue if amount == 1 or amount == 2: return amount else: print("INVALID ENTRY PLEASE TRY AGAIN!") continue return amount def choose_number_to_drop(target_amount): entered = 0 goal = target_amount entered_numbers = list() while goal != 0: try: print("Available numbers: " + str(numbers_list) + " to get to " + str(target_amount)) entered = int(input("Please enter a number that is available: ")) except ValueError: print("Invalid Entry, please try again") continue if entered not in numbers_list or entered in entered_numbers: print("Invalid Entry, please try again") continue else: goal -= entered entered_numbers.append(entered) if goal < 0: goal = target_amount entered_numbers = list() i = 0 while i < len(entered_numbers): numbers_list.remove(entered_numbers[i]) i += 1 def check_lost_game(rolled): value = True if rolled not in numbers_list: i = 0 while i < len(numbers_list): j = i+1 while j< len(numbers_list): if numbers_list[i] + numbers_list[j] == rolled: return False k = j+1 while k < len(numbers_list): if numbers_list[i] + numbers_list[j] + numbers_list[k] == rolled: return False l = k+1 while l < len(numbers_list): if numbers_list[i] + numbers_list[j] + numbers_list[k] + numbers_list[l] == rolled: return False l+=1 k+=1 j+=1 i +=1 else: value = False return value def end_game(): game_completed = True return game_completed def win_game(): game_won = True return game_won def score_game(): score = 0 i = 0 while i < len(numbers_list): score += numbers_list[i] i+=1 return score def all_less_than_7(): less_than_7 = True i = 0 while i < len(numbers_list): if numbers_list[i] > 6: less_than_7 = False i += 1 return less_than_7 def keep_playing_input(): while True: try: continue_playing = (input("Do you wish to keep playing? 
y or n: ")) except ValueError: print("Invalid choice; please try again") continue if continue_playing.lower == "y": return True else: return False keep_playing = True while keep_playing: numbers_list = [1,2,3,4,5,6,7,8,9,10] welcome() roll_total = 0 while roll_total < 55: dice_amount = 2 if all_less_than_7(): dice_amount = choose_dice_amount() dice_total = dice_roll(dice_amount) print("Your roll is: " + str(dice_total)) if check_lost_game(dice_total): print("It is impossible to continue the game with this roll") break choose_number_to_drop(dice_total) roll_total += dice_total if roll_total == 55: game_won = win_game() if game_won: print("Congrats you won!!!!") games_played +=1 games_won +=1 else: print("You lose, your score is " + str(score_game())) print("Numbers remaining: " + str(numbers_list)) games_played += 1 games_lost += 1 total_score += score_game() average_score = total_score/games_played game_won = False print("STATS:\n Games Played: " + str(games_played) + "\nGames Won: " + str(games_won) + "\nGames Lost: " + str(games_lost) + "\nAverage Score: " + str(average_score) + "\nTotal Score: " + str(total_score)) keep_playing_input()
28.034682
127
0.549897
0
0
0
0
0
0
0
0
621
0.128041
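Editor's note: check_lost_game() in the record above answers "can the roll still be covered by 1-4 of the remaining tiles?" with four nested loops. The same test written with itertools.combinations, as a sketch (note the inverted sense: the original returns True when the game is lost, this helper returns True when the roll is still playable):

from itertools import combinations

def roll_is_playable(rolled, numbers):
    # try every subset of up to four remaining tiles, as the nested loops do
    for size in range(1, 5):
        for combo in combinations(numbers, size):
            if sum(combo) == rolled:
                return True
    return False

print(roll_is_playable(7, [1, 2, 3, 4, 5, 6, 8, 9, 10]))  # True, e.g. 3 + 4
print(roll_is_playable(2, [5, 6, 7, 8, 9, 10]))           # False, nothing sums to 2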
b9e0c71df07f6cc03e495d11899558d7e577552a
3,803
py
Python
repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts_selinux.py
sm00th/leapp-repository
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
[ "Apache-2.0" ]
21
2018-11-20T15:58:39.000Z
2022-03-15T19:57:24.000Z
repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts_selinux.py
sm00th/leapp-repository
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
[ "Apache-2.0" ]
732
2018-11-21T18:33:26.000Z
2022-03-31T16:16:24.000Z
repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts_selinux.py
sm00th/leapp-repository
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
[ "Apache-2.0" ]
85
2018-11-20T17:55:00.000Z
2022-03-29T09:40:31.000Z
import warnings import pytest from leapp.libraries.actor.systemfacts import get_selinux_status from leapp.models import SELinuxFacts no_selinux = False try: import selinux except ImportError: no_selinux = True warnings.warn( 'Tests which uses `selinux` will be skipped' ' due to library unavailability.', ImportWarning) reason_to_skip_msg = "Selinux is not available" # FIXME: create valid tests... @pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg) def test_selinux_enabled_enforcing(monkeypatch): """ Test case SELinux is enabled in enforcing mode """ monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 1) monkeypatch.setattr(selinux, 'security_getenforce', lambda: 1) monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 1]) monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 1) monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted']) expected_data = {'policy': 'targeted', 'mls_enabled': True, 'enabled': True, 'runtime_mode': 'enforcing', 'static_mode': 'enforcing'} assert SELinuxFacts(**expected_data) == get_selinux_status() @pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg) def test_selinux_enabled_permissive(monkeypatch): """ Test case SELinux is enabled in permissive mode """ monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 1) monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0) monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 0]) monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 1) monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted']) expected_data = {'policy': 'targeted', 'mls_enabled': True, 'enabled': True, 'runtime_mode': 'permissive', 'static_mode': 'permissive'} assert SELinuxFacts(**expected_data) == get_selinux_status() @pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg) def test_selinux_disabled(monkeypatch): """ Test case SELinux is disabled """ monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 0) monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0) monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 0]) monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 0) monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted']) expected_data = {'policy': 'targeted', 'mls_enabled': False, 'enabled': False, 'runtime_mode': 'permissive', 'static_mode': 'permissive'} assert SELinuxFacts(**expected_data) == get_selinux_status() class MockNoConfigFileOSError(object): def __init__(self): raise OSError @pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg) def test_selinux_disabled_no_config_file(monkeypatch): """ Test case SELinux is disabled """ monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 0) monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0) monkeypatch.setattr(selinux, 'selinux_getenforcemode', MockNoConfigFileOSError) monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 0) monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted']) expected_data = {'policy': 'targeted', 'mls_enabled': False, 'enabled': False, 'runtime_mode': 'permissive', 'static_mode': 'disabled'} assert SELinuxFacts(**expected_data) == get_selinux_status()
38.414141
83
0.674993
84
0.022088
0
0
3,271
0.86011
0
0
1,196
0.314489
b9e1517b77ef8c0c8643211eb516389a83db60f8
2,608
py
Python
Phase-1/Python Basic 2/Day-24.py
emetowinner/python-challenges
520da69da0f2632deb1e81136d2b62d40555a4aa
[ "MIT" ]
3
2020-05-21T20:19:40.000Z
2022-02-27T08:20:10.000Z
Phase-1/Python Basic 2/Day-24.py
emetowinner/python-challenges
520da69da0f2632deb1e81136d2b62d40555a4aa
[ "MIT" ]
null
null
null
Phase-1/Python Basic 2/Day-24.py
emetowinner/python-challenges
520da69da0f2632deb1e81136d2b62d40555a4aa
[ "MIT" ]
4
2020-05-12T16:41:52.000Z
2020-05-21T20:17:22.000Z
""" 1. Write a Python program to reverse only the vowels of a given string. Sample Output: w3resuorce Python Perl ASU 2. Write a Python program to check whether a given integer is a palindrome or not. Note: An integer is a palindrome when it reads the same backward as forward. Negative numbers are not palindromic. Sample Output: False True False 3. Write a Python program to remove the duplicate elements of a given array of numbers such that each element appear only once and return the new length of the given array. Sample Output: 5 4 4. Write a Python program to calculate the maximum profit from selling and buying values of stock. An array of numbers represent the stock prices in chronological order. For example, given [8, 10, 7, 5, 7, 15], the function will return 10, since the buying value of the stock is 5 dollars and sell value is 15 dollars. Sample Output: 10 7 0 5. Write a Python program to remove all instances of a given value from a given array of integers and find the length of the new array. For example, given [8, 10, 7, 5, 7, 15], the function will return 10, since the buying value of the stock is 5 dollars and sell value is 15 dollars. Sample Output: 6 0 5 0 6. Write a Python program to find the starting and ending position of a given value in a given array of integers, sorted in ascending order. If the target is not found in the array, return [0, 0]. Input: [5, 7, 7, 8, 8, 8] target value = 8 Output: [0, 5] Input: [1, 3, 6, 9, 13, 14] target value = 4 Output: [0, 0] Sample Output: [0, 5] [0, 0] 7. The price of a given stock on each day is stored in an array. Write a Python program to find the maximum profit in one transaction i.e., buy one and sell one share of the stock from the given price value of the said array. You cannot sell a stock before you buy one. Input (Stock price of each day): [224, 236, 247, 258, 259, 225] Output: 35 Explanation: 236 - 224 = 12 247 - 224 = 23 258 - 224 = 34 259 - 224 = 35 225 - 224 = 1 247 - 236 = 11 258 - 236 = 22 259 - 236 = 23 225 - 236 = -11 258 - 247 = 11 259 - 247 = 12 225 - 247 = -22 259 - 258 = 1 225 - 258 = -33 225 - 259 = -34 8. Write a Python program to print a given N by M matrix of numbers line by line in forward > backwards > forward >... order. Input matrix: [[1, 2, 3,4], [5, 6, 7, 8], [0, 6, 2, 8], [2, 3, 0, 2]] Output: 1 2 3 4 8 7 6 5 0 6 2 8 2 0 3 2 9. Write a Python program to compute the largest product of three integers from a given list of integers. Sample Output: 4000 8 120 10. Write a Python program to find the first missing positive integer that does not exist in a given list. """
25.821782
204
0.717408
0
0
0
0
0
0
0
0
2,607
0.999617
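Editor's note: exercise 7 above walks through every pairwise difference by hand; a common single-pass approach tracks the lowest price so far and the best spread. A sketch (the function name is mine, not from the exercise file):

def max_profit(prices):
    best, lowest = 0, float("inf")
    for price in prices:
        lowest = min(lowest, price)        # cheapest buy seen so far
        best = max(best, price - lowest)   # best profit selling at today's price
    return best

print(max_profit([224, 236, 247, 258, 259, 225]))  # 35, matching the worked explanation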
b9e1d3ca3ecc29b35600c2af35a03fcf35a771c0
3,413
py
Python
etl/parsers/etw/Microsoft_Windows_IPxlatCfg.py
IMULMUL/etl-parser
76b7c046866ce0469cd129ee3f7bb3799b34e271
[ "Apache-2.0" ]
104
2020-03-04T14:31:31.000Z
2022-03-28T02:59:36.000Z
etl/parsers/etw/Microsoft_Windows_IPxlatCfg.py
IMULMUL/etl-parser
76b7c046866ce0469cd129ee3f7bb3799b34e271
[ "Apache-2.0" ]
7
2020-04-20T09:18:39.000Z
2022-03-19T17:06:19.000Z
etl/parsers/etw/Microsoft_Windows_IPxlatCfg.py
IMULMUL/etl-parser
76b7c046866ce0469cd129ee3f7bb3799b34e271
[ "Apache-2.0" ]
16
2020-03-05T18:55:59.000Z
2022-03-01T10:19:28.000Z
# -*- coding: utf-8 -*- """ Microsoft-Windows-IPxlatCfg GUID : 3e5ac668-af52-4c15-b99b-a3e7a6616ebd """ from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct from etl.utils import WString, CString, SystemTime, Guid from etl.dtyp import Sid from etl.parsers.etw.core import Etw, declare, guid @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1001, version=0) class Microsoft_Windows_IPxlatCfg_1001_0(Etw): pattern = Struct( "ErrorString" / CString, "ErrorCode" / Int32ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1002, version=0) class Microsoft_Windows_IPxlatCfg_1002_0(Etw): pattern = Struct( "ErrorString" / CString, "ErrorCode" / Int32ul, "InterfaceLuid" / Int64ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1003, version=0) class Microsoft_Windows_IPxlatCfg_1003_0(Etw): pattern = Struct( "InfoString" / CString ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1005, version=0) class Microsoft_Windows_IPxlatCfg_1005_0(Etw): pattern = Struct( "IPv4Address" / Int32ul, "IPv4Prefix" / Int32ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1006, version=0) class Microsoft_Windows_IPxlatCfg_1006_0(Etw): pattern = Struct( "InfoString" / CString, "InterfaceLuid" / Int64ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1007, version=0) class Microsoft_Windows_IPxlatCfg_1007_0(Etw): pattern = Struct( "InterfaceLuid" / Int64ul, "PrefixLength" / Int32ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1008, version=0) class Microsoft_Windows_IPxlatCfg_1008_0(Etw): pattern = Struct( "InterfaceLuid" / Int64ul, "IPv4Address" / Int32ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1009, version=0) class Microsoft_Windows_IPxlatCfg_1009_0(Etw): pattern = Struct( "InterfaceLuid" / Int64ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1010, version=0) class Microsoft_Windows_IPxlatCfg_1010_0(Etw): pattern = Struct( "InterfaceLuid" / Int64ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1011, version=0) class Microsoft_Windows_IPxlatCfg_1011_0(Etw): pattern = Struct( "InfoString" / CString, "MTU" / Int32ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1101, version=0) class Microsoft_Windows_IPxlatCfg_1101_0(Etw): pattern = Struct( "InterfaceLuid" / Int64ul, "Metric" / Int32ul, "RemotePrefixLength" / Int32ul, "LocalPrefixLength" / Int32ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1102, version=0) class Microsoft_Windows_IPxlatCfg_1102_0(Etw): pattern = Struct( "InterfaceLuid" / Int64ul, "Metric" / Int32ul, "RemotePrefixLength" / Int32ul, "LocalPrefixLength" / Int32ul ) @declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1103, version=0) class Microsoft_Windows_IPxlatCfg_1103_0(Etw): pattern = Struct( "InterfaceLuid" / Int64ul, "PrefixLength" / Int32ul )
29.17094
123
0.699092
1,893
0.554644
0
0
3,011
0.882215
0
0
980
0.287137
b9e2c12e3855c30001fd37ab610587d3e95c803d
535
py
Python
microservices/users/config.py
Levakin/sanic-test-app
d96a54a21f6d0d3b262bbc7bc75f5fa3b12c3b61
[ "Apache-2.0" ]
null
null
null
microservices/users/config.py
Levakin/sanic-test-app
d96a54a21f6d0d3b262bbc7bc75f5fa3b12c3b61
[ "Apache-2.0" ]
null
null
null
microservices/users/config.py
Levakin/sanic-test-app
d96a54a21f6d0d3b262bbc7bc75f5fa3b12c3b61
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
import os
from distutils.util import strtobool


class Config:
    DEBUG = bool(strtobool(os.getenv('DEBUG', "False")))
    DATABASE_URI = os.getenv('DATABASE_URI', '127.0.0.1:27017')
    WORKERS = int(os.getenv('WORKERS', 2))
    LOGO = os.getenv('LOGO', None)
    HOST = os.getenv('HOST', '127.0.0.1')
    PORT = int(os.getenv('PORT', 8000))
    SECRET = os.getenv('SECRET', 'secret')
    LOGIN_MIN_LENGTH = int(os.getenv('LOGIN_MIN_LENGTH', 1))
    LOGIN_MAX_LENGTH = int(os.getenv('LOGIN_MAX_LENGTH', 32))
31.470588
63
0.646729
460
0.859813
0
0
0
0
0
0
158
0.295327
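Editor's note: the config record above routes DEBUG through distutils' strtobool, which accepts "y/yes/t/true/on/1" and "n/no/f/false/off/0" (case-insensitive) and raises ValueError for anything else. A quick illustration:

from distutils.util import strtobool

print(bool(strtobool("False")))  # False -- the default used for DEBUG
print(bool(strtobool("yes")))    # True
print(bool(strtobool("1")))      # True
# bool(strtobool("enabled"))     # raises ValueError rather than defaulting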
b9e36baa14d5265769af32c8ed910969e39eaf3a
199
py
Python
semantic-python/test/fixtures/4-01-lambda-literals.py
Temurson/semantic
2e9cd2c006cec9a0328791e47d8c6d60af6d5a1b
[ "MIT" ]
8,844
2019-05-31T15:47:12.000Z
2022-03-31T18:33:51.000Z
semantic-python/test/fixtures/4-01-lambda-literals.py
Qanora/semantic
b0eda9a61bbc690a342fb177cfc12eec8c1c001c
[ "MIT" ]
401
2019-05-31T18:30:26.000Z
2022-03-31T16:32:29.000Z
semantic-python/test/fixtures/4-01-lambda-literals.py
Qanora/semantic
b0eda9a61bbc690a342fb177cfc12eec8c1c001c
[ "MIT" ]
504
2019-05-31T17:55:03.000Z
2022-03-30T04:15:04.000Z
# CHECK-TREE: { const <- \x -> \y -> x; y <- const #true #true; z <- const #false #false; #record { const: const, y : y, z: z, }}
const = lambda x, y: x
y = const(True, True)
z = const(False, False)
39.8
129
0.557789
0
0
0
0
0
0
0
0
129
0.648241
b9e379a95e3f4e855adb56ee1112dc1aa95e6a78
9,351
py
Python
main.py
mithi/semantic-segmentation
85e9df04397745e0c6ab252e30991fa9b514ec1a
[ "MIT" ]
33
2017-08-24T16:38:15.000Z
2022-03-17T15:55:52.000Z
main.py
mithi/semantic-segmentation
85e9df04397745e0c6ab252e30991fa9b514ec1a
[ "MIT" ]
3
2018-10-12T11:17:22.000Z
2019-05-30T09:49:11.000Z
main.py
mithi/semantic-segmentation
85e9df04397745e0c6ab252e30991fa9b514ec1a
[ "MIT" ]
26
2017-09-17T09:09:52.000Z
2020-01-14T02:48:56.000Z
import tensorflow as tf import os.path import warnings from distutils.version import LooseVersion import glob import helper import project_tests as tests #-------------------------- # USER-SPECIFIED DATA #-------------------------- # Tune these parameters NUMBER_OF_CLASSES = 2 IMAGE_SHAPE = (160, 576) EPOCHS = 20 BATCH_SIZE = 1 LEARNING_RATE = 0.0001 DROPOUT = 0.75 # Specify these directory paths DATA_DIRECTORY = './data' RUNS_DIRECTORY = './runs' TRAINING_DATA_DIRECTORY ='./data/data_road/training' NUMBER_OF_IMAGES = len(glob.glob('./data/data_road/training/calib/*.*')) VGG_PATH = './data/vgg' all_training_losses = [] # Used for plotting to visualize if our training is going well given parameters #-------------------------- # DEPENDENCY CHECK #-------------------------- # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__) print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) #-------------------------- # PLACEHOLDER TENSORS #-------------------------- correct_label = tf.placeholder(tf.float32, [None, IMAGE_SHAPE[0], IMAGE_SHAPE[1], NUMBER_OF_CLASSES]) learning_rate = tf.placeholder(tf.float32) keep_prob = tf.placeholder(tf.float32) #-------------------------- # FUNCTIONS #-------------------------- def load_vgg(sess, vgg_path): """ Load Pretrained VGG Model into TensorFlow. sess: TensorFlow Session vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb" return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3, layer4, layer7) """ # load the model and weights model = tf.saved_model.loader.load(sess, ['vgg16'], vgg_path) # Get Tensors to be returned from graph graph = tf.get_default_graph() image_input = graph.get_tensor_by_name('image_input:0') keep_prob = graph.get_tensor_by_name('keep_prob:0') layer3 = graph.get_tensor_by_name('layer3_out:0') layer4 = graph.get_tensor_by_name('layer4_out:0') layer7 = graph.get_tensor_by_name('layer7_out:0') return image_input, keep_prob, layer3, layer4, layer7 def conv_1x1(layer, layer_name): """ Return the output of a 1x1 convolution of a layer """ return tf.layers.conv2d(inputs = layer, filters = NUMBER_OF_CLASSES, kernel_size = (1, 1), strides = (1, 1), name = layer_name) def upsample(layer, k, s, layer_name): """ Return the output of transpose convolution given kernel_size k and strides s """ return tf.layers.conv2d_transpose(inputs = layer, filters = NUMBER_OF_CLASSES, kernel_size = (k, k), strides = (s, s), padding = 'same', name = layer_name) def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes = NUMBER_OF_CLASSES): """ Create the layers for a fully convolutional network. Build skip-layers using the vgg layers. 
vgg_layerX_out: TF Tensor for VGG Layer X output num_classes: Number of classes to classify return: The Tensor for the last layer of output """ # Use a shorter variable name for simplicity layer3, layer4, layer7 = vgg_layer3_out, vgg_layer4_out, vgg_layer7_out # Apply a 1x1 convolution to encoder layers layer3x = conv_1x1(layer = layer3, layer_name = "layer3conv1x1") layer4x = conv_1x1(layer = layer4, layer_name = "layer4conv1x1") layer7x = conv_1x1(layer = layer7, layer_name = "layer7conv1x1") # Add decoder layers to the network with skip connections and upsampling # Note: the kernel size and strides are the same as the example in Udacity Lectures # Semantic Segmentation Scene Understanding Lesson 10-9: FCN-8 - Decoder decoderlayer1 = upsample(layer = layer7x, k = 4, s = 2, layer_name = "decoderlayer1") decoderlayer2 = tf.add(decoderlayer1, layer4x, name = "decoderlayer2") decoderlayer3 = upsample(layer = decoderlayer2, k = 4, s = 2, layer_name = "decoderlayer3") decoderlayer4 = tf.add(decoderlayer3, layer3x, name = "decoderlayer4") decoderlayer_output = upsample(layer = decoderlayer4, k = 16, s = 8, layer_name = "decoderlayer_output") return decoderlayer_output def optimize(nn_last_layer, correct_label, learning_rate, num_classes = NUMBER_OF_CLASSES): """ Build the TensorFLow loss and optimizer operations. nn_last_layer: TF Tensor of the last layer in the neural network correct_label: TF Placeholder for the correct label image learning_rate: TF Placeholder for the learning rate num_classes: Number of classes to classify return: Tuple of (logits, train_op, cross_entropy_loss) """ # Reshape 4D tensors to 2D, each row represents a pixel, each column a class logits = tf.reshape(nn_last_layer, (-1, num_classes)) class_labels = tf.reshape(correct_label, (-1, num_classes)) # The cross_entropy_loss is the cost which we are trying to minimize to yield higher accuracy cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = class_labels) cross_entropy_loss = tf.reduce_mean(cross_entropy) # The model implements this operation to find the weights/parameters that would yield correct pixel labels train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss) return logits, train_op, cross_entropy_loss def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate): """ Train neural network and print out the loss during training. sess: TF Session epochs: Number of epochs batch_size: Batch size get_batches_fn: Function to get batches of training data. 
Call using get_batches_fn(batch_size) train_op: TF Operation to train the neural network cross_entropy_loss: TF Tensor for the amount of loss input_image: TF Placeholder for input images correct_label: TF Placeholder for label images keep_prob: TF Placeholder for dropout keep probability learning_rate: TF Placeholder for learning rate """ for epoch in range(EPOCHS): losses, i = [], 0 for images, labels in get_batches_fn(BATCH_SIZE): i += 1 feed = { input_image: images, correct_label: labels, keep_prob: DROPOUT, learning_rate: LEARNING_RATE } _, partial_loss = sess.run([train_op, cross_entropy_loss], feed_dict = feed) print("---> iteration: ", i, " partial loss:", partial_loss) losses.append(partial_loss) training_loss = sum(losses) / len(losses) all_training_losses.append(training_loss) print("------------------") print("epoch: ", epoch + 1, " of ", EPOCHS, "training loss: ", training_loss) print("------------------") def run_tests(): tests.test_layers(layers) tests.test_optimize(optimize) tests.test_for_kitti_dataset(DATA_DIRECTORY) tests.test_train_nn(train_nn) def run(): """ Run a train a model and save output images resulting from the test image fed on the trained model """ # Get vgg model if we can't find it where it should be helper.maybe_download_pretrained_vgg(DATA_DIRECTORY) # A function to get batches get_batches_fn = helper.gen_batch_function(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE) with tf.Session() as session: # Returns the three layers, keep probability and input layer from the vgg architecture image_input, keep_prob, layer3, layer4, layer7 = load_vgg(session, VGG_PATH) # The resulting network architecture from adding a decoder on top of the given vgg model model_output = layers(layer3, layer4, layer7, NUMBER_OF_CLASSES) # Returns the output logits, training operation and cost operation to be used # - logits: each row represents a pixel, each column a class # - train_op: function used to get the right parameters to the model to correctly label the pixels # - cross_entropy_loss: function outputting the cost which we are minimizing, lower cost should yield higher accuracy logits, train_op, cross_entropy_loss = optimize(model_output, correct_label, learning_rate, NUMBER_OF_CLASSES) # Initialize all variables session.run(tf.global_variables_initializer()) session.run(tf.local_variables_initializer()) # Train the neural network train_nn(session, EPOCHS, BATCH_SIZE, get_batches_fn, train_op, cross_entropy_loss, image_input, correct_label, keep_prob, learning_rate) # Run the model with the test images and save each painted output image (roads painted green) helper.save_inference_samples(RUNS_DIRECTORY, DATA_DIRECTORY, session, IMAGE_SHAPE, logits, keep_prob, image_input) #-------------------------- # MAIN #-------------------------- if __name__ == "__main__": run_tests() run() # Run a train a model and save output images resulting from the test image fed on the trained model print(all_training_losses)
37.8583
146
0.69276
0
0
0
0
0
0
0
0
4,277
0.457384
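Editor's note: the layers() docstring in the record above builds the standard FCN-8 decoder: 1x1 convolutions on VGG layers 3/4/7, two 2x upsamplings with skip additions, then an 8x upsampling back to the input size. A quick shape check for IMAGE_SHAPE = (160, 576), assuming the usual VGG16 downsampling of 8x/16x/32x at layer3/layer4/layer7 (a sketch, not taken from the repo):

H, W = 160, 576
layer3 = (H // 8, W // 8)     # (20, 72)
layer4 = (H // 16, W // 16)   # (10, 36)
layer7 = (H // 32, W // 32)   # (5, 18)

# conv2d_transpose with padding='same' and stride s multiplies each side by s
decoder1 = tuple(d * 2 for d in layer7)    # (10, 36), added to layer4
decoder3 = tuple(d * 2 for d in decoder1)  # (20, 72), added to layer3
output = tuple(d * 8 for d in decoder3)    # (160, 576), back to full resolution
print(layer3, layer4, layer7, decoder1, decoder3, output)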
b9e38ca4d963e2aa4de106573e34682092b6337e
22,356
py
Python
tests/scanner/audit/log_sink_rules_engine_test.py
BrunoReboul/forseti-security
9d4a61b3e5a5d22a4330d15ddf61063fc9079071
[ "Apache-2.0" ]
null
null
null
tests/scanner/audit/log_sink_rules_engine_test.py
BrunoReboul/forseti-security
9d4a61b3e5a5d22a4330d15ddf61063fc9079071
[ "Apache-2.0" ]
null
null
null
tests/scanner/audit/log_sink_rules_engine_test.py
BrunoReboul/forseti-security
9d4a61b3e5a5d22a4330d15ddf61063fc9079071
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the LogSinkRulesEngine.""" import unittest import mock from tests.unittest_utils import ForsetiTestCase from tests.unittest_utils import get_datafile_path from google.cloud.forseti.common.gcp_type.billing_account import BillingAccount from google.cloud.forseti.common.gcp_type.folder import Folder from google.cloud.forseti.common.gcp_type.log_sink import LogSink from google.cloud.forseti.common.gcp_type.organization import Organization from google.cloud.forseti.common.gcp_type.project import Project from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError class LogSinkRulesEngineTest(ForsetiTestCase): """Tests for the LogSinkRulesEngine.""" def setUp(self): """Set up GCP resources for tests.""" self.lsre = lsre self.lsre.LOGGER = mock.MagicMock() # Set up resources in the following hierarchy: # +-----> billing_acct_abcd # | # | # +-----------------------> proj-1 # | # | # org_234 +-----> folder_56 +-----> proj-2 # | # | # +-----------------------> proj-3 self.org_234 = Organization( '234', display_name='Organization 234', full_name='organization/234/', data='fake_org_data_234') self.billing_acct_abcd = BillingAccount( 'ABCD-1234', display_name='Billing Account ABCD', full_name='organization/234/billingAccount/ABCD-1234/', data='fake_billing_account_data_abcd') self.folder_56 = Folder( '56', display_name='Folder 56', full_name='organization/234/folder/56/', data='fake_folder_data456456') self.proj_1 = Project( 'proj-1', project_number=11223344, display_name='My project 1', parent=self.org_234, full_name='organization/234/project/proj-1/', data='fake_project_data_2341') self.proj_2 = Project( 'proj-2', project_number=223344, display_name='My project 2', parent=self.folder_56, full_name='organization/234/folder/56/project/proj-2/', data='fake_project_data_4562') self.proj_3 = Project( 'proj-3', project_number=33445566, display_name='My project 3', parent=self.org_234, full_name='organization/234/project/proj-3/', data='fake_project_data_1233') def get_engine_with_valid_rules(self): """Create a rule engine build with a valid rules file.""" rules_local_path = get_datafile_path( __file__, 'log_sink_test_valid_rules.yaml') rules_engine = self.lsre.LogSinkRulesEngine( rules_file_path=rules_local_path) rules_engine.build_rule_book() return rules_engine def test_build_rule_book_from_local_yaml_file_works(self): """Tests that a RuleBook is built correctly with a yaml file.""" rules_engine = self.get_engine_with_valid_rules() # Creates 'self' rules for 5 difference resources and 'children' rules # for 2. 
self.assertEqual( 6, len(rules_engine.rule_book.resource_rules_map['self'])) self.assertEqual( 2, len(rules_engine.rule_book.resource_rules_map['children'])) self_rule_resources = [] for resource in rules_engine.rule_book.resource_rules_map['self']: self_rule_resources.append(resource.name) expected_rule_resources = [ 'billingAccounts/ABCD-1234', 'folders/56', 'organizations/234', 'projects/proj-1', 'projects/proj-2', 'projects/proj-3'] self.assertEqual(expected_rule_resources, sorted(self_rule_resources)) child_rule_resources = [] for resource in rules_engine.rule_book.resource_rules_map['children']: child_rule_resources.append(resource.name) expected_rule_resources = ['folders/56', 'organizations/234'] self.assertEqual(expected_rule_resources, sorted(child_rule_resources)) def test_build_rule_book_invalid_applies_to_fails(self): """Tests that a rule with invalid applies_to type cannot be created.""" rules_local_path = get_datafile_path( __file__, 'log_sink_test_invalid_rules.yaml') rules_engine = self.lsre.LogSinkRulesEngine( rules_file_path=rules_local_path) with self.assertRaises(InvalidRulesSchemaError): rules_engine.build_rule_book() def test_project_with_no_violations(self): """Tests that no violations are produced for a correct project.""" rules_engine = self.get_engine_with_valid_rules() # proj-1 needs an Audit Log sink. log_sinks = [ LogSink( sink_id='audit_logs_to_bq', destination=('bigquery.googleapis.com/projects/my-audit-logs/' 'datasets/proj_1_logs'), sink_filter='logName:"logs/cloudaudit.googleapis.com"', include_children=False, writer_identity='serviceAccount:[email protected]', parent=self.proj_1, raw_json='_SINK_1_' ), LogSink( sink_id='compute_logs_saver', destination=('bigquery.googleapis.com/projects/proj_1/' 'datasets/compute_logs'), sink_filter='resource.type="gce_instance"', include_children=False, writer_identity=('serviceAccount:p12345-67890@' 'gcp-sa-logging.iam.gserviceaccount.com'), parent=self.proj_1, raw_json='_SINK_2_' ) ] actual_violations = rules_engine.find_violations( self.proj_1, log_sinks) self.assertEqual(set(), actual_violations) def test_folder_with_no_violations(self): """Tests that no violations are produced for a correct folder.""" rules_engine = self.get_engine_with_valid_rules() # Rules disallow any folder-level LogSinks. actual_violations = rules_engine.find_violations(self.folder_56, []) self.assertEqual(set(), actual_violations) def test_billing_account_with_no_violations(self): """Tests that no violations are produced for a correct billing acct.""" rules_engine = self.get_engine_with_valid_rules() log_sinks = [ LogSink( sink_id='billing_logs', destination=('bigquery.googleapis.com/projects/my-audit-logs/' 'datasets/billing_logs'), sink_filter='', include_children=False, writer_identity='serviceAccount:[email protected]', parent=self.billing_acct_abcd, raw_json='__SINK_1__' ), ] actual_violations = rules_engine.find_violations( self.billing_acct_abcd, log_sinks) self.assertEqual(set(), actual_violations) def test_org_with_no_violations(self): """Tests that no violations are produced for a correct organization.""" rules_engine = self.get_engine_with_valid_rules() # Org needs an Audit Log sink, but to any destination. 
log_sinks = [ LogSink( sink_id='audit_logs_to_pubsub', destination=('pubsub.googleapis.com/projects/proj-3/topics/' 'org-audit-logs'), sink_filter='logName:"logs/cloudaudit.googleapis.com"', include_children=True, writer_identity='serviceAccount:[email protected]', parent=self.org_234, raw_json='__SINK_1__' ) ] actual_violations = rules_engine.find_violations( self.org_234, log_sinks) self.assertEqual(set(), actual_violations) def test_project_missing_required_sinks(self): """Tests violations are produced for project missing required sinks.""" rules_engine = self.get_engine_with_valid_rules() # proj-2 needs an Audit Log sink, by org-level rules, and a pubsub # sink, by folder-level rules. log_sinks = [ LogSink( sink_id='non_audit_logs_to_bq', destination=('bigquery.googleapis.com/projects/my-audit-logs/' 'datasets/proj_2_logs'), sink_filter='logName:"logs/non-cloudaudit.googleapis.com"', include_children=False, writer_identity='serviceAccount:[email protected]', parent=self.proj_2, raw_json='__SINK_1__' ), LogSink( sink_id='compute_logs_saver', destination=('bigquery.googleapis.com/projects/proj_2/' 'datasets/compute_logs'), sink_filter='resource.type="gce_instance"', include_children=False, writer_identity=('serviceAccount:p12345-67890@' 'gcp-sa-logging.iam.gserviceaccount.com'), parent=self.proj_2, raw_json='__SINK_2__' ) ] actual_violations = rules_engine.find_violations( self.proj_2, log_sinks) expected_violations = set([ lsre.Rule.RuleViolation( resource_name='proj-2', resource_type='project', resource_id='proj-2', full_name='organization/234/folder/56/project/proj-2/', rule_name='Require Audit Log sinks in all projects.', rule_index=0, violation_type='LOG_SINK_VIOLATION', sink_destination=('^bigquery\\.googleapis\\.com\\/projects\\/' 'my\\-audit\\-logs\\/datasets\\/.+$'), sink_filter=('^logName\\:\\"logs\\/' 'cloudaudit\\.googleapis\\.com\\"$'), sink_include_children='*', resource_data='' ), lsre.Rule.RuleViolation( resource_name='proj-2', resource_type='project', resource_id='proj-2', full_name='organization/234/folder/56/project/proj-2/', rule_name='Require a PubSub sink in folder-56 projects.', rule_index=3, violation_type='LOG_SINK_VIOLATION', sink_destination='^pubsub\\.googleapis\\.com\\/.+$', sink_filter='^$', sink_include_children='*', resource_data='' ) ]) self.assertEqual(expected_violations, actual_violations) def test_project_whitelist_violation(self): """Tests violations are produced for non-whitelisted sinks.""" rules_engine = self.get_engine_with_valid_rules() # proj-3 can only have BigQuery sinks. 
log_sinks = [ LogSink( sink_id='audit_logs_to_bq', destination=('bigquery.googleapis.com/projects/my-audit-logs/' 'datasets/proj_1_logs'), sink_filter='logName:"logs/cloudaudit.googleapis.com"', include_children=False, writer_identity='serviceAccount:[email protected]', parent=self.proj_3, raw_json='__SINK_1__' ), LogSink( sink_id='audit_logs_to_pubsub', destination=('pubsub.googleapis.com/projects/proj-3/topics/' 'proj-audit-logs'), sink_filter='logName:"logs/cloudaudit.googleapis.com"', include_children=True, writer_identity='serviceAccount:[email protected]', parent=self.proj_3, raw_json='__SINK_2__' ) ] actual_violations = rules_engine.find_violations( self.proj_3, log_sinks) expected_violations = set([ lsre.Rule.RuleViolation( resource_name='projects/proj-3/sinks/audit_logs_to_pubsub', resource_type='sink', resource_id='audit_logs_to_pubsub', full_name='organization/234/project/proj-3/audit_logs_to_pubsub/', rule_name='Only allow BigQuery sinks in Proj-1 and Proj-3.', rule_index=4, violation_type='LOG_SINK_VIOLATION', sink_destination=('pubsub.googleapis.com/projects/proj-3/' 'topics/proj-audit-logs'), sink_filter='logName:"logs/cloudaudit.googleapis.com"', sink_include_children=True, resource_data='__SINK_2__' ) ]) self.assertEqual(expected_violations, actual_violations) def test_folder_blacklist_violation(self): """Tests violations are produced for blacklisted sinks.""" rules_engine = self.get_engine_with_valid_rules() # Rules disallow any folder-level LogSinks. log_sinks = [ LogSink( sink_id='audit_logs_to_bq', destination=('bigquery.googleapis.com/projects/my-audit-logs/' 'datasets/folder_logs'), sink_filter='logName:"logs/cloudaudit.googleapis.com"', include_children=False, writer_identity='serviceAccount:[email protected]', parent=self.folder_56, raw_json='__SINK_1__' ) ] actual_violations = rules_engine.find_violations( self.folder_56, log_sinks) expected_violations = set([ lsre.Rule.RuleViolation( resource_name='folders/56/sinks/audit_logs_to_bq', resource_type='sink', resource_id='audit_logs_to_bq', full_name='organization/234/folder/56/audit_logs_to_bq/', rule_name='Disallow folder sinks.', rule_index=2, violation_type='LOG_SINK_VIOLATION', sink_destination=('bigquery.googleapis.com/projects/' 'my-audit-logs/datasets/folder_logs'), sink_filter='logName:"logs/cloudaudit.googleapis.com"', sink_include_children=False, resource_data='__SINK_1__') ]) self.assertEqual(expected_violations, actual_violations) def test_billing_account_with_whitelist_violations(self): """Tests violations are produced for billing account sinks.""" rules_engine = self.get_engine_with_valid_rules() log_sinks = [ LogSink( sink_id='billing_logs', destination=('bigquery.googleapis.com/projects/my-audit-logs/' 'datasets/wrong_dataset'), sink_filter='', include_children=False, writer_identity='serviceAccount:[email protected]', parent=self.billing_acct_abcd, raw_json='__SINK_1__' ), ] actual_violations = rules_engine.find_violations( self.billing_acct_abcd, log_sinks) expected_violations = set([ lsre.Rule.RuleViolation( resource_type='sink', resource_id='billing_logs', resource_name='billingAccounts/ABCD-1234/sinks/billing_logs', full_name='organization/234/billingAccount/ABCD-1234/billing_logs/', rule_name=('Only allow Billing Account sinks to audit logs ' 'project.'), rule_index=6, violation_type='LOG_SINK_VIOLATION', sink_destination=('bigquery.googleapis.com/projects/' 'my-audit-logs/datasets/wrong_dataset'), sink_filter='', sink_include_children=False, resource_data='__SINK_1__') ]) 
self.assertEqual(expected_violations, actual_violations) def test_org_missing_required_sinks(self): """Tests violations are produced for an org missing required sinks.""" rules_engine = self.get_engine_with_valid_rules() # Org needs an Audit Log sink, including children. log_sinks = [ LogSink( sink_id='sink_not_including_children', destination=('pubsub.googleapis.com/projects/proj-3/topics/' 'org-audit-logs'), sink_filter='logName:"logs/cloudaudit.googleapis.com"', include_children=False, writer_identity='serviceAccount:[email protected]', parent=self.org_234, raw_json='__SINK_1__' ), LogSink( sink_id='sink_with_wrong_filter', destination=('pubsub.googleapis.com/projects/proj-3/topics/' 'org-more-logs'), sink_filter='logName:"logs/otherapi.googleapis.com"', include_children=True, writer_identity='serviceAccount:[email protected]', parent=self.org_234, raw_json='__SINK_2__' ) ] actual_violations = rules_engine.find_violations( self.org_234, log_sinks) expected_violations = set([ lsre.Rule.RuleViolation( resource_name='234', resource_type='organization', resource_id='234', full_name='organization/234/', rule_name='Require an Org Level audit log sink.', rule_index=1, violation_type='LOG_SINK_VIOLATION', sink_destination='^.*$', sink_filter=('^logName\\:\\"logs\\/' 'cloudaudit\\.googleapis\\.com\\"$'), sink_include_children=True, resource_data='' ) ]) self.assertEqual(expected_violations, actual_violations) def test_add_invalid_rules(self): """Tests that adding invalid rules raises exceptions.""" rule_book = self.lsre.LogSinkRuleBook(global_configs=None) valid_resource = { 'type': 'organization', 'applies_to': 'children', 'resource_ids': ['1234'] } valid_sink_spec = { 'destination': 'bigquery.*', 'filter': '', 'include_children': '*' } rule_book.add_rule( { 'name': 'Valid rule', 'resource': [valid_resource], 'sink': valid_sink_spec, 'mode': 'whitelist' }, 0) bad_rules = [ {}, { 'name': 'Mising Resource', 'mode': 'whitelist', 'sink': valid_sink_spec, }, { 'name': 'Mising sink', 'resource': [valid_resource], 'mode': 'whitelist', }, { 'name': 'Bad mode', 'resource': [valid_resource], 'sink': valid_sink_spec, 'mode': 'other', }, { 'name': 'Bad resource type', 'resource': [{ 'type': 'bucket', 'applies_to': 'self', 'resource_ids': ['bucket-1'] }], 'sink': valid_sink_spec, 'mode': 'whitelist' }, { 'name': 'Bad applies to type', 'resource': [{ 'type': 'folder', 'applies_to': 'self_and_children', 'resource_ids': ['56'] }], 'sink': valid_sink_spec, 'mode': 'whitelist' }, { 'name': 'Bad applies to type', 'resource': [{ 'type': 'billing_account', 'applies_to': 'children', 'resource_ids': ['ABCD-1234'] }], 'sink': valid_sink_spec, 'mode': 'whitelist' }, { 'name': 'Empty resource_ids', 'resource': [{ 'type': 'project', 'applies_to': 'self', 'resource_ids': [] }], 'sink': valid_sink_spec, 'mode': 'whitelist' }, { 'name': 'Missing filter', 'resource': [valid_resource], 'sink': { 'destination': 'bigquery.*', 'include_children': '*' }, 'mode': 'whitelist' }, { 'name': 'Bad include_children', 'resource': [valid_resource], 'sink': { 'destination': 'bigquery.*', 'filter': '*', 'include_children': 'Yes' }, 'mode': 'whitelist' } ] for rule in bad_rules: with self.assertRaises(InvalidRulesSchemaError): rule_book.add_rule(rule, 1) if __name__ == '__main__': unittest.main()
40.79562
84
0.560834
21,020
0.94024
0
0
0
0
0
0
7,729
0.345724
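Editor's note: test_add_invalid_rules() in the record above pins down the rule schema LogSinkRuleBook.add_rule() accepts: a name, a mode, one or more resource specs, and a sink spec. A sketch of one rule built from exactly those fields (the name and resource id are invented):

example_rule = {
    'name': 'Example: only BigQuery sinks under org 1234',
    'mode': 'whitelist',
    'resource': [{
        'type': 'organization',
        'applies_to': 'children',
        'resource_ids': ['1234'],
    }],
    'sink': {
        'destination': 'bigquery.*',
        'filter': '',
        'include_children': '*',
    },
}

# rule_book = lsre.LogSinkRuleBook(global_configs=None)
# rule_book.add_rule(example_rule, 0)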
b9e3fca3aec04c54b087304757154615d5a67e58
2,852
py
Python
backend/api/ulca-ums-service/user-management/utilities/orgUtils.py
agupta54/ulca
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
[ "MIT" ]
3
2022-01-12T06:51:51.000Z
2022-02-23T18:54:33.000Z
backend/api/ulca-ums-service/user-management/utilities/orgUtils.py
agupta54/ulca
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
[ "MIT" ]
6
2021-08-31T19:21:26.000Z
2022-01-03T05:53:42.000Z
backend/api/ulca-ums-service/user-management/utilities/orgUtils.py
agupta54/ulca
c1f570ac254ce2ac73f40c49716458f4f7cbaee2
[ "MIT" ]
8
2021-08-12T08:07:49.000Z
2022-01-25T04:40:51.000Z
import uuid from config import USR_ORG_MONGO_COLLECTION, USR_MONGO_COLLECTION import db from models.response import post_error import logging log = logging.getLogger('file') class OrgUtils: def __init__(self): pass #orgId generation @staticmethod def generate_org_id(): """UUID generation for org registeration""" return(uuid.uuid4().hex) @staticmethod def validate_org(org_code): """Validating Org Org should be registered and active on Anuvaad system. """ try: #connecting to mongo instance/collection collections = db.get_db()[USR_ORG_MONGO_COLLECTION] #searching for active org record result = collections.find({"code": org_code}, {"_id": 0, "active": 1}) if result.count() == 0: return post_error("Invalid Organization", "No such registered organization with the given Org Id", None) for value in result: if value["active"] == False: return post_error("Invalid Organization", "Organization is currently inactive", None) except Exception as e: log.exception(f"Db connection exception : {e}") return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None) @staticmethod def validate_org_upsert(i,org): """Org validation on upsert deactivation of org allowed only once all the users in the corresponding org is inactive. """ if "code" not in org or not org["code"]: return post_error("Data Missing", "code not found", None) if "active" not in org: return post_error("Data Missing", "active not found", None) code = str(org["code"]).upper() active = org["active"] if not isinstance(active,bool): return post_error("Invalid format", "active should be bool", None), 400 if active == False: try: #connecting to mongo instance/collection collections = db.get_db()[USR_MONGO_COLLECTION] #searching for active users in the org result = collections.find({"orgID": code,"is_active":True}) if result.count()!=0: log.info("Deactivation request for org failed, {} active users with the orgID".format(str(result.count()))) return post_error("Deactivation Failed","There exist active users in {} hence this action cannot be performed".format(code),None) except Exception as e: log.exception(f"Db connection exception : {e}") return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)
41.333333
149
0.619565
2,674
0.937588
0
0
2,580
0.904628
0
0
1,160
0.406732
b9e478ed385905aa26b48748e1fbf896e8ced766
4,299
py
Python
setup.py
AntonBiryukovUofC/diffvg
e081098f52b82bfd0b7e91114d289d65ef969a60
[ "Apache-2.0" ]
null
null
null
setup.py
AntonBiryukovUofC/diffvg
e081098f52b82bfd0b7e91114d289d65ef969a60
[ "Apache-2.0" ]
null
null
null
setup.py
AntonBiryukovUofC/diffvg
e081098f52b82bfd0b7e91114d289d65ef969a60
[ "Apache-2.0" ]
null
null
null
# Adapted from https://github.com/pybind/cmake_example/blob/master/setup.py import os import re import sys import platform import subprocess import importlib from sysconfig import get_paths import importlib from setuptools import setup, Extension from setuptools.command.build_ext import build_ext from setuptools.command.install import install from distutils.sysconfig import get_config_var from distutils.version import LooseVersion class CMakeExtension(Extension): def __init__(self, name, sourcedir, build_with_cuda): Extension.__init__(self, name, sources=[]) self.sourcedir = os.path.abspath(sourcedir) self.build_with_cuda = build_with_cuda class Build(build_ext): def run(self): try: out = subprocess.check_output(['cmake', '--version']) except OSError: raise RuntimeError("CMake must be installed to build the following extensions: " + ", ".join(e.name for e in self.extensions)) super().run() def build_extension(self, ext): if isinstance(ext, CMakeExtension): extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) info = get_paths() include_path = info['include'] cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir, '-DPYTHON_INCLUDE_PATH=' + include_path, ] cfg = 'Debug' if self.debug else 'Release' build_args = ['--config', cfg] if platform.system() == "Windows": cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir), '-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)] if sys.maxsize > 2 ** 32: cmake_args += ['-A', 'x64'] build_args += ['--', '/m'] else: cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] build_args += ['--', '-j8'] if ext.build_with_cuda: cmake_args += ['-DDIFFVG_CUDA=1'] else: cmake_args += ['-DDIFFVG_CUDA=0'] env = os.environ.copy() env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), self.distribution.get_version()) env_build = env env["CXX"] = "/usr/bin/g++-5" env["CC"] = "/usr/bin/gcc-5" env_build["CXX"] = "/usr/bin/g++-5" env_build["CC"] = "/usr/bin/gcc-5" env["PATH"] = "/usr/local/cuda-10.1/bin" + ":" + os.environ['PATH'] env_build["PATH"] = "/usr/local/cuda-10.1/bin" + ":" + os.environ['PATH'] if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env) subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp, env=env_build) else: super().build_extension(ext) torch_spec = importlib.util.find_spec("torch") tf_spec = importlib.util.find_spec("tensorflow") packages = [] build_with_cuda = False if torch_spec is not None: packages.append('pydiffvg') import torch if torch.cuda.is_available(): build_with_cuda = True if tf_spec is not None and sys.platform != 'win32': packages.append('pydiffvg_tensorflow') if not build_with_cuda: import tensorflow as tf if tf.test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=None): build_with_cuda = True if len(packages) == 0: print('Error: PyTorch or Tensorflow must be installed. For Windows platform only PyTorch is supported.') exit() # Override build_with_cuda with environment variable if 'DIFFVG_CUDA' in os.environ: build_with_cuda = os.environ['DIFFVG_CUDA'] == '1' setup(name='diffvg', version='0.0.1', install_requires=["svgpathtools"], description='Differentiable Vector Graphics', ext_modules=[CMakeExtension('diffvg', '', build_with_cuda)], cmdclass=dict(build_ext=Build, install=install), packages=packages, zip_safe=False)
38.044248
109
0.601303
2,711
0.630612
0
0
0
0
0
0
968
0.225169
b9e64ab7c515862e0dec6a8272d8a276b9bd86b9
14,587
py
Python
robotpy_ext/common_drivers/navx/registerio.py
twinters007/robotpy-wpilib-utilities
d2e18c16fc97a469e0621521e0fbed0093610d6e
[ "MIT", "BSD-3-Clause" ]
2
2017-01-16T03:10:57.000Z
2017-01-16T03:11:00.000Z
robotpy_ext/common_drivers/navx/registerio.py
twinters007/robotpy-wpilib-utilities
d2e18c16fc97a469e0621521e0fbed0093610d6e
[ "MIT", "BSD-3-Clause" ]
null
null
null
robotpy_ext/common_drivers/navx/registerio.py
twinters007/robotpy-wpilib-utilities
d2e18c16fc97a469e0621521e0fbed0093610d6e
[ "MIT", "BSD-3-Clause" ]
null
null
null
# validated: 2017-02-19 DS c5e3a8a9b642 roborio/java/navx_frc/src/com/kauailabs/navx/frc/RegisterIO.java #---------------------------------------------------------------------------- # Copyright (c) Kauai Labs 2015. All Rights Reserved. # # Created in support of Team 2465 (Kauaibots). Go Purple Wave! # # Open Source Software - may be modified and shared by FRC teams. Any # modifications to this code must be accompanied by the \License.txt file # in the root directory of the project #---------------------------------------------------------------------------- from ._impl import AHRSProtocol, IMUProtocol, IMURegisters from wpilib.timer import Timer import logging logger = logging.getLogger('navx') __all__ = ['RegisterIO'] IO_TIMEOUT_SECONDS = 1.0 DELAY_OVERHEAD_SECONDS = 0.004 class _BoardId: type = 0 hw_rev = 0 fw_ver_major = 0 fw_ver_minor = 0 fw_revision = 0 unique_id = [0]*12 class _BoardState: op_status = 0 sensor_status = 0 cal_status = 0 selftest_status = 0 capability_flags = 0 update_rate_hz = 0 accel_fsr_g = 0 gyro_fsr_dps = 0 class RegisterIO: def __init__(self, io_provider, update_rate_hz, notify_sink, board_capabilities): """ :param board_capabilities: must have the following callable attributes: _isOmniMountSupported, _isBoardYawResetSupported, _isDisplacementSupported :param notify_sink: must have the following callable attributes: _setYawPitchRoll, _setAHRSData, _setAHRSPosData, _setRawData, _setBoardID, _setBoardState, _yawResetComplete """ self.io_provider = io_provider self.update_rate_hz = update_rate_hz self.board_capabilities = board_capabilities self.notify_sink = notify_sink self.raw_data_update = IMUProtocol.GyroUpdate() self.ahrspos_update = AHRSProtocol.AHRSPosUpdate() self.board_state = _BoardState() self.board_id = _BoardId() self.last_update_time = 0 self.byte_count = 0 self.update_count = 0 self.last_sensor_timestamp = 0 self._stop = False def stop(self): self._stop = True def shutdown(self): self.io_provider.shutdown() def run(self): logger.info("NavX io thread starting") try: self.io_provider.init() # initial device configuration self.setUpdateRateHz(self.update_rate_hz) if not self.getConfiguration(): logger.warning("-- Did not get configuration data") else: logger.info("-- Board is %s (rev %s)", IMURegisters.model_type(self.board_id.type), self.board_id.hw_rev) logger.info("-- Firmware %s.%s", self.board_id.fw_ver_major, self.board_id.fw_ver_minor) log_error = True # Calculate delay to match configured update rate # Note: some additional time is removed from the # 1/update_rate value to ensure samples are not # dropped, esp. at higher update rates. 
update_rate = 1.0/(self.update_rate_hz & 0xFF) if update_rate > DELAY_OVERHEAD_SECONDS: update_rate -= DELAY_OVERHEAD_SECONDS logger.info("-- Update rate: %shz (%.4fs)", self.update_rate_hz, update_rate) # IO Loop while not self._stop: if self.board_state.update_rate_hz != self.update_rate_hz: self.setUpdateRateHz(self.update_rate_hz) try: self.getCurrentData() except IOError: if log_error: logger.exception("Error getting data") log_error = False else: log_error = True Timer.delay(update_rate) except Exception: logger.exception("Unhandled exception in NavX thread") finally: logger.info("NavX i/o thread exiting") def getConfiguration(self): success = False retry_count = 0 while retry_count < 5 and not success: try: config = self.io_provider.read(IMURegisters.NAVX_REG_WHOAMI, IMURegisters.NAVX_REG_SENSOR_STATUS_H+1) except IOError as e: logger.warning("Error reading configuration data, retrying (%s)", e) success = False Timer.delay(0.5) else: board_id = self.board_id board_id.hw_rev = config[IMURegisters.NAVX_REG_HW_REV] board_id.fw_ver_major = config[IMURegisters.NAVX_REG_FW_VER_MAJOR] board_id.fw_ver_minor = config[IMURegisters.NAVX_REG_FW_VER_MINOR] board_id.type = config[IMURegisters.NAVX_REG_WHOAMI] self.notify_sink._setBoardID(board_id) board_state = self.board_state board_state.cal_status = config[IMURegisters.NAVX_REG_CAL_STATUS] board_state.op_status = config[IMURegisters.NAVX_REG_OP_STATUS] board_state.selftest_status = config[IMURegisters.NAVX_REG_SELFTEST_STATUS] board_state.sensor_status = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_SENSOR_STATUS_L) board_state.gyro_fsr_dps = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_GYRO_FSR_DPS_L) board_state.accel_fsr_g = config[IMURegisters.NAVX_REG_ACCEL_FSR_G] board_state.update_rate_hz = config[IMURegisters.NAVX_REG_UPDATE_RATE_HZ] board_state.capability_flags = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_CAPABILITY_FLAGS_L) self.notify_sink._setBoardState(board_state) success = True retry_count += 1 return success def getCurrentData(self): first_address = IMURegisters.NAVX_REG_UPDATE_RATE_HZ displacement_registers = self.board_capabilities._isDisplacementSupported() # If firmware supports displacement data, acquire it - otherwise implement # similar (but potentially less accurate) calculations on this processor. 
if displacement_registers: read_count = IMURegisters.NAVX_REG_LAST + 1 - first_address else: read_count = IMURegisters.NAVX_REG_QUAT_OFFSET_Z_H + 1 - first_address curr_data = self.io_provider.read(first_address, read_count) sensor_timestamp = AHRSProtocol.decodeBinaryUint32(curr_data, IMURegisters.NAVX_REG_TIMESTAMP_L_L-first_address) if sensor_timestamp == self.last_sensor_timestamp: return self.last_sensor_timestamp = sensor_timestamp ahrspos_update = self.ahrspos_update ahrspos_update.op_status = curr_data[IMURegisters.NAVX_REG_OP_STATUS - first_address] ahrspos_update.selftest_status = curr_data[IMURegisters.NAVX_REG_SELFTEST_STATUS - first_address] ahrspos_update.cal_status = curr_data[IMURegisters.NAVX_REG_CAL_STATUS] ahrspos_update.sensor_status = curr_data[IMURegisters.NAVX_REG_SENSOR_STATUS_L - first_address] ahrspos_update.yaw = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_YAW_L-first_address) ahrspos_update.pitch = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_PITCH_L-first_address) ahrspos_update.roll = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_ROLL_L-first_address) ahrspos_update.compass_heading = AHRSProtocol.decodeProtocolUnsignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_HEADING_L-first_address) ahrspos_update.mpu_temp_c = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_MPU_TEMP_C_L - first_address) ahrspos_update.world_linear_accel_x = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_X_L-first_address) ahrspos_update.world_linear_accel_y = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_Y_L-first_address) ahrspos_update.world_linear_accel_z = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_Z_L-first_address) ahrspos_update.altitude = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_ALTITUDE_D_L - first_address) ahrspos_update.baro_pressure = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_PRESSURE_DL - first_address) ahrspos_update.fused_heading = AHRSProtocol.decodeProtocolUnsignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_FUSED_HEADING_L-first_address) ahrspos_update.quaternionW = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_W_L-first_address)/ 32768.0 ahrspos_update.quaternionX = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_X_L-first_address)/ 32768.0 ahrspos_update.quaternionY = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_Y_L-first_address)/ 32768.0 ahrspos_update.quaternionZ = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_Z_L-first_address)/ 32768.0 if displacement_registers: ahrspos_update.vel_x = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_X_I_L-first_address) ahrspos_update.vel_y = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_Y_I_L-first_address) ahrspos_update.vel_z = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_Z_I_L-first_address) ahrspos_update.disp_x = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_X_I_L-first_address) ahrspos_update.disp_y = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_Y_I_L-first_address) ahrspos_update.disp_z = AHRSProtocol.decodeProtocol1616Float(curr_data, 
IMURegisters.NAVX_REG_DISP_Z_I_L-first_address) self.notify_sink._setAHRSPosData(ahrspos_update, sensor_timestamp) else: self.notify_sink._setAHRSData(ahrspos_update, sensor_timestamp) board_state = self.board_state board_state.cal_status = curr_data[IMURegisters.NAVX_REG_CAL_STATUS-first_address] board_state.op_status = curr_data[IMURegisters.NAVX_REG_OP_STATUS-first_address] board_state.selftest_status = curr_data[IMURegisters.NAVX_REG_SELFTEST_STATUS-first_address] board_state.sensor_status = AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_SENSOR_STATUS_L-first_address) board_state.update_rate_hz = curr_data[IMURegisters.NAVX_REG_UPDATE_RATE_HZ-first_address] board_state.gyro_fsr_dps = AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_GYRO_FSR_DPS_L) board_state.accel_fsr_g = curr_data[IMURegisters.NAVX_REG_ACCEL_FSR_G] board_state.capability_flags= AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_CAPABILITY_FLAGS_L-first_address) self.notify_sink._setBoardState(board_state) raw_data_update = self.raw_data_update raw_data_update.raw_gyro_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_X_L-first_address) raw_data_update.raw_gyro_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_Y_L-first_address) raw_data_update.raw_gyro_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_Z_L-first_address) raw_data_update.raw_accel_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_X_L-first_address) raw_data_update.raw_accel_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_Y_L-first_address) raw_data_update.raw_accel_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_Z_L-first_address) raw_data_update.cal_mag_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_X_L-first_address) raw_data_update.cal_mag_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_Y_L-first_address) raw_data_update.cal_mag_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_Z_L-first_address) raw_data_update.mpu_temp_c = ahrspos_update.mpu_temp self.notify_sink._setRawData(raw_data_update, sensor_timestamp) self.last_update_time = Timer.getFPGATimestamp() self.byte_count += len(curr_data) self.update_count += 1 def isConnected(self): time_since_last_update = Timer.getFPGATimestamp() - self.last_update_time return time_since_last_update <= IO_TIMEOUT_SECONDS def getByteCount(self): return self.byte_count def getUpdateCount(self): return self.update_count def setUpdateRateHz(self, update_rate_hz): self.io_provider.write(IMURegisters.NAVX_REG_UPDATE_RATE_HZ, update_rate_hz) def zeroYaw(self): self.io_provider.write( IMURegisters.NAVX_REG_INTEGRATION_CTL, AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_YAW ) self.notify_sink._yawResetComplete() def zeroDisplacement(self): self.io_provider.write( IMURegisters.NAVX_REG_INTEGRATION_CTL, (AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_X | AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_Y | AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_Z ) )
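The register reads above lean heavily on the AHRSProtocol decode helpers, and the _L/_H register naming implies a little-endian layout (low byte at the lower address). As an illustrative sketch only, not part of the navX driver, the 16-bit decode used in getConfiguration() can be reproduced with the standard library; the fake buffer below is an assumption chosen to show a 2000 dps gyro full-scale value.

# Illustrative only: decode a 16-bit register value little-endian, as the _L/_H naming implies.
import struct

def decode_uint16(buf, offset):
    # '<H' = little-endian unsigned 16-bit, matching what decodeBinaryUint16 is used for above
    return struct.unpack_from('<H', bytes(buf), offset)[0]

fake_config = bytes([0xD0, 0x07])      # 0x07D0 == 2000 (hypothetical gyro FSR in dps)
print(decode_uint16(fake_config, 0))   # -> 2000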
54.632959
159
0.676973
13,781
0.944745
0
0
0
0
0
0
1,642
0.112566
b9e6a0bf2a4d3e860c6eb607624b101a086157b4
12,517
py
Python
RigolWFM/channel.py
wvdv2002/RigolWFM
849a1130c9194f052eaf5582dfa67e7a5708a3a3
[ "BSD-3-Clause" ]
null
null
null
RigolWFM/channel.py
wvdv2002/RigolWFM
849a1130c9194f052eaf5582dfa67e7a5708a3a3
[ "BSD-3-Clause" ]
null
null
null
RigolWFM/channel.py
wvdv2002/RigolWFM
849a1130c9194f052eaf5582dfa67e7a5708a3a3
[ "BSD-3-Clause" ]
null
null
null
#pylint: disable=invalid-name #pylint: disable=too-many-instance-attributes #pylint: disable=too-many-return-statements #pylint: disable=too-many-statements """ Class structure and methods for an oscilloscope channel. The idea is to collect all the relevant information from all the Rigol scope waveforms into a single structure that can be handled in a uniform and consistent manner. Specifically this lets one just use channel.times : numpy array of signal times channel.volts : numpy array of signal voltages or the stringification method to describe a channel print(channel) """ from enum import Enum import numpy as np class UnitEnum(Enum): """Enumerated units for scopes without them.""" w = 0 a = 1 v = 2 u = 3 def best_scale(number): """Scale and units for a number with proper prefix.""" absnr = abs(number) if absnr == 0: return 1, ' ' if absnr < 0.99999999e-9: return 1e12, 'p' if absnr < 0.99999999e-6: return 1e9, 'n' if absnr < 0.99999999e-3: return 1e6, 'µ' if absnr < 0.99999999: return 1e3, 'm' if absnr < 0.99999999e3: return 1, ' ' if absnr < 0.99999999e6: return 1e-3, 'k' if absnr < 0.999999991e9: return 1e-6, 'M' return 1e-9, 'G' def engineering_string(number, n_digits): """Format number with proper prefix.""" scale, prefix = best_scale(number) fformat = "%%.%df %%s" % n_digits s = fformat % (number * scale, prefix) return s def _channel_bytes(channel_number, w): """ Return right series of bytes for a channel for 1000Z scopes. Waveform points are interleaved stored in memory when two or more channels are saved. This unweaves them. Args: channel_number: the number of enabled channels before this one w: original waveform object Returns byte array for specified channel """ offset = 0 if w.header.stride == 2: # byte pattern CHx CHy # use odd bytes when this is the second enabled channel if any([w.header.ch[i].enabled for i in range(channel_number-1)]): offset = 1 elif w.header.stride == 4: # byte pattern CH4 CH3 CH2 CH1 offset = 4 - channel_number data = np.frombuffer(w.data.raw, dtype=np.uint8) raw_bytes = data[offset::w.header.stride] return raw_bytes class Channel(): """Base class for a single channel.""" def __init__(self, w, channel_number, scope, selected='1234'): """ Initialize a Channel Object. 
Args: w: Wfm object channel_number: 1, 2, 3, or 4 scope: string describing scope selected: string with channels chosen by user Returns: Channel object """ self.channel_number = channel_number self.name = "CH %d" % channel_number self.waveform = w self.seconds_per_point = w.header.seconds_per_point self.firmware = 'unknown' self.unit = UnitEnum.v self.points = 0 self.raw = None self.volts = None self.times = None self.coupling = 'unknown' self.roll_stop = 0 self.time_offset = 0 self.time_scale = 1 self.enabled = False self.enabled_and_selected = False self.volt_scale = 1 self.volt_offset = 0 self.y_scale = 1 self.y_offset = 0 self.volt_per_division = 1 self.probe_value = 1 self.inverted = False # determine if this channel is one of those chosen by user chosen = selected.find(str(channel_number)) != -1 if channel_number <= len(w.header.ch): channel = w.header.ch[channel_number-1] self.enabled = channel.enabled self.enabled_and_selected = channel.enabled and chosen self.volt_scale = channel.volt_scale self.volt_offset = channel.volt_offset self.y_scale = channel.volt_scale self.y_offset = channel.volt_offset self.volt_per_division = channel.volt_per_division self.probe_value = channel.probe_value self.unit = channel.unit self.inverted = channel.inverted if scope == 'wfm1000c': self.ds1000c(w, channel_number) elif scope == 'wfm1000d': self.ds1000d(w, channel_number) elif scope == 'wfm1000e': self.ds1000e(w, channel_number) elif scope == 'wfm1000z': self.ds1000z(w, channel_number) elif scope == 'wfm2000': self.ds2000(w, channel_number) elif scope == 'wfm4000': self.ds4000(w, channel_number) elif scope == 'wfm6000': self.ds6000(w, channel_number) def __str__(self): """Describe this channel.""" s = " Channel %d:\n" % self.channel_number s += " Coupling = %8s\n" % self.coupling.rjust(7, ' ') s += " Scale = %10sV/div\n" % engineering_string(self.volt_per_division, 2) s += " Offset = %10sV\n" % engineering_string(self.volt_offset, 2) s += " Probe = %7gX\n" % self.probe_value s += " Inverted = %8s\n\n" % self.inverted s += " Time Base = %10ss/div\n" % engineering_string(self.time_scale, 3) s += " Offset = %10ss\n" % engineering_string(self.time_offset, 3) s += " Delta = %10ss/point\n" % engineering_string(self.seconds_per_point, 3) s += " Points = %8d\n\n" % self.points if self.enabled_and_selected: s += " Count = [%9d,%9d,%9d ... %9d,%9d]\n" % ( 1, 2, 3, self.points-1, self.points) s += " Raw = [%9d,%9d,%9d ... %9d,%9d]\n" % ( self.raw[0], self.raw[1], self.raw[2], self.raw[-2], self.raw[-1]) t = [engineering_string(self.times[i], 3) + "s" for i in [0, 1, 2, -2, -1]] s += " Times = [%9s,%9s,%9s ... %9s,%9s]\n" % ( t[0], t[1], t[2], t[-2], t[-1]) v = [engineering_string(self.volts[i], 2) + "V" for i in [0, 1, 2, -2, -1]] s += " Volts = [%9s,%9s,%9s ... 
%9s,%9s]\n" % ( v[0], v[1], v[2], v[-2], v[-1]) return s def calc_times_and_volts(self): """Calculate the times and voltages for this channel.""" if self.enabled_and_selected: self.volts = self.y_scale * (127.0 - self.raw) - self.y_offset h = self.points * self.seconds_per_point / 2 self.times = np.linspace(-h, h, self.points) + self.time_offset def ds1000c(self, w, channel_number): """Interpret waveform data for 1000CD series scopes.""" self.time_scale = 1.0e-12 * w.header.time_scale self.time_offset = 1.0e-12 * w.header.time_offset if channel_number == 1: if self.enabled_and_selected: self.points = len(w.data.ch1) self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8) if channel_number == 2: if self.enabled_and_selected: self.points = len(w.data.ch2) self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8) self.calc_times_and_volts() def ds1000d(self, w, channel_number): """Interpret waveform data for 1000CD series scopes.""" self.time_scale = 1.0e-12 * w.header.time_scale self.time_offset = 1.0e-12 * w.header.time_offset if channel_number == 1: if self.enabled_and_selected: self.points = len(w.data.ch1) self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8) if channel_number == 2: if self.enabled_and_selected: self.points = len(w.data.ch2) self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8) self.calc_times_and_volts() def ds1000e(self, w, channel_number): """Interpret waveform data for 1000D and 1000E series scopes.""" self.roll_stop = w.header.roll_stop if channel_number == 1: self.time_offset = w.header.ch1_time_offset self.time_scale = w.header.ch1_time_scale if self.enabled_and_selected: self.points = len(w.data.ch1) self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8) elif channel_number == 2: self.time_offset = w.header.ch2_time_offset self.time_scale = w.header.ch2_time_scale if self.enabled_and_selected: self.points = len(w.data.ch2) self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8) self.calc_times_and_volts() def ds1000z(self, w, channel_number): """Interpret waveform for the Rigol DS1000Z series.""" self.time_scale = w.header.time_scale self.time_offset = w.header.time_offset self.points = w.header.points self.stride = w.header.stride self.firmware = w.preheader.firmware_version self.probe = w.header.ch[channel_number-1].probe_value self.coupling = w.header.ch[channel_number-1].coupling.name.upper() self.y_scale = w.header.ch[channel_number-1].y_scale self.y_offset = w.header.ch[channel_number-1].y_offset if self.enabled_and_selected: self.raw = _channel_bytes(channel_number, w) self.points = len(self.raw) self.calc_times_and_volts() def ds2000(self, w, channel_number): """Interpret waveform for the Rigol DS2000 series.""" self.time_offset = w.header.time_offset self.time_scale = w.header.time_scale self.points = w.header.storage_depth self.firmware = w.header.firmware_version self.unit = UnitEnum(w.header.ch[channel_number-1].unit_actual) self.coupling = w.header.ch[channel_number-1].coupling.name.upper() self.y_scale = -self.volt_scale self.y_offset = self.volt_offset if self.enabled_and_selected: if channel_number == 1: self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8) if channel_number == 2: self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8) if channel_number == 3: self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8) if channel_number == 4: self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8) self.calc_times_and_volts() def ds4000(self, w, channel_number): """Interpret waveform for the Rigol DS4000 series.""" self.time_offset = w.header.time_offset 
self.time_scale = w.header.time_scale self.points = w.header.points self.firmware = w.header.firmware_version self.coupling = w.header.ch[channel_number-1].coupling.name.upper() self.y_scale = -self.volt_scale self.y_offset = self.volt_offset if self.enabled_and_selected: if channel_number == 1: self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8) if channel_number == 2: self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8) if channel_number == 3: self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8) if channel_number == 4: self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8) self.calc_times_and_volts() def ds6000(self, w, channel_number): """Interpret waveform for the Rigol DS6000 series.""" self.time_offset = w.header.time_offset self.time_scale = w.header.time_scale self.points = w.header.points self.firmware = w.header.firmware_version self.coupling = w.header.ch[channel_number-1].coupling.name.upper() self.unit = w.header.ch[channel_number-1].unit if self.enabled_and_selected: if channel_number == 1: self.raw = np.array(w.header.raw_1, dtype=np.uint8) if channel_number == 2: self.raw = np.array(w.header.raw_2, dtype=np.uint8) if channel_number == 3: self.raw = np.array(w.header.raw_3, dtype=np.uint8) if channel_number == 4: self.raw = np.array(w.header.raw_4, dtype=np.uint8) self.calc_times_and_volts()
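The interleaving handled by _channel_bytes above is a plain strided slice over the raw byte buffer. A minimal sketch with made-up data (not a real .wfm capture) shows the same de-interleaving:

# Illustrative only: de-interleave two channels stored as CHx CHy CHx CHy ...
# using the same offset/stride slicing that _channel_bytes performs.
import numpy as np

raw = np.array([10, 200, 11, 201, 12, 202, 13, 203], dtype=np.uint8)  # fake interleaved bytes
stride = 2
ch_first = raw[0::stride]   # -> [10 11 12 13]
ch_second = raw[1::stride]  # -> [200 201 202 203]
print(ch_first, ch_second)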
36.176301
96
0.589199
10,225
0.816824
0
0
0
0
0
0
2,709
0.216408
b9e6a9be08cb7ae14c68608c944b95cbe6233b10
1,477
py
Python
configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py
esf-bt2020/mmdetection
abc5fe060e0fcb716f845c85441be3741b22d3cf
[ "Apache-2.0" ]
null
null
null
configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py
esf-bt2020/mmdetection
abc5fe060e0fcb716f845c85441be3741b22d3cf
[ "Apache-2.0" ]
null
null
null
configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py
esf-bt2020/mmdetection
abc5fe060e0fcb716f845c85441be3741b22d3cf
[ "Apache-2.0" ]
null
null
null
_base_ = '../faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py'

model = dict(
    backbone=dict(
        num_stages=4,
        #frozen_stages=4
    ),
    roi_head=dict(
        bbox_head=dict(
            num_classes=3
        )
    )
)

dataset_type = 'COCODataset'
classes = ('luchs', 'rotfuchs', 'wolf')
data = dict(
    train=dict(
        img_prefix='raubtierv2a/train/',
        classes=classes,
        ann_file='raubtierv2a/train/_annotations.coco.json'),
    val=dict(
        img_prefix='raubtierv2a/valid/',
        classes=classes,
        ann_file='raubtierv2a/valid/_annotations.coco.json'),
    test=dict(
        img_prefix='raubtierv2a/test/',
        classes=classes,
        ann_file='raubtierv2a/test/_annotations.coco.json'))

#optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) #original (8x2=16)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) #(4x2=8) 4 GPUs
#optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001) #(1x2=2)

total_epochs=24

evaluation = dict(classwise=True, interval=1, metric='bbox')

work_dir = '/media/storage1/projects/WilLiCam/checkpoint_workdir/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu'

#http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth
load_from = 'checkpoints/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth'
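Configs like this are plain Python files that MMDetection merges with their _base_ chain at load time. As an illustrative sketch only, assuming an mmcv release older than 2.x (where Config still lives in mmcv) and that the path below points at this file:

# Illustrative: load the merged config and inspect a few overridden fields.
from mmcv import Config

cfg = Config.fromfile('configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py')
print(cfg.model.roi_head.bbox_head.num_classes)  # -> 3
print(cfg.optimizer.lr)                          # -> 0.01
print(cfg.data.train.ann_file)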
26.375
151
0.704807
0
0
0
0
0
0
0
0
835
0.565335
b9e6fcabd0b33c8ba893844382f8413f57f64840
262
py
Python
driver/python/setup.py
wbaweto/QConf
977a53d601eab2055fd8fb344b92f4026d178ad5
[ "BSD-2-Clause" ]
2,056
2015-03-23T04:51:13.000Z
2022-03-20T11:57:36.000Z
driver/python/setup.py
xzz0329/QConf
f852f984de0b55bbca5bcb433a7be5af6383c449
[ "BSD-2-Clause" ]
116
2015-03-25T01:32:39.000Z
2022-02-12T03:21:08.000Z
driver/python/setup.py
xzz0329/QConf
f852f984de0b55bbca5bcb433a7be5af6383c449
[ "BSD-2-Clause" ]
634
2015-03-24T11:51:22.000Z
2022-01-28T04:22:19.000Z
from distutils.core import setup, Extension

setup(name = 'qconf_py',
      version = '1.2.2',
      ext_modules = [Extension('qconf_py', ['lib/python_qconf.cc'],
                               include_dirs=['/usr/local/include/qconf'],
                               extra_objects=['/usr/local/qconf/lib/libqconf.a']
                               )])
43.666667
105
0.683206
0
0
0
0
0
0
0
0
107
0.408397
b9e707edd4da101ada4ff00b233330f2c2f9843e
148
py
Python
abc153/d.py
Lockdef/kyopro-code
2d943a87987af05122c556e173e5108a0c1c77c8
[ "MIT" ]
null
null
null
abc153/d.py
Lockdef/kyopro-code
2d943a87987af05122c556e173e5108a0c1c77c8
[ "MIT" ]
null
null
null
abc153/d.py
Lockdef/kyopro-code
2d943a87987af05122c556e173e5108a0c1c77c8
[ "MIT" ]
null
null
null
h = int(input())

i = 1
a = 1
b = 1
c = 1
while h >= a:
    a = 2 ** i
    i += 1

s = 0
t = True
for j in range(1, i-1):
    c += 2 ** j

print(c)
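The loop doubles a until it exceeds h and then sums powers of two, so the printed value is 2^ceil(log2(h+1)) - 1, which equals 2 ** h.bit_length() - 1 for positive h. A small cross-check of that equivalence (illustrative only, not part of the submission):

# Illustrative cross-check: the doubling/summing above computes 2**h.bit_length() - 1.
def attacks(h):
    return 2 ** h.bit_length() - 1

for h in (1, 2, 7, 1000000000000):
    assert attacks(h) == sum(2 ** j for j in range(h.bit_length()))
    print(h, attacks(h))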
8.705882
23
0.398649
0
0
0
0
0
0
0
0
0
0
b9e96b262a690da4aaab0bf9584b51a15851826f
6,784
py
Python
demos/python/sdk_wireless_camera_control/open_gopro/demos/log_battery.py
Natureshadow/OpenGoPro
05110123cfbf6584288b813f2d4896d3a091480e
[ "MIT" ]
210
2021-06-05T20:06:17.000Z
2022-03-31T18:13:17.000Z
demos/python/sdk_wireless_camera_control/open_gopro/demos/log_battery.py
Natureshadow/OpenGoPro
05110123cfbf6584288b813f2d4896d3a091480e
[ "MIT" ]
73
2021-06-01T21:22:44.000Z
2022-03-31T18:33:24.000Z
demos/python/sdk_wireless_camera_control/open_gopro/demos/log_battery.py
Natureshadow/OpenGoPro
05110123cfbf6584288b813f2d4896d3a091480e
[ "MIT" ]
70
2021-06-07T03:59:04.000Z
2022-03-26T10:51:15.000Z
# log_battery.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro). # This copyright was auto-generated on Wed, Sep 1, 2021 5:05:45 PM """Example to continuously read the battery (with no Wifi connection)""" import csv import time import logging import argparse import threading from pathlib import Path from datetime import datetime from dataclasses import dataclass from typing import Optional, Tuple, Literal, List from rich.console import Console from open_gopro import GoPro from open_gopro.constants import StatusId from open_gopro.util import setup_logging, set_logging_level logger = logging.getLogger(__name__) console = Console() # rich consoler printer BarsType = Literal[0, 1, 2, 3] @dataclass class Sample: """Simple class to store battery samples""" index: int percentage: int bars: BarsType def __post_init__(self) -> None: self.time = datetime.now() def __str__(self) -> str: # pylint: disable=missing-return-doc return f"Index {self.index} @ time {self.time.strftime('%H:%M:%S')} --> bars: {self.bars}, percentage: {self.percentage}" SAMPLE_INDEX = 0 SAMPLES: List[Sample] = [] def dump_results_as_csv(location: Path) -> None: """Write all of the samples to a csv file Args: location (Path): File to write to """ console.print(f"Dumping results as CSV to {location}") with open(location, mode="w") as f: w = csv.writer(f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL) w.writerow(["index", "time", "percentage", "bars"]) initial_time = SAMPLES[0].time for s in SAMPLES: w.writerow([s.index, (s.time - initial_time).seconds, s.percentage, s.bars]) def process_battery_notifications(gopro: GoPro, initial_bars: BarsType, initial_percentage: int) -> None: """Separate thread to continuously check for and store battery notifications. If the CLI parameter was set to poll, this isn't used. Args: gopro (GoPro): instance to get updates from initial_bars (BarsType): Initial bars level when notifications were enabled initial_percentage (int): Initial percentage when notifications were enabled """ last_percentage = initial_percentage last_bars = initial_bars while True: # Block until we receive an update notification = gopro.get_update() # Update data points if they have changed last_percentage = ( notification.data[StatusId.INT_BATT_PER] if StatusId.INT_BATT_PER in notification.data else last_percentage ) last_bars = ( notification.data[StatusId.BATT_LEVEL] if StatusId.BATT_LEVEL in notification.data else last_bars ) # Append and print sample global SAMPLE_INDEX SAMPLES.append(Sample(index=SAMPLE_INDEX, percentage=last_percentage, bars=last_bars)) console.print(str(SAMPLES[-1])) SAMPLE_INDEX += 1 def main() -> int: """Main program functionality Returns: int: program return code """ identifier, log_location, poll = parse_arguments() global logger logger = setup_logging(logger, log_location) global SAMPLE_INDEX gopro: Optional[GoPro] = None return_code = 0 try: with GoPro(identifier, enable_wifi=False) as gopro: set_logging_level(logger, logging.ERROR) # # Setup notifications if we are not polling if poll is None: console.print("Configuring battery notifications...") # Enable notifications of the relevant battery statuses. Also store initial values. 
bars = gopro.ble_status.batt_level.register_value_update().flatten percentage = gopro.ble_status.int_batt_per.register_value_update().flatten # Start a thread to handle asynchronous battery level notifications threading.Thread( target=process_battery_notifications, args=(gopro, bars, percentage), daemon=True ).start() with console.status("[bold green]Receiving battery notifications until it dies..."): # Sleep forever, allowing notification handler thread to deal with battery level notifications while True: time.sleep(1) # Otherwise, poll else: with console.status("[bold green]Polling the battery until it dies..."): while True: SAMPLES.append( Sample( index=SAMPLE_INDEX, percentage=gopro.ble_status.int_batt_per.get_value().flatten, bars=gopro.ble_status.batt_level.get_value().flatten, ) ) console.print(str(SAMPLES[-1])) SAMPLE_INDEX += 1 time.sleep(poll) except Exception as e: # pylint: disable=broad-except logger.error(repr(e)) return_code = 1 except KeyboardInterrupt: logger.warning("Received keyboard interrupt. Shutting down...") finally: if len(SAMPLES) > 0: csv_location = Path(log_location.parent) / "battery_results.csv" dump_results_as_csv(csv_location) if gopro is not None: gopro.close() console.print("Exiting...") return return_code # pylint: disable=lost-exception def parse_arguments() -> Tuple[str, Path, Optional[int]]: """Parse command line arguments Returns: Tuple[str, Path, Path]: (identifier, path to save log, path to VLC) """ parser = argparse.ArgumentParser( description="Connect to the GoPro via BLE only and continuously read the battery (either by polling or notifications)." ) parser.add_argument( "-i", "--identifier", type=str, help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. \ If not used, first discovered GoPro will be connected to", default=None, ) parser.add_argument( "-l", "--log", type=Path, help="Location to store detailed log", default="log_battery.log", ) parser.add_argument( "-p", "--poll", type=int, help="Set to poll the battery at a given interval. If not set, battery level will be notified instead. Defaults to notifications.", default=None, ) args = parser.parse_args() return args.identifier, args.log, args.poll if __name__ == "__main__": main()
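The Sample dataclass and dump_results_as_csv above are independent of the camera connection, so the CSV layout can be previewed without a GoPro. The sketch below is illustrative only: it re-creates the same column layout (index, seconds since first sample, percentage, bars) with fabricated values and writes a throwaway file.

# Illustrative: reproduce the battery_results.csv layout with made-up samples.
import csv
from datetime import datetime, timedelta

samples = [(0, datetime.now(), 100, 3),
           (1, datetime.now() + timedelta(seconds=60), 99, 3)]
with open('battery_results_demo.csv', 'w', newline='') as f:
    w = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    w.writerow(['index', 'time', 'percentage', 'bars'])
    t0 = samples[0][1]
    for index, t, percentage, bars in samples:
        w.writerow([index, (t - t0).seconds, percentage, bars])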
34.969072
139
0.627358
388
0.057193
0
0
399
0.058815
0
0
2,423
0.357164
b9ea2a649f07b6a108f30b09b86010ae0b3acd70
47
py
Python
tumbleweed/models.py
mcroydon/django-tumbleweed
3f1eab2bf12350a91ca38165efec0c221a1fe69a
[ "BSD-3-Clause" ]
1
2015-11-08T11:33:15.000Z
2015-11-08T11:33:15.000Z
tumbleweed/models.py
mcroydon/django-tumbleweed
3f1eab2bf12350a91ca38165efec0c221a1fe69a
[ "BSD-3-Clause" ]
null
null
null
tumbleweed/models.py
mcroydon/django-tumbleweed
3f1eab2bf12350a91ca38165efec0c221a1fe69a
[ "BSD-3-Clause" ]
null
null
null
# These are not the droids you are looking for.
47
47
0.765957
0
0
0
0
0
0
0
0
47
1
b9ea32c16e86b4071267eb26a711d79f81eaea56
2,925
py
Python
xos/hpc_observer/steps/sync_originserver.py
wathsalav/xos
f6bcaa37a948ee41729236afe7fce0802e002404
[ "Apache-2.0" ]
null
null
null
xos/hpc_observer/steps/sync_originserver.py
wathsalav/xos
f6bcaa37a948ee41729236afe7fce0802e002404
[ "Apache-2.0" ]
null
null
null
xos/hpc_observer/steps/sync_originserver.py
wathsalav/xos
f6bcaa37a948ee41729236afe7fce0802e002404
[ "Apache-2.0" ]
null
null
null
import os
import sys
import base64

from django.db.models import F, Q
from xos.config import Config
from observer.syncstep import SyncStep
from core.models import Service
from hpc.models import ServiceProvider, ContentProvider, CDNPrefix, OriginServer
from util.logger import Logger, logging

# hpclibrary will be in steps/..
parentdir = os.path.join(os.path.dirname(__file__),"..")
sys.path.insert(0,parentdir)

from hpclib import HpcLibrary

logger = Logger(level=logging.INFO)

class SyncOriginServer(SyncStep, HpcLibrary):
    provides=[OriginServer]
    requested_interval=0

    def __init__(self, **args):
        SyncStep.__init__(self, **args)
        HpcLibrary.__init__(self)

    def fetch_pending(self, deleted):
        #self.consistency_check()

        return SyncStep.fetch_pending(self, deleted)

    def consistency_check(self):
        # set to true if something changed
        result=False

        # sanity check to make sure our PS objects have CMI objects behind them
        all_ors_ids = [x["origin_server_id"] for x in self.client.onev.ListAll("OriginServer")]

        for ors in OriginServer.objects.all():
            if (ors.origin_server_id is not None) and (ors.origin_server_id not in all_ors_ids):
                # we have an origin server ID, but it doesn't exist in the CMI
                # something went wrong
                # start over
                logger.info("origin server %s was not found on CMI" % ors.origin_server_id)
                ors.origin_server_id=None
                ors.save()
                result = True

        return result

    def sync_record(self, ors):
        logger.info("sync'ing origin server %s" % str(ors))

        if (not ors.contentProvider) or (not ors.contentProvider.content_provider_id):
            return

        cpid = ors.contentProvider.content_provider_id

        # validation requires URL start with http://
        url = ors.url
        if not url.startswith("http://"):
            url = "http://" + url

        ors_dict = {"authenticated_content": ors.authenticated, "zone_redirects": ors.redirects, "content_provider_id": cpid, "url": url, "service_type": "HyperCache", "caching_type": "Optimistic", "description": ors.description}

        #print os_dict

        if not ors.origin_server_id:
            id = self.client.onev.Create("OriginServer", ors_dict)
            ors.origin_server_id = id
        else:
            self.client.onev.Update("OriginServer", ors.origin_server_id, ors_dict)

        # ... something breaks (analytics) if the URL starts with http://, so we
        # change it in cob after we added it via onev.
        url = url[7:]
        self.client.cob.UpdateContent(ors.origin_server_id, {"url": url})

        ors.silent = True
        ors.save()

    def delete_record(self, m):
        if m.origin_server_id is not None:
            self.client.onev.Delete("OriginServer", m.origin_server_id)
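sync_record first forces an http:// prefix (the CMI create/update call requires it) and then strips it again before the cob UpdateContent call. That normalization is easy to isolate; the helpers below are hypothetical and purely illustrative, not part of the XOS code, and the hostname used is an example value.

# Illustrative helpers mirroring the URL handling inside sync_record().
def normalize_for_onev(url):
    return url if url.startswith("http://") else "http://" + url

def normalize_for_cob(url):
    return normalize_for_onev(url)[7:]   # drop the leading "http://"

assert normalize_for_onev("cdn.example.com") == "http://cdn.example.com"
assert normalize_for_cob("http://cdn.example.com") == "cdn.example.com"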
34.411765
229
0.654701
2,443
0.835214
0
0
0
0
0
0
731
0.249915
b9ea437d66df34d28efcf808ad16c896dadcac76
400
py
Python
main.py
aroxby/pixel-processor
9cfe260a085ced0883ce8b0a35c28020f4aa8737
[ "MIT" ]
null
null
null
main.py
aroxby/pixel-processor
9cfe260a085ced0883ce8b0a35c28020f4aa8737
[ "MIT" ]
null
null
null
main.py
aroxby/pixel-processor
9cfe260a085ced0883ce8b0a35c28020f4aa8737
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
from PIL import Image


def tranform(r, g, b):
    tmp = b
    b = g // 2
    g = tmp
    r = r // 2
    return r, g, b


def main():
    im = Image.open('blue-flames.jpg')
    input_pixels = im.getdata()
    output_pixels = tuple(tranform(*pixel) for pixel in input_pixels)
    im.putdata(output_pixels)
    im.save('green-flames.png')


if __name__ == '__main__':
    main()
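The per-pixel transform above maps (r, g, b) to (r // 2, b, g // 2). The same channel shuffle can be done array-wise; the sketch below is an illustrative alternative, assuming 'blue-flames.jpg' exists and numpy is available, and writes to a differently named output so it does not clash with the script above.

# Illustrative alternative: same transform done with numpy channel slicing.
import numpy as np
from PIL import Image

arr = np.asarray(Image.open('blue-flames.jpg').convert('RGB')).copy()
r, g, b = arr[..., 0].copy(), arr[..., 1].copy(), arr[..., 2].copy()
arr[..., 0] = r // 2   # halve red
arr[..., 1] = b        # new green is the old blue
arr[..., 2] = g // 2   # new blue is half the old green
Image.fromarray(arr).save('green-flames-numpy.png')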
17.391304
69
0.6
0
0
0
0
0
0
0
0
67
0.1675
b9eab80495274dd2446a7b029f17be91df29a452
1,539
py
Python
scipy/weave/examples/swig2_example.py
lesserwhirls/scipy-cwt
ee673656d879d9356892621e23ed0ced3d358621
[ "BSD-3-Clause" ]
8
2015-10-07T00:37:32.000Z
2022-01-21T17:02:33.000Z
scipy/weave/examples/swig2_example.py
lesserwhirls/scipy-cwt
ee673656d879d9356892621e23ed0ced3d358621
[ "BSD-3-Clause" ]
null
null
null
scipy/weave/examples/swig2_example.py
lesserwhirls/scipy-cwt
ee673656d879d9356892621e23ed0ced3d358621
[ "BSD-3-Clause" ]
8
2015-05-09T14:23:57.000Z
2018-11-15T05:56:00.000Z
"""Simple example to show how to use weave.inline on SWIG2 wrapped objects. SWIG2 refers to SWIG versions >= 1.3. To run this example you must build the trivial SWIG2 extension called swig2_ext. To do this you need to do something like this:: $ swig -c++ -python -I. -o swig2_ext_wrap.cxx swig2_ext.i $ g++ -Wall -O2 -I/usr/include/python2.3 -fPIC -I. -c \ -o swig2_ext_wrap.os swig2_ext_wrap.cxx $ g++ -shared -o _swig2_ext.so swig2_ext_wrap.os \ -L/usr/lib/python2.3/config The files swig2_ext.i and swig2_ext.h are included in the same directory that contains this file. Note that weave's SWIG2 support works fine whether SWIG_COBJECT_TYPES are used or not. Author: Prabhu Ramachandran Copyright (c) 2004, Prabhu Ramachandran License: BSD Style. """ # Import our SWIG2 wrapped library import swig2_ext import scipy.weave as weave from scipy.weave import swig2_spec, converters # SWIG2 support is not enabled by default. We do this by adding the # swig2 converter to the default list of converters. converters.default.insert(0, swig2_spec.swig2_converter()) def test(): """Instantiate the SWIG wrapped object and then call its method from C++ using weave.inline """ a = swig2_ext.A() b = swig2_ext.foo() # This will be an APtr instance. b.thisown = 1 # Prevent memory leaks. code = """a->f(); b->f(); """ weave.inline(code, ['a', 'b'], include_dirs=['.'], headers=['"swig2_ext.h"'], verbose=1) if __name__ == "__main__": test()
28.5
69
0.690058
0
0
0
0
0
0
0
0
1,168
0.758934
b9eba9b75a6e45fee4cdfe3d81874f5e8476b939
1,951
py
Python
src/simplify.py
denghz/Probabilistic-Programming
fa505a75c4558e507fd3effd2737c63537bfe50d
[ "BSD-3-Clause" ]
null
null
null
src/simplify.py
denghz/Probabilistic-Programming
fa505a75c4558e507fd3effd2737c63537bfe50d
[ "BSD-3-Clause" ]
null
null
null
src/simplify.py
denghz/Probabilistic-Programming
fa505a75c4558e507fd3effd2737c63537bfe50d
[ "BSD-3-Clause" ]
null
null
null
from wolframclient.language.expression import WLSymbol

from nnDiff import *


def parseGlobalSymbol(s):
    if isinstance(s, numbers.Number):
        return s
    if isinstance(s, WLSymbol):
        if s.name == 'E':
            return 'E'
        else:
            return s.name[7:]


def parse(exp):
    symbol = parseGlobalSymbol(exp)
    if symbol:
        return [symbol]
    else:
        f = str(exp.head)
        args = list(map(parse, exp.args))
        res = []
        if (f == "Power"):
            res1 = []
            p = args[1][0]
            e = args[0]
            if e == ['E']:
                return ['Exp'] + args[1]
            if p < 0:
                res = ["Inv"]
                p = -p
            if p >= 2:
                p = p - 2
                res1 = ["Times"] + e + e
                while p > 0:
                    p = p - 1
                    res1 = ["Times"] + res1 + e
                return res + res1
            else:
                return res + e
        else:
            if len(args) == 1:
                return [f] + args[0]
            elif len(args) >= 2:
                res = [f] + args[0] + args[1]
                args = args[2:]
                for arg in args:
                    res = [f] + res + arg
                return res


def simplify(exp):
    with WolframLanguageSession() as session:
        session.evaluate("Inv[zzz_] := 1/zzz")
        f = wlexpr(str(Func(exp)))
        getfreeVars = wlexpr("Reduce`FreeVariables")
        freeVariables = session.evaluate(getfreeVars(f))
        ass = wl.Element(wl.Alternatives(freeVariables), wl.Reals)
        wmres = session.evaluate(wl.FullSimplify(f,ass))
        print(wmres)
        res = parse(wmres)
        return res


if __name__ == "__main__":
    exp = sys.argv[1:]
    if exp == []:
        exp = ["Sin", "x"]
    res = map(str,simplify(exp))
    print(' '.join(res), file=sys.stderr)
27.097222
67
0.438237
0
0
0
0
0
0
0
0
103
0.052793
b9ebcddd99e456fbeb39a0191aad31656c7f4943
856
py
Python
setup.py
EdWard680/python-firetv
4c02f79a1c8ae60a489297178d010a31545a3b5d
[ "MIT" ]
null
null
null
setup.py
EdWard680/python-firetv
4c02f79a1c8ae60a489297178d010a31545a3b5d
[ "MIT" ]
null
null
null
setup.py
EdWard680/python-firetv
4c02f79a1c8ae60a489297178d010a31545a3b5d
[ "MIT" ]
null
null
null
from setuptools import setup

setup(
    name='firetv',
    version='1.0.7',
    description='Communicate with an Amazon Fire TV device via ADB over a network.',
    url='https://github.com/happyleavesaoc/python-firetv/',
    license='MIT',
    author='happyleaves',
    author_email='[email protected]',
    packages=['firetv'],
    install_requires=['pycryptodome', 'rsa', 'adb-homeassistant', 'pure-python-adb-homeassistant'],
    extras_require={
        'firetv-server': ['Flask>=0.10.1', 'PyYAML>=3.12']
    },
    entry_points={
        'console_scripts': [
            'firetv-server = firetv.__main__:main'
        ]
    },
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3'
    ]
)
30.571429
99
0.613318
0
0
0
0
0
0
0
0
503
0.587617
b9ec25017a264a5c2dd928342198ca509ad93675
893
py
Python
neo/io/exampleio.py
Mario-Kart-Felix/python-neo
951c97cf9eb56f5489da88940de920329e0f4c1b
[ "BSD-3-Clause" ]
199
2015-01-20T13:49:13.000Z
2022-03-21T18:35:29.000Z
neo/io/exampleio.py
Mario-Kart-Felix/python-neo
951c97cf9eb56f5489da88940de920329e0f4c1b
[ "BSD-3-Clause" ]
905
2015-01-07T09:21:15.000Z
2022-03-31T16:29:44.000Z
neo/io/exampleio.py
Mario-Kart-Felix/python-neo
951c97cf9eb56f5489da88940de920329e0f4c1b
[ "BSD-3-Clause" ]
178
2015-01-05T12:34:39.000Z
2022-02-20T23:06:52.000Z
""" neo.io have been split in 2 level API: * neo.io: this API give neo object * neo.rawio: this API give raw data as they are in files. Developper are encourage to use neo.rawio. When this is done the neo.io is done automagically with this king of following code. Author: sgarcia """ from neo.io.basefromrawio import BaseFromRaw from neo.rawio.examplerawio import ExampleRawIO class ExampleIO(ExampleRawIO, BaseFromRaw): name = 'example IO' description = "Fake IO" # This is an inportant choice when there are several channels. # 'split-all' : 1 AnalogSignal each 1 channel # 'group-by-same-units' : one 2D AnalogSignal for each group of channel with same units _prefered_signal_group_mode = 'group-by-same-units' def __init__(self, filename=''): ExampleRawIO.__init__(self, filename=filename) BaseFromRaw.__init__(self, filename)
28.806452
93
0.724524
504
0.56439
0
0
0
0
0
0
534
0.597984
b9ecb48aece2a2ca161d7bba9b3c95a928b2be7f
728
py
Python
scrapyproject/migrations/0003_auto_20170209_1025.py
sap9433/Distributed-Multi-User-Scrapy-System-with-a-Web-UI
0676f7599f288409d0faf7b6211c171ce8c46a7a
[ "MIT" ]
108
2017-03-14T05:40:13.000Z
2022-03-03T12:35:49.000Z
scrapyproject/migrations/0003_auto_20170209_1025.py
sap9433/Distributed-Multi-User-Scrapy-System-with-a-Web-UI
0676f7599f288409d0faf7b6211c171ce8c46a7a
[ "MIT" ]
8
2017-03-14T05:40:13.000Z
2018-10-13T07:07:29.000Z
scrapyproject/migrations/0003_auto_20170209_1025.py
sap9433/Distributed-Multi-User-Scrapy-System-with-a-Web-UI
0676f7599f288409d0faf7b6211c171ce8c46a7a
[ "MIT" ]
43
2017-04-19T12:18:07.000Z
2021-11-25T09:37:17.000Z
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('scrapyproject', '0002_auto_20170208_1738'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='link_generator',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='project',
            name='scraper_function',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='project',
            name='settings',
            field=models.TextField(blank=True),
        ),
    ]
24.266667
53
0.575549
619
0.850275
0
0
0
0
0
0
134
0.184066
b9eda494aa9f90de7b3474adbd78e46927f9990c
406
py
Python
src/cart/forms.py
cbsBiram/xarala__ssr
863e1362c786daa752b942b796f7a015211d2f1b
[ "FSFAP" ]
null
null
null
src/cart/forms.py
cbsBiram/xarala__ssr
863e1362c786daa752b942b796f7a015211d2f1b
[ "FSFAP" ]
null
null
null
src/cart/forms.py
cbsBiram/xarala__ssr
863e1362c786daa752b942b796f7a015211d2f1b
[ "FSFAP" ]
null
null
null
from django import forms
from django.utils.translation import gettext_lazy as _

COURSE_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 21)]


class CartAddCourseForm(forms.Form):
    quantity = forms.TypedChoiceField(
        choices=COURSE_QUANTITY_CHOICES, coerce=int, label=_("Quantité")
    )
    override = forms.BooleanField(
        required=False, initial=False, widget=forms.HiddenInput
    )
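A form like this is normally bound to POST data inside a cart view. The sketch below is illustrative only: the view name, the cart update call, and the URL name are assumptions; only CartAddCourseForm comes from the module above, which is assumed to live in the same app.

# Illustrative Django view sketch using the form defined above.
from django.shortcuts import redirect
from django.views.decorators.http import require_POST

from .forms import CartAddCourseForm  # assumes this module sits in the same app

@require_POST
def cart_add(request, course_id):
    form = CartAddCourseForm(request.POST)
    if form.is_valid():
        data = form.cleaned_data          # {'quantity': <int>, 'override': <bool>}
        # a cart object would be updated here, e.g.
        # cart.add(course_id, quantity=data['quantity'], override=data['override'])
    return redirect('cart:cart_detail')   # URL name is an assumption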
27.066667
72
0.726601
260
0.638821
0
0
0
0
0
0
11
0.027027
b9eda5f604ec6cf197f2876a0f748c37ee805587
73
py
Python
patches/datasets/__init__.py
sflippl/patches
c19889e676e231af44669a01c61854e9e5791227
[ "MIT" ]
null
null
null
patches/datasets/__init__.py
sflippl/patches
c19889e676e231af44669a01c61854e9e5791227
[ "MIT" ]
null
null
null
patches/datasets/__init__.py
sflippl/patches
c19889e676e231af44669a01c61854e9e5791227
[ "MIT" ]
null
null
null
"""Datasets of latent predictability tasks. """ from .pilgrimm import *
14.6
43
0.726027
0
0
0
0
0
0
0
0
47
0.643836